diff --git a/.codespell.ignore.txt b/.codespell.ignore.txt deleted file mode 100644 index c23f057a9c0..00000000000 --- a/.codespell.ignore.txt +++ /dev/null @@ -1,10 +0,0 @@ -geting -keypair -vas -strat -hist -dur -uint -iff -cas -te diff --git a/.dockerignore b/.dockerignore index 7fcd950a051..8797cc5274e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,2 +1,10 @@ bin +Containerfile +container-build.sh tags +test/certs/ipki +test/certs/misc +test/certs/webpki +test/certs/.softhsm-tokens +.git +.gocache diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 99747467fbe..41be4668d81 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,7 +3,21 @@ version: 2 updates: - package-ecosystem: "gomod" directory: "/" - open-pull-requests-limit: 2 + groups: + aws: + patterns: + - "github.com/aws/*" + otel: + patterns: + - "go.opentelemetry.io/*" + open-pull-requests-limit: 1 schedule: interval: "weekly" day: "wednesday" + cooldown: + default-days: 30 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: monthly + open-pull-requests-limit: 1 diff --git a/.github/issue_template.md b/.github/issue_template.md new file mode 100644 index 00000000000..61510640d55 --- /dev/null +++ b/.github/issue_template.md @@ -0,0 +1,21 @@ +--- +name: Default Template +about: File a bug report or feature request +title: '' +labels: '' +assignees: '' +--- + +**Summary:** + + +**Steps to reproduce:** + + +**Expected result:** + + +**Actual result:** + + +**Additional details:** diff --git a/.github/workflows/boulder-ci.yml b/.github/workflows/boulder-ci.yml index 2aa21908edd..09872cf2adb 100644 --- a/.github/workflows/boulder-ci.yml +++ b/.github/workflows/boulder-ci.yml @@ -2,11 +2,11 @@ name: Boulder CI -# Controls when the action will run. +# Controls when the action will run. on: # Triggers the workflow on push or pull request events but only for the main branch push: - branches: + branches: - main - release-branch-* pull_request: @@ -17,6 +17,9 @@ on: workflow_dispatch: # A workflow run is made up of one or more jobs that can run sequentially or in parallel +permissions: + contents: read + jobs: # Main test jobs. This looks like a single job, but the matrix # items will multiply it. For example every entry in the @@ -24,7 +27,7 @@ jobs: # tags and 5 tests there would be 10 jobs run. b: # The type of runner that the job will run on - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 strategy: # When set to true, GitHub cancels all in-progress jobs if any matrix job fails. Default: true @@ -33,48 +36,49 @@ jobs: matrix: # Add additional docker image tags here and all tests will be run with the additional image. BOULDER_TOOLS_TAG: - - go1.17.7_2022-03-08 - - go1.18_2022-03-15 - # Tests command definitions. Use the entire docker-compose command you want to run. + - go1.25.5_2025-12-03 + # Tests command definitions. Use the entire "docker compose" command you want to run. tests: # Run ./test.sh --help for a description of each of the flags. - - "./t.sh --generate --make-artifacts" + - "./t.sh --lints --generate" - "./t.sh --integration" # Testing Config Changes: # Config changes that have landed in main but not yet been applied to # production can be made in `test/config-next/.json`. 
# # Testing DB Schema Changes: - # Database migrations in `sa/_db-next/migrations` are only performed - # when `docker-compose` is called using `-f docker-compose.yml -f + # Database migrations in `sa/_db-next/migrations` are only performed + # when `docker compose` is called using `-f docker-compose.yml -f # docker-compose.next.yml`. - "./tn.sh --integration" - "./t.sh --unit --enable-race-detection" - "./tn.sh --unit --enable-race-detection" - "./t.sh --start-py" - # gomod-vendor runs with a separate network access definition - # because it needs to fetch packages from GitHub et. al., which - # is incompatible with the DNS server override in the boulder - # container (used for service discovery). - - "docker-compose run --use-aliases netaccess ./test.sh --gomod-vendor" - - # This sets the docker image tag for the boulder-tools repository to - # use in tests. It will be set appropriately for each tag in the list - # defined in the matrix. + # Same cases but backed by Vitess + MySQL 8 instead of ProxySQL + MariaDB + - "USE_VITESS=true ./t.sh --integration" + - "USE_VITESS=true ./tn.sh --integration" + - "USE_VITESS=true ./t.sh --unit --enable-race-detection" + - "USE_VITESS=true ./tn.sh --unit --enable-race-detection" + - "USE_VITESS=true ./t.sh --start-py" + env: + # This sets the docker image tag for the boulder-tools repository to + # use in tests. It will be set appropriately for each tag in the list + # defined in the matrix. BOULDER_TOOLS_TAG: ${{ matrix.BOULDER_TOOLS_TAG }} + BOULDER_VTCOMBOSERVER_TAG: vitessv23.0.0_2025-12-02 # Sequence of tasks that will be executed as part of the job. steps: # Checks out your repository under $GITHUB_WORKSPACE, so your job can access it - - uses: actions/checkout@v2 + - uses: actions/checkout@v6 with: persist-credentials: false - name: Docker Login # You may pin to the exact commit or the version. # uses: docker/login-action@f3364599c6aa293cdc2b8391b1b56d0c30e45c8a - uses: docker/login-action@v1.8.0 + uses: docker/login-action@v3.6.0 with: # Username used to log against the Docker registry username: ${{ secrets.DOCKER_USERNAME}} @@ -90,23 +94,78 @@ jobs: run: echo "Using BOULDER_TOOLS_TAG ${BOULDER_TOOLS_TAG}" # Pre-pull the docker containers before running the tests. - - name: docker-compose pull - run: docker-compose pull - + - name: docker compose pull + run: docker compose pull + # Run the test matrix. This will run - name: "Run Test: ${{ matrix.tests }}" run: ${{ matrix.tests }} + govulncheck: + runs-on: ubuntu-24.04 + strategy: + fail-fast: false + + steps: + # Checks out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Setup Go + uses: actions/setup-go@v6 + with: + # When Go produces a security release, we want govulncheck to run + # against the most recently released Go version. + check-latest: true + go-version: "stable" + + - name: Run govulncheck + run: go run golang.org/x/vuln/cmd/govulncheck@latest ./... + + vendorcheck: + runs-on: ubuntu-24.04 + strategy: + # When set to true, GitHub cancels all in-progress jobs if any matrix job fails. 
Default: true + fail-fast: false + matrix: + go-version: [ '1.25.5' ] + + steps: + # Checks out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Setup Go ${{ matrix.go-version }} + uses: actions/setup-go@v6 + with: + go-version: ${{ matrix.go-version }} + + - name: Verify vendor + shell: bash + run: | + go mod tidy + go mod vendor + git diff --exit-code + + # This is a utility build job to detect if the status of any of the # above jobs have failed and fail if so. It is needed so there can be # one static job name that can be used to determine success of the job # in GitHub branch protection. + # It does not block on the result of govulncheck so that a new vulnerability + # disclosure does not prevent any other PRs from being merged. boulder_ci_test_matrix_status: + permissions: + contents: none if: ${{ always() }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 name: Boulder CI Test Matrix - needs: b + needs: + - b + - vendorcheck steps: - name: Check boulder ci test matrix status - if: ${{ needs.b.result != 'success' }} + if: ${{ needs.b.result != 'success' || needs.vendorcheck.result != 'success' }} run: exit 1 diff --git a/.github/workflows/check-iana-registries.yml b/.github/workflows/check-iana-registries.yml new file mode 100644 index 00000000000..7506354c0c0 --- /dev/null +++ b/.github/workflows/check-iana-registries.yml @@ -0,0 +1,54 @@ +name: Check for IANA special-purpose address registry updates + +on: + schedule: + - cron: "20 16 * * *" + workflow_dispatch: + +jobs: + check-iana-registries: + runs-on: ubuntu-latest + + permissions: + contents: write + pull-requests: write + + steps: + - name: Checkout iana/data from main branch + uses: actions/checkout@v6 + with: + persist-credentials: false + sparse-checkout: iana/data + + # If the branch already exists, this will fail, which will remind us about + # the outstanding PR. + - name: Create an iana-registries-gha branch + run: | + git checkout --track origin/main -b iana-registries-gha + + - name: Retrieve the IANA special-purpose address registries + run: | + IANA_IPV4="https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry-1.csv" + IANA_IPV6="https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry-1.csv" + + REPO_IPV4="iana/data/iana-ipv4-special-registry-1.csv" + REPO_IPV6="iana/data/iana-ipv6-special-registry-1.csv" + + curl --fail --location --show-error --silent --output "${REPO_IPV4}" "${IANA_IPV4}" + curl --fail --location --show-error --silent --output "${REPO_IPV6}" "${IANA_IPV6}" + + - name: Create a commit and pull request + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + shell: + bash + # `git diff --exit-code` returns an error code if there are any changes. + run: | + if ! 
git diff --exit-code; then + git add iana/data/ + git config user.name "Irwin the IANA Bot" + git commit \ + --message "Update IANA special-purpose address registries" + git push origin HEAD + gh pr create --fill + fi diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000000..216650cdafd --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,29 @@ +name: "Code Scanning - Action" + +on: + pull_request: + branches: [ release-branch-*, main] + push: + branches: [ release-branch-*, main] + + +jobs: + CodeQL-Build: + # CodeQL runs on ubuntu-latest, windows-latest, and macos-latest + runs-on: ubuntu-latest + + permissions: + # required for all workflows + security-events: write + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + - name: Autobuild + uses: github/codeql-action/autobuild@v4 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/cps-review.yml b/.github/workflows/cps-review.yml new file mode 100644 index 00000000000..27c64a3cbb5 --- /dev/null +++ b/.github/workflows/cps-review.yml @@ -0,0 +1,72 @@ +name: Check PR for changes that trigger CP/CPS review + +on: + pull_request: + types: [ready_for_review, review_requested] + paths: + - 'features/features.go' + +jobs: + check-features: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version: "stable" + + - name: Checkout Upstream + uses: actions/checkout@v6 + with: + persist-credentials: false + ref: ${{ github.event.pull_request.base.ref }} + - name: Get Current Flags + run: go run ./test/list-features/list-features.go | sort >| /tmp/currflags.txt + + - name: Checkout PR + uses: actions/checkout@v6 + with: + persist-credentials: false + - name: Get PR Flags + run: go run ./test/list-features/list-features.go | sort >| /tmp/prflags.txt + + - name: Identify New Flags + id: newflags + run: echo flagnames=$(comm -13 /tmp/currflags.txt /tmp/prflags.txt | paste -sd,) >> $GITHUB_OUTPUT + + - name: Comment PR + if: ${{ steps.newflags.outputs.flagnames != '' }} + uses: actions/github-script@v8 + with: + script: | + const { owner, repo, number: issue_number } = context.issue; + + // No need to comment if the PR description already has a CPS review. + const reviewRegexp = /^CPS Compliance Review:/; + if (reviewRegexp.test(context.payload.pull_request.body)) { + return; + } + + // No need to comment if this task has previously commented on this PR. + const commentMarker = ''; + const comments = await github.rest.issues.listComments({ + owner, + repo, + issue_number + }); + if (comments.data.find(c => c.body.includes(commentMarker))) { + return; + } + + // No existing review or comment found, post the comment. + const prAuthor = context.payload.pull_request.user.login; + const flagNames = '${{ steps.newflags.outputs.flagnames }}'; + const commentBody = `${commentMarker}\n@${prAuthor}, this PR adds one or more new feature flags: ${flagNames}. 
As such, this PR must be accompanied by a review of the Let's Encrypt CP/CPS to ensure that our behavior both before and after this flag is flipped is compliant with that document.\n\nPlease conduct such a review, then add your findings to the PR description in a paragraph beginning with "CPS Compliance Review:".`; + await github.rest.issues.createComment({ + owner, + repo, + issue_number, + body: commentBody + }); diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index 55595e5c93b..00000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: golangci-lint -on: - # Triggers the workflow on push or pull request events but only for the main branch - push: - branches: - - main - - release-branch-* - pull_request: - branches: - - '*' -permissions: - contents: read - pull-requests: read -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/setup-go@v2 - - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 - with: - version: v1.42.1 - args: --timeout 9m - only-new-issues: true diff --git a/.github/workflows/issue-for-sre-handoff.yml b/.github/workflows/issue-for-sre-handoff.yml new file mode 100644 index 00000000000..efea3e78e28 --- /dev/null +++ b/.github/workflows/issue-for-sre-handoff.yml @@ -0,0 +1,55 @@ +name: Check PR for configuration and SQL changes + +on: + pull_request: + types: [review_requested] + paths: + - 'test/config-next/*.json' + - 'test/config-next/*.yaml' + - 'test/config-next/*.yml' + - 'sa/db-users/*.sql' + - 'sa/db-next/**/*.sql' + - 'sa/db/**/*.sql' + +jobs: + check-changes: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Comment PR + uses: actions/github-script@v8 + with: + script: | + const commentMarker = ''; + const prAuthor = context.payload.pull_request.user.login; + const commentBody = `${commentMarker}\n@${prAuthor}, this PR appears to contain configuration and/or SQL schema changes. Please ensure that a corresponding deployment ticket has been filed with the new values.\n`; + const { owner, repo, number: issue_number } = context.issue; + const issueRegexp = /IN-\d+/; + + // Get PR body and all issue comments. + const prBody = context.payload.pull_request.body; + const comments = await github.rest.issues.listComments({ + owner, + repo, + issue_number + }); + + if (issueRegexp.test(prBody) || comments.data.some(c => issueRegexp.test(c.body))) { + // Issue number exists in PR body or comments. + return; + } + + if (comments.data.find(c => c.body.includes(commentMarker))) { + // Comment already exists. + return; + } + + // No issue number or comment were found, post the comment. + await github.rest.issues.createComment({ + owner, + repo, + issue_number, + body: commentBody + }); + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/merged-to-main-or-release-branch.yml b/.github/workflows/merged-to-main-or-release-branch.yml new file mode 100644 index 00000000000..aacf553d701 --- /dev/null +++ b/.github/workflows/merged-to-main-or-release-branch.yml @@ -0,0 +1,19 @@ +# This GitHub Action runs only on pushes to main or a hotfix branch. It can +# be used by tag protection rules to ensure that tags may only be pushed if +# their corresponding commit was first pushed to one of those branches. 
+name: Merged to main (or hotfix) +permissions: + contents: read +on: + push: + branches: + - main + - release-branch-* +jobs: + merged-to-main: + name: Merged to main (or hotfix) + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5a7189b7ba7..ee87484f739 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,27 +1,100 @@ -name: Build and Release +# Build the Boulder Debian package on tag push, and attach it to a GitHub +# release. +# +# Keep the GO_VERSION matrix and the container-building steps in sync with +# try-release.yml. +name: Build release on: - # Runs automatically when a tag beginning with 'release-' is pushed. push: tags: - - release-* - -permissions: - # Overrides the org default of 'read'. This allows us to upload and post the - # resulting package file as part of a release. - contents: write + - '**' jobs: - gh-release: - runs-on: ubuntu-20.04 + draft-release: + runs-on: ubuntu-24.04 + permissions: + contents: write steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v6 with: persist-credentials: false + fetch-depth: '0' # Needed for verify-release-ancestry.sh to see origin/main + + - name: Verify release ancestry + run: ./tools/verify-release-ancestry.sh "$GITHUB_SHA" - - name: build and release + - name: Create draft release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # https://cli.github.com/manual/gh_release_create - run: | - ./tools/make-deb.sh - gh release create "${GITHUB_REF_NAME}" *.deb + run: gh release create --draft --generate-notes "${GITHUB_REF_NAME}" + + push-release: + needs: draft-release + strategy: + fail-fast: false + matrix: + GO_VERSION: + - "1.25.5" + runs-on: ubuntu-24.04 + permissions: + contents: write + packages: write + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false + fetch-depth: '0' # Needed for verify-release-ancestry.sh to see origin/main + + - name: Build Boulder container and .deb + id: build + env: + GO_VERSION: ${{ matrix.GO_VERSION }} + run: ./tools/container-build.sh + + - name: Tag Boulder container + run: docker tag boulder "ghcr.io/letsencrypt/boulder:${GITHUB_REF_NAME}-go${{ matrix.GO_VERSION }}" + + - name: Compute checksums + id: checksums + # The files listed on this line must be identical to the files uploaded + # in the last step. + run: sha256sum boulder*.deb boulder*.tar.gz >| boulder-${{ matrix.GO_VERSION }}.$(date +%s)-$(git rev-parse --short=8 HEAD).checksums.txt + + - name: Upload release files + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # https://cli.github.com/manual/gh_release_upload + run: gh release upload "${GITHUB_REF_NAME}" boulder*.deb boulder*.tar.gz boulder*.checksums.txt + + - name: Build ct-test-srv container + run: docker buildx build . 
--build-arg "GO_VERSION=${{ matrix.GO_VERSION }}" -f test/ct-test-srv/Dockerfile -t "ghcr.io/letsencrypt/ct-test-srv:${GITHUB_REF_NAME}-go${{ matrix.GO_VERSION }}" + + - name: Login to GitHub Container Registry + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Push Boulder container + run: docker push "ghcr.io/letsencrypt/boulder:${GITHUB_REF_NAME}-go${{ matrix.GO_VERSION }}" + + - name: Push ct-test-srv container + run: docker push "ghcr.io/letsencrypt/ct-test-srv:${GITHUB_REF_NAME}-go${{ matrix.GO_VERSION }}" + + publish-release: + needs: push-release + runs-on: ubuntu-24.04 + permissions: + contents: write + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Publish release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # https://cli.github.com/manual/gh_release_edit + run: gh release edit --draft=false "${GITHUB_REF_NAME}" diff --git a/.github/workflows/try-release.yml b/.github/workflows/try-release.yml new file mode 100644 index 00000000000..0b5a371f1f3 --- /dev/null +++ b/.github/workflows/try-release.yml @@ -0,0 +1,51 @@ +# Try building the Boulder Debian package on every PR and push to main. This is +# to make sure the actual release job will succeed when we tag a release. +# +# Keep the GO_VERSION matrix and the container-building steps in sync with +# release.yml. +name: Try release +on: + push: + branches: [main] + pull_request: + branches: [main] + workflow_dispatch: + +permissions: + contents: read + +jobs: + try-release: + strategy: + fail-fast: false + matrix: + GO_VERSION: + - "1.25.5" + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Build Boulder container and .deb + id: build + env: + GO_VERSION: ${{ matrix.GO_VERSION }} + run: ./tools/container-build.sh + + - name: Compute checksums + id: checksums + # The files listed on this line must be identical to the files uploaded + # in the last step of the real release action. + run: sha256sum boulder*.deb boulder*.tar.gz >| boulder-${{ matrix.GO_VERSION }}.$(date +%s)-$(git rev-parse --short=8 HEAD).checksums.txt + + - name: List files + id: files + run: ls boulder*.deb boulder*.tar.gz boulder*.checksums.txt + + - name: Show checksums + id: check + run: cat boulder*.checksums.txt + + - name: Build ct-test-srv container + run: docker buildx build . 
--build-arg "GO_VERSION=${{ matrix.GO_VERSION }}" -f test/ct-test-srv/Dockerfile -t "ghcr.io/letsencrypt/ct-test-srv:${{ github.sha }}-go${{ matrix.GO_VERSION }}" diff --git a/.gitignore b/.gitignore index c1e121cf31b..5e1426c919a 100644 --- a/.gitignore +++ b/.gitignore @@ -37,5 +37,17 @@ tags .idea .vscode/* -.hierarchy/ -.softhsm-tokens/ + +# ProxySQL log files +test/proxysql/*.log* + +# Coverage files +test/coverage + +# DSN symlinks +test/secrets/badkeyrevoker_dburl +test/secrets/cert_checker_dburl +test/secrets/incidents_dburl +test/secrets/revoker_dburl +test/secrets/sa_dburl +test/secrets/sa_ro_dburl diff --git a/.golangci.yml b/.golangci.yml index ddd84676aee..ac87b422273 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,42 +1,84 @@ +version: "2" linters: - disable-all: true + default: none enable: + - asciicheck + - bidichk - errcheck - - gofmt - gosec - - gosimple - govet - ineffassign - misspell + - nolintlint + - spancheck + - sqlclosecheck - staticcheck - - stylecheck + - unconvert + - unparam - unused -linters-settings: - errcheck: - ignore: fmt:[FS]?[Pp]rint*,io:Write,os:Remove,net/http:Write,github.com/miekg/dns:WriteMsg,net:Write,encoding/binary:Write - gosimple: - # S1029: Range over the string directly - checks: ["all", "-S1029"] - staticcheck: - # SA1019: Using a deprecated function, variable, constant or field - # SA6003: Converting a string to a slice of runes before ranging over it - checks: ["all", "-SA1019", "-SA6003"] - stylecheck: - # ST1003: Poorly chosen identifier - # ST1005: Incorrectly formatted error string - checks: ["all", "-ST1003", "-ST1005"] - gosec: - excludes: - # TODO: Identify, fix, and remove violations of most of these rules - - G101 # Potential hardcoded credentials - - G102 # Binds to all network interfaces - - G107 # Potential HTTP request made with variable url - - G201 # SQL string formatting - - G202 # SQL string concatenation - - G306 # Expect WriteFile permissions to be 0600 or less - - G401 # Use of weak cryptographic primitive - - G402 # TLS InsecureSkipVerify set true. 
- - G403 # RSA keys should be at least 2048 bits - - G404 # Use of weak random number generator (math/rand instead of crypto/rand) - - G501 # Blacklisted import `crypto/md5`: weak cryptographic primitive - - G505 # Blacklisted import `crypto/sha1`: weak cryptographic primitive + - wastedassign + settings: + errcheck: + exclude-functions: + - (net/http.ResponseWriter).Write + - (net.Conn).Write + - encoding/binary.Write + - io.Write + - net/http.Write + - os.Remove + - github.com/miekg/dns.WriteMsg + govet: + disable: + - fieldalignment + - shadow + enable-all: true + settings: + printf: + funcs: + - (github.com/letsencrypt/boulder/log.Logger).Errf + - (github.com/letsencrypt/boulder/log.Logger).Warningf + - (github.com/letsencrypt/boulder/log.Logger).Infof + - (github.com/letsencrypt/boulder/log.Logger).Debugf + - (github.com/letsencrypt/boulder/log.Logger).AuditInfof + - (github.com/letsencrypt/boulder/log.Logger).AuditErrf + - (github.com/letsencrypt/boulder/ocsp/responder).SampledError + - (github.com/letsencrypt/boulder/web.RequestEvent).AddError + gosec: + excludes: + # TODO: Identify, fix, and remove violations of most of these rules + - G101 # Potential hardcoded credentials + - G102 # Binds to all network interfaces + - G104 # Errors unhandled + - G107 # Potential HTTP request made with variable url + - G201 # SQL string formatting + - G202 # SQL string concatenation + - G204 # Subprocess launched with variable + - G302 # Expect file permissions to be 0600 or less + - G306 # Expect WriteFile permissions to be 0600 or less + - G304 # Potential file inclusion via variable + - G401 # Use of weak cryptographic primitive + - G402 # TLS InsecureSkipVerify set true. + - G403 # RSA keys should be at least 2048 bits + - G404 # Use of weak random number generator + nolintlint: + require-explanation: true + require-specific: true + allow-unused: false + staticcheck: + checks: + - all + # TODO: Identify, fix, and remove violations of most of these rules + - -S1029 # Range over the string directly + - -SA1019 # Using a deprecated function, variable, constant or field + - -SA6003 # Converting a string to a slice of runes before ranging over it + - -ST1000 # Incorrect or missing package comment + - -ST1003 # Poorly chosen identifier + - -ST1005 # Incorrectly formatted error string + - -QF1001 # Could apply De Morgan's law + - -QF1008 # Could remove embedded field from selector + exclusions: + presets: + - std-error-handling +formatters: + enable: + - gofmt diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 00000000000..12320dd7119 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,38 @@ +[files] +extend-exclude = [ + ".git/", + "go.mod", + "go.sum", + "vendor/", +] +ignore-hidden = false + +[default] +extend-ignore-re = [ + # Anything base64 or base64url longer than 36 chars is probably encoded. 
+ '\b[0-9A-Za-z+/]{36,}\b', + '\b[0-9A-Za-z_-]{36,}\b', + "0002a4ba3cf408927759", + "65CuDAA", + '"sql_warnings", "TrUe"', + '"tx_read_only", "FalSe"', + "evenMOREcaps", + '"iSsUe"', +] + +[default.extend-words] +# Extended DNS Error +"ede" = "ede" +# Alternative spelling +"unmarshaling" = "unmarshaling" + +[default.extend-identifiers] +"caaFailer" = "caaFailer" +"challStrat" = "challStrat" +"ExpectedStratType" = "ExpectedStratType" +"otConf" = "otConf" +"serInt" = "serInt" +"StratName" = "StratName" +"typ" = "typ" +"UPDATEs" = "UPDATEs" +"vai" = "vai" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index ac56bb63524..00000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -# Contributor Code of Conduct - -The contributor code of conduct is available for reference [on the community -forum](https://community.letsencrypt.org/guidelines). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index afbb935604d..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,402 +0,0 @@ -Thanks for helping us build Boulder! This page contains requirements and -guidelines for Boulder contributions. - -# Patch Requirements - -* All new functionality and fixed bugs must be accompanied by tests. -* All patches must meet the deployability requirements listed below. -* We prefer pull requests from external forks be created with the ["Allow edits - from - maintainers"](https://github.com/blog/2247-improving-collaboration-with-forks) - checkbox selected. - -# Review Requirements - -* All pull requests must receive at least one approval through the GitHub UI. -* We indicate review approval through GitHub's code review facility. -* New commits pushed to a branch invalidate previous reviews. In other words, a - reviewer must give positive reviews of a branch after its most recent pushed - commit. -* You cannot review your own code. -* If a branch contains commits from multiple authors, it needs a reviewer who - is not an author of commits on that branch. -* If a branch contains updates to files in the vendor/ directory, the author is - responsible for running tests in all updated dependencies, and commenting in - the review thread that they have done so. Reviewers must not approve reviews - that have changes in vendor/ but lack a comment about tests. -* Review changes to or addition of tests just as rigorously as you review code - changes. Consider: Do tests actually test what they mean to test? Is this the - best way to test the functionality in question? Do the tests cover all the - functionality in the patch, including error cases? -* Are there new RPCs or config fields? Make sure the patch meets the - Deployability rules below. - -# Patch Guidelines - -* Please include helpful comments. No need to gratuitously comment clear code, - but make sure it's clear why things are being done. Include information in - your pull request about what you're trying to accomplish with your patch. -* Avoid named return values. See - [#3017](https://github.com/letsencrypt/boulder/pull/3017) for an example of a - subtle problem they can cause. -* Do not include `XXX`s or naked `TODO`s. Use - the formats: - - ```go - // TODO(): Hoverboard + Time-machine unsupported until upstream patch. - // TODO(#): Pending hoverboard/time-machine interface. - // TODO(@githubusername): Enable hoverboard kickflips once interface is stable. - ``` - -# Squash merging - -Once a pull request is approved and the tests are passing, the author or any -other committer can merge it. 
We always use [squash -merges](https://github.com/blog/2141-squash-your-commits) via GitHub's web -interface. That means that during the course of your review you should -generally not squash or amend commits, or force push. Even if the changes in -each commit are small, keeping them separate makes it easier for us to review -incremental changes to a pull request. Rest assured that those tiny changes -will get squashed into a nice meaningful-size commit when we merge. - -If the CI tests are failing on your branch, you should look at the logs -to figure out why. Sometimes (though rarely) they fail spuriously, in which -case you can post a comment requesting that a project owner kick the build. - -# Error handling - -All errors must be addressed in some way: That may be simply by returning an -error up the stack, or by handling it in some intelligent way where it is -generated, or by explicitly ignoring it and assigning to `_`. We use the -`errcheck` tool in our integration tests to make sure all errors are -addressed. Note that ignoring errors, even in tests, should be rare, since -they may generate hard-to-debug problems. - -When handling errors, always do the operation which creates the error (usually -a function call) and the error checking on separate lines: -``` -err := someOperation(args) -if err != nil { - return nil, fmt.Errorf("some operation failed: %w", err) -} -``` -We avoid the `if err := someOperation(args); err != nil {...}` style as we find -it to be less readable and it can give rise to surprising scoping behavior. - -We define two special types of error. `BoulderError`, defined in -errors/errors.go, is used specifically when an typed error needs to be passed -across an RPC boundary. For instance, if the SA returns "not found", callers -need to be able to distinguish that from a network error. Not every error that -may pass across an RPC boundary needs to be a BoulderError, only those errors -that need to be handled by type elsewhere. Handling by type may be as simple as -turning a BoulderError into a specific type of ProblemDetail. - -The other special type of error is `ProblemDetails`. We try to treat these as a -presentation-layer detail, and use them only in parts of the system that are -responsible for rendering errors to end-users, i.e. wfe and wfe2. Note -one exception: The VA RPC layer defines its own `ProblemDetails` type, which is -returned to the RA and stored as part of a challenge (to eventually be rendered -to the user). - -Within WFE and WFE2, ProblemDetails are sent to the client by calling -`sendError()`, which also logs the error. For internal errors like timeout, -or any error type that we haven't specifically turned into a ProblemDetail, we -return a ServerInternal error. This avoids unnecessarily exposing internals. -It's possible to add additional errors to a logEvent using `.AddError()`, but -this should only be done when there is is internal-only information to log -that isn't redundant with the ProblemDetails sent to the user. Note that the -final argument to `sendError()`, `ierr`, will automatically get added to the -logEvent for ServerInternal errors, so when sending a ServerInternal error it's -not necessary to separately call `.AddError`. - -# Deployability - -We want to ensure that a new Boulder revision can be deployed to the -currently running Boulder production instance without requiring config -changes first. We also want to ensure that during a deploy, services can be -restarted in any order. 
That means two things: - -## Good zero values for config fields - -Any newly added config field must have a usable [zero -value](https://tour.golang.org/basics/12). That is to say, if a config field -is absent, Boulder shouldn't crash or misbehave. If that config file names a -file to be read, Boulder should be able to proceed without that file being -read. - -Note that there are some config fields that we want to be a hard requirement. -To handle such a field, first add it as optional, then file an issue to make -it required after the next deploy is complete. - -In general, we would like our deploy process to be: deploy new code + old -config; then immediately after deploy the same code + new config. This makes -deploys cheaper so we can do them more often, and allows us to more readily -separate deploy-triggered problems from config-triggered problems. - -## Flag-gating features - -When adding significant new features or replacing existing RPCs the -`boulder/features` package should be used to gate its usage. To add a flag a -new `const FeatureFlag` should be added and its default value specified in -`features.features` in `features/features.go`. In order to test if the flag -is enabled elsewhere in the codebase you can use -`features.Enabled(features.ExampleFeatureName)` which returns a `bool` -indicating if the flag is enabled or not. - -Each service should include a `map[string]bool` named `Features` in its -configuration object at the top level and call `features.Set` with that map -immediately after parsing the configuration. For example to enable -`UseNewMetrics` and disable `AccountRevocation` you would add this object: - -```json -{ - ... - "features": { - "UseNewMetrics": true, - "AccountRevocation": false, - } -} -``` - -Feature flags are meant to be used temporarily and should not be used for -permanent boolean configuration options. Once a feature has been enabled in -both staging and production the flag should be removed making the previously -gated functionality the default in future deployments. - -### Gating RPCs - -When you add a new RPC to a Boulder service (e.g. `SA.GetFoo()`), all -components that call that RPC should gate those calls using a feature flag. -Since the feature's zero value is false, a deploy with the existing config -will not call `SA.GetFoo()`. Then, once the deploy is complete and we know -that all SA instances support the `GetFoo()` RPC, we do a followup config -deploy that sets the default value to true, and finally remove the flag -entirely once we are confident the functionality it gates behaves correctly. - -### Gating migrations - -We use [database migrations](https://en.wikipedia.org/wiki/Schema_migration) -to modify the existing schema. These migrations will be run on live data -while Boulder is still running, so we need Boulder code at any given commit -to be capable of running without depending on any changes in schemas that -have not yet been applied. - -For instance, if we're adding a new column to an existing table, Boulder should -run correctly in three states: - -1. Migration not yet applied. -2. Migration applied, flag not yet flipped. -3. Migration applied, flag flipped. - -Specifically, that means that all of our `SELECT` statements should enumerate -columns to select, and not use `*`. Also, generally speaking, we will need a -separate model `struct` for serializing and deserializing data before and -after the migration. 
This is because the ORM package we use, -[`gorp`](https://github.com/go-gorp/gorp), expects every field in a struct to -map to a column in the table. If we add a new field to a model struct and -Boulder attempts to write that struct to a table that doesn't yet have the -corresponding column (case 1), gorp will fail with `Insert failed table posts -has no column named Foo`. There are examples of such models in sa/model.go, -along with code to turn a model into a `struct` used internally. - -An example of a flag-gated migration, adding a new `IsWizard` field to Person -controlled by a `AllowWizards` feature flag: - -```go -# features/features.go: - -const ( - unused FeatureFlag = iota // unused is used for testing - AllowWizards // Added! -) - -... - -var features = map[FeatureFlag]bool{ - unused: false, - AllowWizards: false, // Added! -} -``` - -```go -# sa/sa.go: - -struct Person { - HatSize int - IsWizard bool // Added! -} - -struct personModelv1 { - HatSize int -} - -// Added! -struct personModelv2 { - personModelv1 - IsWizard bool -} - -func (ssa *SQLStorageAuthority) GetPerson() (Person, error) { - if features.Enabled(features.AllowWizards) { // Added! - var model personModelv2 - ssa.dbMap.SelectOne(&model, "SELECT hatSize, isWizard FROM people") - return Person{ - HatSize: model.HatSize, - IsWizard: model.IsWizard, - } - } else { - var model personModelv1 - ssa.dbMap.SelectOne(&model, "SELECT hatSize FROM people") - return Person{ - HatSize: model.HatSize, - } - } -} - -func (ssa *SQLStorageAuthority) AddPerson(p Person) (error) { - if features.Enabled(features.AllowWizards) { // Added! - return ssa.dbMap.Insert(personModelv2{ - personModelv1: { - HatSize: p.HatSize, - }, - IsWizard: p.IsWizard, - }) - } else { - return ssa.dbMap.Insert(personModelv1{ - HatSize: p.HatSize, - // p.IsWizard ignored - }) - } -} -``` - -You will also need to update the `initTables` function from `sa/database.go` to -tell Gorp which table to use for your versioned model structs. Make sure to -consult the flag you defined so that only **one** of the table maps is added at -any given time, otherwise Gorp will error. Depending on your table you may also -need to add `SetKeys` and `SetVersionCol` entries for your versioned models. -Example: - -```go -func initTables(dbMap *gorp.DbMap) { - // < unrelated lines snipped for brevity > - - if features.Enabled(features.AllowWizards) { - dbMap.AddTableWithName(personModelv2, "person") - } else { - dbMap.AddTableWithName(personModelv1, "person") - } -} -``` - -You can then add a migration with: - -`$ goose -path ./sa/_db/ create AddWizards sql` - -Finally, edit the resulting file -(`sa/_db/migrations/20160915101011_AddWizards.sql`) to define your migration: - -```mysql --- +goose Up -ALTER TABLE people ADD isWizard BOOLEAN SET DEFAULT false; - --- +goose Down -ALTER TABLE people DROP isWizard BOOLEAN SET DEFAULT false; -``` - -# Release Process - -The current Boulder release process is described in the [boulder release process -repository](https://github.com/letsencrypt/boulder-release-process). It includes -[an example](https://github.com/letsencrypt/boulder-release-process#example) git -history showing a regular release being tagged, a hotfix being tagged from -a clean main, and a hotfix being tagged from a release branch because main -was dirty. - -Previously we used dedicated -[`staging`](https://github.com/letsencrypt/boulder/tree/staging) and -[`release`](https://github.com/letsencrypt/boulder/tree/release) branches. 
This -had several downsides and we frequently forgot to merge staging to release once -code had been shipped to production. We do not use the `staging` and `release` -branches anymore. Releases tagged from prior to Feb 1st 2017 are also outdated -artifacts of old process (e.g. the -[`hotfixes-2017-02-01`](https://github.com/letsencrypt/boulder/releases/tag/hotfixes%2F2017-02-01) -tag). - -# Dependencies - -We use [go modules](https://github.com/golang/go/wiki/Modules) and vendor our -dependencies. As of Go 1.12, this may require setting the GO111MODULE=on and -GOFLAGS=-mod=vendor environment variables. Inside the Docker containers for -Boulder tests, these variables are set for you, but if you ever work outside -those containers you will want to set them yourself. - -To add a dependency, add the import statement to your .go file, then run -`go build` on it. This will automatically add the dependency to go.mod. Next, -run `go mod vendor && git add vendor/` to save a copy in the vendor folder. - -When vendorizing dependencies, it's important to make sure tests pass on the -version you are vendorizing. Currently we enforce this by requiring that pull -requests containing a dependency update to any version other than a tagged -release include a comment indicating that you ran the tests and that they -succeeded, preferably with the command line you run them with. Note that you -may have to get a separate checkout of the dependency (using `go get` outside -of the boulder repository) in order to run its tests, as some vendored -modules do not bring their tests with them. - -## Updating Dependencies - -To upgrade a dependency, [see the Go -docs](https://github.com/golang/go/wiki/Modules#how-to-upgrade-and-downgrade-dependencies). -Typically you want `go get ` rather than `go get -u -`, which can introduce a lot of unexpected updates. After running -`go get`, make sure to run `go mod vendor && git add vendor/` to update the -vendor directory. If you forget, CI tests will catch this. - -If you are updating a dependency to a version which is not a tagged release, -see the note above about how to run all of a dependency's tests and note that -you have done so in the PR. - -Note that updating dependencies can introduce new, transitive dependencies. In -general we try to keep our dependencies as narrow as possible in order to -minimize the number of people and organizations whose code we need to trust. -As a rule of thumb: If an update introduces new packages or modules that are -inside a repository where we already depend on other packages or modules, it's -not a big deal. If it introduces a new dependency in a different repository, -please try to figure out where that dependency came from and why (for instance: -"package X, which we depend on, started supporting XML config files, so now we -depend on an XML parser") and include that in the PR description. When there are -a large number of new dependencies introduced, and we don't need the -functionality they provide, we should consider asking the relevant upstream -repository for a refactoring to reduce the number of transitive dependencies. - -# Go Version - -The [Boulder development -environment](https://github.com/letsencrypt/boulder/blob/main/README.md#setting-up-boulder) -does not use the Go version installed on the host machine, and instead uses a -Go environment baked into a "boulder-tools" Docker image. We build a separate -boulder-tools container for each supported Go version. 
Please see [the -Boulder-tools -README](https://github.com/letsencrypt/boulder/blob/main/test/boulder-tools/README.md) -for more information on upgrading Go versions. - -# ACME Protocol Divergences - -While Boulder attempts to implement the ACME specification as strictly as -possible there are places at which we will diverge from the letter of the -specification for various reasons. We detail these divergences (for both the -V1 and V2 API) in the [ACME divergences -doc](https://github.com/letsencrypt/boulder/blob/main/docs/acme-divergences.md). - -# ACME Protocol Implementation Details - -The ACME specification allows developers to make certain decisions as to how -various elements in the RFC are implemented. Some of these fully conformant -decisions are listed in [ACME implementation details -doc](https://github.com/letsencrypt/boulder/blob/main/docs/acme-implementation_details.md). - -## Problems or questions? - -The best place to ask dev related questions is on the [Community -Forums](https://community.letsencrypt.org/). diff --git a/Containerfile b/Containerfile new file mode 100644 index 00000000000..1baa0ac59f3 --- /dev/null +++ b/Containerfile @@ -0,0 +1,56 @@ +# This builds Boulder in a Docker container, then creates an image +# containing just the built Boulder binaries plus some ancillary +# files that are useful for predeployment testing. +FROM docker.io/ubuntu:24.04 AS builder + +ARG COMMIT_ID +ARG GO_VERSION +ARG VERSION + +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get --assume-yes --no-install-recommends --update install \ + ca-certificates curl gcc git gnupg2 libc6-dev + +COPY tools/fetch-and-verify-go.sh /tmp +RUN /tmp/fetch-and-verify-go.sh ${GO_VERSION} +RUN tar -C /opt -xzf go.tar.gz +ENV PATH="/opt/go/bin:${PATH}" + +COPY . /opt/boulder +WORKDIR /opt/boulder + +ENV GOBIN=/opt/boulder/bin/ +RUN go install \ + -buildvcs=false \ + -ldflags="-X \"github.com/letsencrypt/boulder/core.BuildID=${COMMIT_ID}\" -X \"github.com/letsencrypt/boulder/core.BuildTime=$(date -u)\"" \ + -mod=vendor \ + ./... 
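The `-ldflags` `-X` pairs in the builder stage above are how the commit ID and build time end up inside the binaries: the Go linker overwrites package-level string variables at link time (the Makefile later in this diff points the same mechanism at variables in `core/util.go`). As a minimal standalone sketch of the mechanism only — the names below are illustrative, not the actual contents of Boulder's `core` package:

```go
// Sketch of link-time string injection via -ldflags -X. The package and
// variable names here are hypothetical stand-ins for boulder's core package.
package main

import "fmt"

// Overwritten by the linker when the binary is built with, for example:
//
//	go build -ldflags '-X "main.BuildID=abc123" -X "main.BuildTime=Wed Dec 3 2025"'
var (
	BuildID   = "unset"
	BuildTime = "unset"
)

func main() {
	fmt.Printf("build %s (%s)\n", BuildID, BuildTime)
}
```

Built without the flags, the program prints the "unset" defaults; built with them, the linker substitutes the supplied strings, which is why no config file or environment variable is needed at runtime.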
+ +FROM docker.io/ubuntu:24.04 + +ARG VERSION + +LABEL org.opencontainers.image.authors="Internet Security Research Group, https://letsencrypt.org/" +LABEL org.opencontainers.image.created="$(date -u +%Y-%m-%dT%H:%M:%SZ)" +LABEL org.opencontainers.image.description="Boulder is an ACME-compatible X.509 Certificate Authority" +LABEL org.opencontainers.image.documentation="https://github.com/letsencrypt/boulder" +LABEL org.opencontainers.image.licenses="MPL-2.0" +LABEL org.opencontainers.image.source="https://github.com/letsencrypt/boulder" +LABEL org.opencontainers.image.title="Boulder" +LABEL org.opencontainers.image.url="https://github.com/letsencrypt/boulder" +LABEL org.opencontainers.image.vendor="Internet Security Research Group" +LABEL org.opencontainers.image.version="${VERSION}" + +COPY --from=builder \ + /opt/boulder/bin/admin \ + /opt/boulder/bin/boulder \ + /opt/boulder/bin/chall-test-srv \ + /opt/boulder/bin/ct-test-srv \ + /opt/boulder/bin/salesforce-test-srv \ + /opt/boulder/bin/zendesk-test-srv \ + /opt/boulder/bin/ +COPY --from=builder /opt/boulder/data /opt/boulder/data +COPY --from=builder /opt/boulder/sa/db /opt/boulder/sa/db +COPY --from=builder /opt/boulder/test/config /opt/boulder/test/config + +ENV PATH="/opt/boulder/bin:${PATH}" diff --git a/Makefile b/Makefile index 6427367f670..cb38dc8a95e 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,4 @@ +# TODO(#8338): Remove Makefile OBJDIR ?= $(shell pwd)/bin DESTDIR ?= /usr/local/bin ARCHIVEDIR ?= /tmp @@ -6,9 +7,10 @@ VERSION ?= 1.0.0 EPOCH ?= 1 MAINTAINER ?= "Community" -CMDS = $(shell find ./cmd -maxdepth 1 -mindepth 1 -type d | grep -v testdata) -CMD_BASENAMES = $(shell echo $(CMDS) | xargs -n1 basename) -CMD_BINS = $(addprefix bin/, $(CMD_BASENAMES) ) +# TODO(#8410): Remove pardot-test-srv when we've fully migrated to +# salesforce-test-srv. +CMDS = admin boulder ceremony ct-test-srv salesforce-test-srv pardot-test-srv chall-test-srv zendesk-test-srv +CMD_BINS = $(addprefix bin/, $(CMDS) ) OBJECTS = $(CMD_BINS) # Build environment variables (referencing core/util.go) @@ -25,7 +27,7 @@ BUILD_TIME_VAR = github.com/letsencrypt/boulder/core.BuildTime GO_BUILD_FLAGS = -ldflags "-X \"$(BUILD_ID_VAR)=$(BUILD_ID)\" -X \"$(BUILD_TIME_VAR)=$(BUILD_TIME)\" -X \"$(BUILD_HOST_VAR)=$(BUILD_HOST)\"" -.PHONY: all build +.PHONY: all build build_cmds deb tar all: build build: $(OBJECTS) @@ -35,27 +37,21 @@ $(OBJDIR): $(CMD_BINS): build_cmds +# TODO(#8410): Remove bin/pardot-test-srv when we've fully migrated to +# salesforce-test-srv. +bin/pardot-test-srv: bin/salesforce-test-srv + cp bin/salesforce-test-srv $@ + build_cmds: | $(OBJDIR) echo $(OBJECTS) - GOBIN=$(OBJDIR) GO111MODULE=on go install -mod=vendor $(GO_BUILD_FLAGS) ./... - ./link.sh + GOBIN=$(OBJDIR) go install -mod=vendor $(GO_BUILD_FLAGS) ./... -# Building an RPM requires `fpm` from https://github.com/jordansissel/fpm +# Building a .deb requires `fpm` from https://github.com/jordansissel/fpm # which you can install with `gem install fpm`. 
# It is recommended that maintainers use environment overrides to specify # Version and Epoch, such as: # -# VERSION=0.1.9 EPOCH=52 MAINTAINER="$(whoami)" ARCHIVEDIR=/tmp make build rpm -rpm: build - fpm -f -s dir -t rpm --rpm-digest sha256 --name "boulder" \ - --license "Mozilla Public License v2.0" --vendor "ISRG" \ - --url "https://github.com/letsencrypt/boulder" --prefix=/opt/boulder \ - --version "$(VERSION)" --iteration "$(COMMIT_ID)" --epoch "$(EPOCH)" \ - --package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).x86_64.rpm" \ - --description "Boulder is an ACME-compatible X.509 Certificate Authority" \ - --maintainer "$(MAINTAINER)" \ - test/config/ sa/_db data/ $(OBJECTS) - +# VERSION=0.1.9 EPOCH=52 MAINTAINER="$(whoami)" ARCHIVEDIR=/tmp make build deb deb: build fpm -f -s dir -t deb --name "boulder" \ --license "Mozilla Public License v2.0" --vendor "ISRG" \ @@ -64,4 +60,10 @@ deb: build --package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).x86_64.deb" \ --description "Boulder is an ACME-compatible X.509 Certificate Authority" \ --maintainer "$(MAINTAINER)" \ - test/config/ sa/_db data/ $(OBJECTS) bin/ct-test-srv + test/config/ sa/db data/ $(OBJECTS) + +tar: build + fpm -f -s dir -t tar --name "boulder" --prefix=/opt/boulder \ + --package "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).amd64.tar" \ + test/config/ sa/db data/ $(OBJECTS) + gzip -f "$(ARCHIVEDIR)/boulder-$(VERSION)-$(COMMIT_ID).amd64.tar" diff --git a/README.md b/README.md index df3140d68f1..27cb63eecde 100644 --- a/README.md +++ b/README.md @@ -3,10 +3,10 @@ [![Build Status](https://github.com/letsencrypt/boulder/actions/workflows/boulder-ci.yml/badge.svg?branch=main)](https://github.com/letsencrypt/boulder/actions/workflows/boulder-ci.yml?query=branch%3Amain) This is an implementation of an ACME-based CA. The [ACME -protocol](https://github.com/ietf-wg-acme/acme/) allows the CA to -automatically verify that an applicant for a certificate actually controls an -identifier, and allows domain holders to issue and revoke certificates for -their domains. Boulder is the software that runs [Let's +protocol](https://github.com/ietf-wg-acme/acme/) allows the CA to automatically +verify that an applicant for a certificate actually controls an identifier, and +allows subscribers to issue and revoke certificates for the identifiers they +control. Boulder is the software that runs [Let's Encrypt](https://letsencrypt.org). ## Contents @@ -30,11 +30,10 @@ Boulder is divided into the following main components: 4. Certificate Authority 5. Storage Authority 6. Publisher -7. OCSP Updater -8. OCSP Responder +7. CRL Updater This component model lets us separate the function of the CA by security -context. The Web Front End, Validation Authority, OCSP Responder and +context. The Web Front End, Validation Authority, CRL Storer, and Publisher need access to the Internet, which puts them at greater risk of compromise. The Registration Authority can live without Internet connectivity, but still needs to talk to the Web Front End and Validation @@ -43,35 +42,21 @@ Registration Authority. All components talk to the SA for storage, so most lines indicating SA RPCs are not shown here. 
```text - +--------- OCSP Updater - | | - v | - CA -> Publisher | - ^ | - | v + CA ---------> Publisher + ^ + | Subscriber -> WFE --> RA --> SA --> MariaDB | ^ Subscriber server <- VA <----+ | | - Browser ------------------> OCSP Responder - + Browser -----> S3 <----- CRL Storer/Updater ``` Internally, the logic of the system is based around five types of objects: -accounts, authorizations, challenges, orders (for ACME v2) and certificates, -mapping directly to the resources of the same name in ACME. - -We run two Web Front Ends, one for each ACME API version. Only the front end -components differentiate between API version. Requests from ACME clients -result in new objects and changes to objects. The Storage Authority maintains -persistent copies of the current set of objects. - -Objects are also passed from one component to another on change events. For -example, when a client provides a successful response to a validation -challenge, it results in a change to the corresponding validation object. The -Validation Authority forwards the new validation object to the Storage -Authority for storage, and to the Registration Authority for any updates to a -related Authorization object. +accounts, authorizations, challenges, orders and certificates, mapping directly +to the resources of the same name in ACME. Requests from ACME clients result in +new objects and changes to objects. The Storage Authority maintains persistent +copies of the current set of objects. Boulder uses gRPC for inter-component communication. For components that you want to be remote, it is necessary to instantiate a "client" and "server" for @@ -118,51 +103,71 @@ We recommend having **at least 2GB of RAM** available on your Docker host. In practice using less RAM may result in the MariaDB container failing in non-obvious ways. -To start Boulder in a Docker container, run: - -```shell -docker-compose up -``` - To run our standard battery of tests (lints, unit, integration): ```shell -docker-compose run --use-aliases boulder ./test.sh +./t.sh ``` To run all unit tests: ```shell -docker-compose run --use-aliases boulder ./test.sh --unit +./t.sh -u ``` To run specific unit tests (example is of the ./va directory): ```shell -docker-compose run --use-aliases boulder ./test.sh --unit --filter=./va +./t.sh -u -p ./va ``` To run all integration tests: ```shell -docker-compose run --use-aliases boulder ./test.sh --integration +./t.sh -i +``` + +To run unit tests and integration tests with coverage: + +```shell +./t.sh -ui -c --coverage-dir=./test/coverage/mytestrun +``` + +To run specific integration tests (example runs TestGenerateValidity and TestWFECORS): + +```shell +./t.sh -i -f TestGenerateValidity/TestWFECORS ``` -To run specific integration tests (example runs TestAkamaiPurgerDrainQueueFails and TestWFECORS): +To do any of the above, but using the "config-next" configuration, which +represents a likely future state (e.g. including new feature flags): ```shell -docker-compose run --use-aliases boulder ./test.sh --filter TestAkamaiPurgerDrainQueueFails/TestWFECORS +./tn.sh -your -options -here ``` -To get a list of available integration tests: +To start Boulder in a Docker container, first run: ```shell -docker-compose run --use-aliases boulder ./test.sh --list-integration-tests +docker compose run bsetup ``` +this will write the necessary certificates into `test/certs/[.softhsm-tokens,ipki,webpki]`; +You only need to run this once to create the certificates. 
If you +need to remove all of the certificates and start over, you can remove +the directories `./test/certs/.softhsm-tokens`, `./test/certs/ipki`, +and `./test/certs/webpki` and re-run `docker compose run bsetup`. + +Then run: + +```shell +docker compose up +``` + + The configuration in docker-compose.yml mounts your boulder checkout at /boulder so you can edit code on your host and it will be immediately -reflected inside the Docker containers run with docker-compose. +reflected inside the Docker containers run with `docker compose`. If you have problems with Docker, you may want to try [removing all containers and @@ -181,44 +186,44 @@ And edit docker-compose.yml to change the `FAKE_DNS` environment variable to match. This will cause Boulder's stubbed-out DNS resolver (`sd-test-srv`) to respond to all A queries with the address in `FAKE_DNS`. +If you use a host-based firewall (e.g. `ufw` or `iptables`) make sure you allow +connections from the Docker instance to your host on the required validation +ports to your ACME client. + Alternatively, you can override the docker-compose.yml default with an environmental variable using -e (replace 172.17.0.1 with the host IPv4 address found in the command above) ```shell -docker-compose run --use-aliases -e FAKE_DNS=172.17.0.1 --service-ports boulder ./start.py +docker compose run --use-aliases -e FAKE_DNS=172.17.0.1 --service-ports boulder ./start.py ``` Running tests without the `./test.sh` wrapper: -Run all unit tests +Run unit tests locally, without docker (only works for some directories): ```shell -docker-compose run --use-aliases boulder go test -p 1 ./... +go test ./issuance/... +``` + +Run all unit tests: + +```shell +docker compose run --use-aliases boulder go test -p 1 ./... ``` Run unit tests for a specific directory: ```shell -docker-compose run --use-aliases boulder go test +docker compose run --use-aliases boulder go test ``` Run integration tests (omit `--filter ` to run all): ```shell -docker-compose run --use-aliases boulder python3 test/integration-test.py --chisel --gotest --filter +docker compose run --use-aliases boulder python3 test/integration-test.py --chisel --gotest --filter ``` -Boulder's default VA configuration (`test/config/va.json`) is configured to -connect to port 5002 to validate HTTP-01 challenges and port 5001 to validate -TLS-ALPN-01 challenges. If you want to solve challenges with a client running -on your host you should make sure it uses these ports to respond to -validation requests, or update the VA configuration's `portConfig` to use -ports 80 and 443 to match how the VA operates in production and staging -environments. If you use a host-based firewall (e.g. `ufw` or `iptables`) -make sure you allow connections from the Docker instance to your host on the -required ports. - ### Working with Certbot Check out the Certbot client from https://github.com/certbot/certbot and @@ -249,8 +254,8 @@ the following URLs: To access the HTTPS versions of the endpoints you will need to configure your ACME client software to use a CA truststore that contains the -`test/wfe-tls/minica.pem` CA certificate. See -[`test/PKI.md`](https://github.com/letsencrypt/boulder/blob/main/test/PKI.md) +`test/certs/ipki/minica.pem` CA certificate. See +[`test/certs/README.md`](https://github.com/letsencrypt/boulder/blob/main/test/certs/README.md) for more information. Your local Boulder instance uses a fake DNS resolver that returns 127.0.0.1 @@ -259,10 +264,7 @@ resolved to your localhost. 
 To return an answer other than `127.0.0.1` change the Boulder `FAKE_DNS`
 environment variable to another IP address. Most often you will want to
 configure `FAKE_DNS` to point to your host
-machine where you run an ACME client. Remember to also configure the ACME
-client to use ports 5002 and 5001 instead of 80 and 443 for HTTP-01 and
-TLS-ALPN-01 challenge servers (or customize the Boulder VA configuration to
-match your port choices).
+machine where you run an ACME client.
 
 ### Production
 
@@ -271,8 +273,7 @@ Web PKI and the CA/Browser forum's baseline requirements. In our experience
 often Boulder is not the right fit for organizations that are evaluating it for
 production usage. In most cases a centrally managed PKI that doesn't require
 domain-authorization with ACME is a better choice. For this environment we
-recommend evaluating [cfssl](https://github.com/cloudflare/cfssl) or a project
-other than Boulder.
+recommend evaluating a project other than Boulder.
 
 We offer a brief
 [deployment and implementation guide](https://github.com/letsencrypt/boulder/wiki/Deployment-&-Implementation-Guide)
@@ -292,10 +293,16 @@ fit we're happy to answer questions to the best of our ability.
 
 ## Contributing
 
 Please take a look at
-[CONTRIBUTING.md](https://github.com/letsencrypt/boulder/blob/main/CONTRIBUTING.md)
+[CONTRIBUTING.md](https://github.com/letsencrypt/boulder/blob/main/docs/CONTRIBUTING.md)
 for our guidelines on submitting patches, code review process, code of conduct,
 and various other tips related to working on the codebase.
 
+## Code of Conduct
+
+The code of conduct for everyone participating in this community in any capacity
+is available for reference
+[on the community forum](https://community.letsencrypt.org/guidelines).
+
 ## License
 
 This project is licensed under the Mozilla Public License 2.0, the full text
diff --git a/akamai/cache-client.go b/akamai/cache-client.go
deleted file mode 100644
index df971801e58..00000000000
--- a/akamai/cache-client.go
+++ /dev/null
@@ -1,434 +0,0 @@
-package akamai
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/md5"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/jmhodges/clock"
-	"github.com/letsencrypt/boulder/core"
-	blog "github.com/letsencrypt/boulder/log"
-	"github.com/letsencrypt/boulder/metrics"
-	"github.com/prometheus/client_golang/prometheus"
-	"golang.org/x/crypto/ocsp"
-)
-
-const (
-	timestampFormat = "20060102T15:04:05-0700"
-	v3PurgePath     = "/ccu/v3/delete/url/"
-	v3PurgeTagPath  = "/ccu/v3/delete/tag/"
-)
-
-var (
-	// ErrAllRetriesFailed indicates that all purge submission attempts have
-	// failed.
-	ErrAllRetriesFailed = errors.New("all attempts to submit purge request failed")
-
-	// errFatal is returned by the purge method of CachePurgeClient to indicate
-	// that it failed for a reason that cannot be remediated by retrying the
-	// request.
-	errFatal = errors.New("fatal error")
-)
-
-type v3PurgeRequest struct {
-	Objects []string `json:"objects"`
-}
-
-type purgeResponse struct {
-	HTTPStatus       int    `json:"httpStatus"`
-	Detail           string `json:"detail"`
-	EstimatedSeconds int    `json:"estimatedSeconds"`
-	PurgeID          string `json:"purgeId"`
-}
-
-// CachePurgeClient talks to the Akamai CCU REST API. It is safe to make
-// concurrent requests using this client.
-type CachePurgeClient struct { - client *http.Client - apiEndpoint string - apiHost string - apiScheme string - clientToken string - clientSecret string - accessToken string - v3Network string - purgeBatchInterval time.Duration - queueEntriesPerBatch int - retries int - retryBackoff time.Duration - log blog.Logger - purgeLatency prometheus.Histogram - purges *prometheus.CounterVec - clk clock.Clock -} - -// NewCachePurgeClient performs some basic validation of supplied configuration -// and returns a newly constructed CachePurgeClient. -func NewCachePurgeClient( - baseURL, - clientToken, - secret, - accessToken, - network string, - purgeBatchInterval time.Duration, - queueEntriesPerBatch, - retries int, - retryBackoff time.Duration, - log blog.Logger, scope prometheus.Registerer, -) (*CachePurgeClient, error) { - if network != "production" && network != "staging" { - return nil, fmt.Errorf("'V3Network' must be \"staging\" or \"production\", got %q", network) - } - - endpoint, err := url.Parse(strings.TrimSuffix(baseURL, "/")) - if err != nil { - return nil, fmt.Errorf("failed to parse 'BaseURL' as a URL: %s", err) - } - - purgeLatency := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "ccu_purge_latency", - Help: "Histogram of latencies of CCU purges", - Buckets: metrics.InternetFacingBuckets, - }) - scope.MustRegister(purgeLatency) - - purges := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ccu_purges", - Help: "A counter of CCU purges labelled by the result", - }, []string{"type"}) - scope.MustRegister(purges) - - return &CachePurgeClient{ - client: new(http.Client), - apiEndpoint: endpoint.String(), - apiHost: endpoint.Host, - apiScheme: strings.ToLower(endpoint.Scheme), - clientToken: clientToken, - clientSecret: secret, - accessToken: accessToken, - v3Network: network, - purgeBatchInterval: purgeBatchInterval, - queueEntriesPerBatch: queueEntriesPerBatch, - retries: retries, - retryBackoff: retryBackoff, - log: log, - clk: clock.New(), - purgeLatency: purgeLatency, - purges: purges, - }, nil -} - -// makeAuthHeader constructs a special Akamai authorization header. This header -// is used to identify clients to Akamai's EdgeGrid APIs. For a more detailed -// description of the generation process see their docs: -// https://developer.akamai.com/introduction/Client_Auth.html -func (cpc *CachePurgeClient) makeAuthHeader(body []byte, apiPath string, nonce string) (string, error) { - // The akamai API is very time sensitive (recommending reliance on a stratum 2 - // or better time source). Additionally, timestamps MUST be in UTC. - timestamp := cpc.clk.Now().UTC().Format(timestampFormat) - header := fmt.Sprintf( - "EG1-HMAC-SHA256 client_token=%s;access_token=%s;timestamp=%s;nonce=%s;", - cpc.clientToken, - cpc.accessToken, - timestamp, - nonce, - ) - bodyHash := sha256.Sum256(body) - tbs := fmt.Sprintf( - "%s\t%s\t%s\t%s\t%s\t%s\t%s", - "POST", - cpc.apiScheme, - cpc.apiHost, - apiPath, - // Signed headers are not required for this request type. - "", - base64.StdEncoding.EncodeToString(bodyHash[:]), - header, - ) - cpc.log.Debugf("To-be-signed Akamai EdgeGrid authentication %q", tbs) - - h := hmac.New(sha256.New, signingKey(cpc.clientSecret, timestamp)) - h.Write([]byte(tbs)) - return fmt.Sprintf( - "%ssignature=%s", - header, - base64.StdEncoding.EncodeToString(h.Sum(nil)), - ), nil -} - -// signingKey makes a signing key by HMAC'ing the timestamp -// using a client secret as the key. 
-func signingKey(clientSecret string, timestamp string) []byte { - h := hmac.New(sha256.New, []byte(clientSecret)) - h.Write([]byte(timestamp)) - key := make([]byte, base64.StdEncoding.EncodedLen(32)) - base64.StdEncoding.Encode(key, h.Sum(nil)) - return key -} - -// PurgeTags constructs and dispatches a request to purge a batch of Tags. -func (cpc *CachePurgeClient) PurgeTags(tags []string) error { - purgeReq := v3PurgeRequest{ - Objects: tags, - } - endpoint := fmt.Sprintf("%s%s%s", cpc.apiEndpoint, v3PurgeTagPath, cpc.v3Network) - return cpc.authedRequest(endpoint, purgeReq) -} - -// purgeURLs constructs and dispatches a request to purge a batch of URLs. -func (cpc *CachePurgeClient) purgeURLs(urls []string) error { - purgeReq := v3PurgeRequest{ - Objects: urls, - } - endpoint := fmt.Sprintf("%s%s%s", cpc.apiEndpoint, v3PurgePath, cpc.v3Network) - return cpc.authedRequest(endpoint, purgeReq) -} - -// authedRequest POSTs the JSON marshaled purge request to the provided endpoint -// along with an Akamai authorization header. -func (cpc *CachePurgeClient) authedRequest(endpoint string, body v3PurgeRequest) error { - reqBody, err := json.Marshal(body) - if err != nil { - return fmt.Errorf("%s: %w", err, errFatal) - } - - req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(reqBody)) - if err != nil { - return fmt.Errorf("%s: %w", err, errFatal) - } - - endpointURL, err := url.Parse(endpoint) - if err != nil { - return fmt.Errorf("while parsing %q as URL: %s: %w", endpoint, err, errFatal) - } - - authorization, err := cpc.makeAuthHeader(reqBody, endpointURL.Path, core.RandomString(16)) - if err != nil { - return fmt.Errorf("%s: %w", err, errFatal) - } - req.Header.Set("Authorization", authorization) - req.Header.Set("Content-Type", "application/json") - cpc.log.Debugf("POSTing to endpoint %q (header %q) (body %q)", endpoint, authorization, reqBody) - - start := cpc.clk.Now() - resp, err := cpc.client.Do(req) - cpc.purgeLatency.Observe(cpc.clk.Since(start).Seconds()) - if err != nil { - return fmt.Errorf("while POSTing to endpoint %q: %w", endpointURL, err) - } - defer resp.Body.Close() - - if resp.Body == nil { - return fmt.Errorf("response body was empty from URL %q", resp.Request.URL) - } - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - - // Success for a request to purge a URL or Cache tag is 'HTTP 201'. 
- // https://techdocs.akamai.com/purge-cache/reference/delete-url - // https://techdocs.akamai.com/purge-cache/reference/delete-tag - if resp.StatusCode != http.StatusCreated { - switch resp.StatusCode { - // https://techdocs.akamai.com/purge-cache/reference/403 - case http.StatusForbidden: - return fmt.Errorf("client not authorized to make requests for URL %q: %w", resp.Request.URL, errFatal) - - // https://techdocs.akamai.com/purge-cache/reference/504 - case http.StatusGatewayTimeout: - return fmt.Errorf("server timed out, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL) - - // https://techdocs.akamai.com/purge-cache/reference/429 - case http.StatusTooManyRequests: - return fmt.Errorf("exceeded request count rate limit, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL) - - // https://techdocs.akamai.com/purge-cache/reference/413 - case http.StatusRequestEntityTooLarge: - return fmt.Errorf("exceeded request size rate limit, got HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL) - default: - return fmt.Errorf("received HTTP %d (body %q) for URL %q", resp.StatusCode, respBody, resp.Request.URL) - } - } - - var purgeInfo purgeResponse - err = json.Unmarshal(respBody, &purgeInfo) - if err != nil { - return fmt.Errorf("while unmarshalling body %q from URL %q as JSON: %w", respBody, resp.Request.URL, err) - } - - // Ensure the unmarshaled body concurs with the status of the response - // received. - if purgeInfo.HTTPStatus != http.StatusCreated { - if purgeInfo.HTTPStatus == http.StatusForbidden { - return fmt.Errorf("client not authorized to make requests to URL %q: %w", resp.Request.URL, errFatal) - } - return fmt.Errorf("unmarshaled HTTP %d (body %q) from URL %q", purgeInfo.HTTPStatus, respBody, resp.Request.URL) - } - - cpc.log.AuditInfof("Purge request sent successfully (ID %s) (body %s). Purge expected in %ds", - purgeInfo.PurgeID, reqBody, purgeInfo.EstimatedSeconds) - return nil -} - -func (cpc *CachePurgeClient) purgeBatch(queueEntries [][]string) error { - var urls []string - for _, response := range queueEntries { - urls = append(urls, response...) - } - - successful := false - for i := 0; i <= cpc.retries; i++ { - cpc.clk.Sleep(core.RetryBackoff(i, cpc.retryBackoff, time.Minute, 1.3)) - - err := cpc.purgeURLs(urls) - if err != nil { - if errors.Is(err, errFatal) { - cpc.purges.WithLabelValues("fatal failure").Inc() - return err - } - cpc.log.AuditErrf("Akamai cache purge failed, retrying: %s", err) - cpc.purges.WithLabelValues("retryable failure").Inc() - continue - } - successful = true - break - } - - if !successful { - cpc.purges.WithLabelValues("fatal failure").Inc() - return ErrAllRetriesFailed - } - - cpc.purges.WithLabelValues("success").Inc() - return nil -} - -// Purge dispatches the provided queue entries in batched requests to the Akamai -// Fast-Purge API. Requests will be attempted cpc.retries number of times before -// giving up and returning ErrAllRetriesFailed and the beginning index position -// of the batch where the failure was encountered. -func (cpc *CachePurgeClient) Purge(queueEntries [][]string) (int, error) { - totalEntries := len(queueEntries) - for batchBegin := 0; batchBegin < totalEntries; { - batchEnd := batchBegin + cpc.queueEntriesPerBatch - if batchEnd > totalEntries { - // Avoid index out of range error. 
- batchEnd = totalEntries - } - - err := cpc.purgeBatch(queueEntries[batchBegin:batchEnd]) - if err != nil { - return batchBegin, err - } - batchBegin += cpc.queueEntriesPerBatch - } - return totalEntries, nil -} - -// CheckSignature is exported for use in tests and akamai-test-srv. -func CheckSignature(secret string, url string, r *http.Request, body []byte) error { - bodyHash := sha256.Sum256(body) - bodyHashB64 := base64.StdEncoding.EncodeToString(bodyHash[:]) - - authorization := r.Header.Get("Authorization") - authValues := make(map[string]string) - for _, v := range strings.Split(authorization, ";") { - splitValue := strings.Split(v, "=") - authValues[splitValue[0]] = splitValue[1] - } - headerTimestamp := authValues["timestamp"] - splitHeader := strings.Split(authorization, "signature=") - shortenedHeader, signature := splitHeader[0], splitHeader[1] - hostPort := strings.Split(url, "://")[1] - h := hmac.New(sha256.New, signingKey(secret, headerTimestamp)) - input := []byte(fmt.Sprintf("POST\thttp\t%s\t%s\t\t%s\t%s", - hostPort, - r.URL.Path, - bodyHashB64, - shortenedHeader, - )) - h.Write(input) - expectedSignature := base64.StdEncoding.EncodeToString(h.Sum(nil)) - if signature != expectedSignature { - return fmt.Errorf("expected signature %q, got %q in %q", - signature, authorization, expectedSignature) - } - return nil -} - -func reverseBytes(b []byte) []byte { - for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { - b[i], b[j] = b[j], b[i] - } - return b -} - -// makeOCSPCacheURLs constructs the 3 URLs associated with each cached OCSP -// response. -func makeOCSPCacheURLs(req []byte, ocspServer string) []string { - hash := md5.Sum(req) - encReq := base64.StdEncoding.EncodeToString(req) - return []string{ - // POST Cache Key: the format of this entry is the URL that was POSTed - // to with a query string with the parameter 'body-md5' and the value of - // the first two uint32s in little endian order in hex of the MD5 hash - // of the OCSP request body. - // - // There is limited public documentation of this feature. However, this - // entry is what triggers the Akamai cache behavior that allows Akamai to - // identify POST based OCSP for purging. For more information, see: - // https://techdocs.akamai.com/property-mgr/reference/v2020-03-04-cachepost - // https://techdocs.akamai.com/property-mgr/docs/cache-post-responses - fmt.Sprintf("%s?body-md5=%x%x", ocspServer, reverseBytes(hash[0:4]), reverseBytes(hash[4:8])), - - // URL (un-encoded): RFC 2560 and RFC 5019 state OCSP GET URLs 'MUST - // properly url-encode the base64 encoded' request but a large enough - // portion of tools do not properly do this (~10% of GET requests we - // receive) such that we must purge both the encoded and un-encoded - // URLs. - // - // Due to Akamai proxy/cache behavior which collapses '//' -> '/' we also - // collapse double slashes in the un-encoded URL so that we properly purge - // what is stored in the cache. - fmt.Sprintf("%s%s", ocspServer, strings.Replace(encReq, "//", "/", -1)), - - // URL (encoded): this entry is the url-encoded GET URL used to request - // OCSP as specified in RFC 2560 and RFC 5019. - fmt.Sprintf("%s%s", ocspServer, url.QueryEscape(encReq)), - } -} - -// GeneratePurgeURLs generates akamai URLs that can be POSTed to in order to -// purge akamai's cache of the corresponding OCSP responses. The URLs encode -// the contents of the OCSP request, so this method constructs a full OCSP -// request. 
-func GeneratePurgeURLs(cert, issuer *x509.Certificate) ([]string, error) { - req, err := ocsp.CreateRequest(cert, issuer, nil) - if err != nil { - return nil, err - } - - // Create a GET and special Akamai POST style OCSP url for each endpoint in - // cert.OCSPServer. - urls := []string{} - for _, ocspServer := range cert.OCSPServer { - if !strings.HasSuffix(ocspServer, "/") { - ocspServer += "/" - } - urls = append(urls, makeOCSPCacheURLs(req, ocspServer)...) - } - return urls, nil -} diff --git a/akamai/cache-client_test.go b/akamai/cache-client_test.go deleted file mode 100644 index d82cbfd5256..00000000000 --- a/akamai/cache-client_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package akamai - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/jmhodges/clock" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" -) - -func TestMakeAuthHeader(t *testing.T) { - log := blog.NewMock() - stats := metrics.NoopRegisterer - cpc, err := NewCachePurgeClient( - "https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net", - "akab-client-token-xxx-xxxxxxxxxxxxxxxx", - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=", - "akab-access-token-xxx-xxxxxxxxxxxxxxxx", - "production", - time.Millisecond*32, - 0, - 2, - time.Second, - log, - stats, - ) - test.AssertNotError(t, err, "Failed to create cache purge client") - fc := clock.NewFake() - cpc.clk = fc - wantedTimestamp, err := time.Parse(timestampFormat, "20140321T19:34:21+0000") - test.AssertNotError(t, err, "Failed to parse timestamp") - fc.Set(wantedTimestamp) - - expectedHeader := "EG1-HMAC-SHA256 client_token=akab-client-token-xxx-xxxxxxxxxxxxxxxx;access_token=akab-access-token-xxx-xxxxxxxxxxxxxxxx;timestamp=20140321T19:34:21+0000;nonce=nonce-xx-xxxx-xxxx-xxxx-xxxxxxxxxxxx;signature=hXm4iCxtpN22m4cbZb4lVLW5rhX8Ca82vCFqXzSTPe4=" - authHeader, err := cpc.makeAuthHeader( - []byte("datadatadatadatadatadatadatadata"), - "/testapi/v1/t3", - "nonce-xx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", - ) - test.AssertNotError(t, err, "Failed to create authorization header") - test.AssertEquals(t, authHeader, expectedHeader) -} - -type akamaiServer struct { - responseCode int - *httptest.Server -} - -func (as *akamaiServer) sendResponse(w http.ResponseWriter, resp purgeResponse) { - respBytes, err := json.Marshal(resp) - if err != nil { - fmt.Printf("Failed to marshal response body: %s\n", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - w.WriteHeader(as.responseCode) - w.Write(respBytes) -} - -func (as *akamaiServer) purgeHandler(w http.ResponseWriter, r *http.Request) { - var req struct { - Objects []string - } - body, err := ioutil.ReadAll(r.Body) - if err != nil { - fmt.Printf("Failed to read request body: %s\n", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - err = CheckSignature("secret", as.URL, r, body) - if err != nil { - fmt.Printf("Error checking signature: %s\n", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - err = json.Unmarshal(body, &req) - if err != nil { - fmt.Printf("Failed to unmarshal request body: %s\n", err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - resp := purgeResponse{ - HTTPStatus: as.responseCode, - Detail: "?", - EstimatedSeconds: 10, - PurgeID: "?", - } - - fmt.Println(r.URL.Path, v3PurgePath) - if strings.HasPrefix(r.URL.Path, v3PurgePath) { - for _, testURL := range req.Objects { - if 
!strings.HasPrefix(testURL, "http://") { - resp.HTTPStatus = http.StatusForbidden - break - } - } - } - as.sendResponse(w, resp) -} -func newAkamaiServer(code int) *akamaiServer { - m := http.NewServeMux() - as := akamaiServer{ - responseCode: code, - Server: httptest.NewServer(m), - } - m.HandleFunc(v3PurgePath, as.purgeHandler) - m.HandleFunc(v3PurgeTagPath, as.purgeHandler) - return &as -} - -// TestV3Purge tests the Akamai CCU v3 purge API -func TestV3Purge(t *testing.T) { - as := newAkamaiServer(http.StatusCreated) - defer as.Close() - - // Client is a purge client with a "production" v3Network parameter - client, err := NewCachePurgeClient( - as.URL, - "token", - "secret", - "accessToken", - "production", - time.Millisecond*32, - 2, - 3, - time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - ) - test.AssertNotError(t, err, "Failed to create CachePurgeClient") - fc := clock.NewFake() - client.clk = fc - - _, err = client.Purge([][]string{{"http://test.com"}}) - test.AssertNotError(t, err, "Purge failed; expected 201 response") - - started := client.clk.Now() - as.responseCode = http.StatusInternalServerError - _, err = client.Purge([][]string{{"http://test.com"}}) - test.AssertError(t, err, "Purge succeeded; expected 500 response") - t.Log(client.clk.Since(started)) - test.Assert(t, client.clk.Since(started) > (time.Second*4), "Retries should've taken at least 4.4 seconds") - - started = client.clk.Now() - as.responseCode = http.StatusCreated - _, err = client.Purge([][]string{{"http:/test.com"}}) - test.AssertError(t, err, "Purge succeeded; expected a 403 response from malformed URL") - test.Assert(t, client.clk.Since(started) < time.Second, "Purge should've failed out immediately") -} - -func TestPurgeTags(t *testing.T) { - as := newAkamaiServer(http.StatusCreated) - defer as.Close() - - // Client is a purge client with a "production" v3Network parameter - client, err := NewCachePurgeClient( - as.URL, - "token", - "secret", - "accessToken", - "production", - time.Millisecond*32, - 2, - 3, - time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - ) - test.AssertNotError(t, err, "Failed to create CachePurgeClient") - fc := clock.NewFake() - client.clk = fc - - err = client.PurgeTags([]string{"ff"}) - test.AssertNotError(t, err, "Purge failed; expected response 201") - - as.responseCode = http.StatusForbidden - err = client.PurgeTags([]string{"http://test.com"}) - test.AssertError(t, err, "Purge succeeded; expected Forbidden response") -} - -func TestNewCachePurgeClient(t *testing.T) { - // Creating a new cache purge client with an invalid "network" parameter should error - _, err := NewCachePurgeClient( - "http://127.0.0.1:9000/", - "token", - "secret", - "accessToken", - "fake", - time.Millisecond*32, - 2, - 3, - time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - ) - test.AssertError(t, err, "NewCachePurgeClient with invalid network parameter didn't error") - - // Creating a new cache purge client with a valid "network" parameter shouldn't error - _, err = NewCachePurgeClient( - "http://127.0.0.1:9000/", - "token", - "secret", - "accessToken", - "staging", - time.Millisecond*32, - 2, - 3, - time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - ) - test.AssertNotError(t, err, "NewCachePurgeClient with valid network parameter errored") - - // Creating a new cache purge client with an invalid server URL parameter should error - _, err = NewCachePurgeClient( - "h&ttp://whatever", - "token", - "secret", - "accessToken", - "staging", - time.Millisecond*32, - 2, - 3, - 
time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - ) - test.AssertError(t, err, "NewCachePurgeClient with invalid server url parameter didn't error") -} - -func TestBigBatchPurge(t *testing.T) { - log := blog.NewMock() - - as := newAkamaiServer(http.StatusCreated) - - client, err := NewCachePurgeClient( - as.URL, - "token", - "secret", - "accessToken", - "production", - time.Millisecond*32, - 2, - 3, - time.Second, - log, - metrics.NoopRegisterer, - ) - test.AssertNotError(t, err, "Failed to create CachePurgeClient") - - var queueEntries [][]string - for i := 0; i < 250; i++ { - queueEntries = append(queueEntries, []string{fmt.Sprintf("http://test.com/%d", i)}) - } - - stoppedAt, err := client.Purge(queueEntries) - test.AssertNotError(t, err, "Purge failed with 201 response") - test.AssertEquals(t, stoppedAt, 250) - - // Add an entry with a malformed URL. - entryWithMalformedURL := []string{"http:/test.com"} - queueEntries = append(queueEntries, entryWithMalformedURL) - - // Add 10 more valid entries. - for i := 0; i < 10; i++ { - queueEntries = append(queueEntries, []string{fmt.Sprintf("http://test.com/%d", i)}) - } - - // Should stop at URL entry 250 ('http:/test.com') of 261 as this is the - // batch that results in errFatal. - stoppedAt, err = client.Purge(queueEntries) - test.AssertError(t, err, "Purge succeeded with a malformed URL") - test.AssertErrorIs(t, err, errFatal) - test.AssertDeepEquals(t, queueEntries[stoppedAt], entryWithMalformedURL) - test.AssertEquals(t, stoppedAt, 250) -} - -func TestReverseBytes(t *testing.T) { - a := []byte{0, 1, 2, 3} - test.AssertDeepEquals(t, reverseBytes(a), []byte{3, 2, 1, 0}) -} - -func TestGenerateOCSPCacheKeys(t *testing.T) { - der := []byte{105, 239, 255} - test.AssertDeepEquals( - t, - makeOCSPCacheURLs(der, "ocsp.invalid/"), - []string{ - "ocsp.invalid/?body-md5=d6101198a9d9f1f6", - "ocsp.invalid/ae/", - "ocsp.invalid/ae%2F%2F", - }, - ) -} diff --git a/akamai/proto/akamai.pb.go b/akamai/proto/akamai.pb.go deleted file mode 100644 index af744fe1569..00000000000 --- a/akamai/proto/akamai.pb.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 -// source: akamai.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type PurgeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Urls []string `protobuf:"bytes,1,rep,name=urls,proto3" json:"urls,omitempty"` -} - -func (x *PurgeRequest) Reset() { - *x = PurgeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_akamai_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PurgeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PurgeRequest) ProtoMessage() {} - -func (x *PurgeRequest) ProtoReflect() protoreflect.Message { - mi := &file_akamai_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PurgeRequest.ProtoReflect.Descriptor instead. -func (*PurgeRequest) Descriptor() ([]byte, []int) { - return file_akamai_proto_rawDescGZIP(), []int{0} -} - -func (x *PurgeRequest) GetUrls() []string { - if x != nil { - return x.Urls - } - return nil -} - -var File_akamai_proto protoreflect.FileDescriptor - -var file_akamai_proto_rawDesc = []byte{ - 0x0a, 0x0c, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, - 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x0c, 0x50, 0x75, 0x72, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x75, 0x72, 0x6c, 0x73, 0x32, 0x47, 0x0a, 0x0c, 0x41, 0x6b, 0x61, 0x6d, 0x61, - 0x69, 0x50, 0x75, 0x72, 0x67, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x05, 0x50, 0x75, 0x72, 0x67, 0x65, - 0x12, 0x14, 0x2e, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, - 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, - 0x65, 0x72, 0x2f, 0x61, 0x6b, 0x61, 0x6d, 0x61, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_akamai_proto_rawDescOnce sync.Once - file_akamai_proto_rawDescData = file_akamai_proto_rawDesc -) - -func file_akamai_proto_rawDescGZIP() []byte { - file_akamai_proto_rawDescOnce.Do(func() { - file_akamai_proto_rawDescData = protoimpl.X.CompressGZIP(file_akamai_proto_rawDescData) - }) - return file_akamai_proto_rawDescData -} - -var file_akamai_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_akamai_proto_goTypes = []interface{}{ - (*PurgeRequest)(nil), // 0: akamai.PurgeRequest - (*emptypb.Empty)(nil), // 1: google.protobuf.Empty -} -var file_akamai_proto_depIdxs = []int32{ - 0, // 0: akamai.AkamaiPurger.Purge:input_type -> akamai.PurgeRequest - 1, // 1: akamai.AkamaiPurger.Purge:output_type -> google.protobuf.Empty - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // 
[0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_akamai_proto_init() } -func file_akamai_proto_init() { - if File_akamai_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_akamai_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_akamai_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_akamai_proto_goTypes, - DependencyIndexes: file_akamai_proto_depIdxs, - MessageInfos: file_akamai_proto_msgTypes, - }.Build() - File_akamai_proto = out.File - file_akamai_proto_rawDesc = nil - file_akamai_proto_goTypes = nil - file_akamai_proto_depIdxs = nil -} diff --git a/akamai/proto/akamai.proto b/akamai/proto/akamai.proto deleted file mode 100644 index 7294ed1f10b..00000000000 --- a/akamai/proto/akamai.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package akamai; -option go_package = "github.com/letsencrypt/boulder/akamai/proto"; - -import "google/protobuf/empty.proto"; - -service AkamaiPurger { - rpc Purge(PurgeRequest) returns (google.protobuf.Empty) {} -} - -message PurgeRequest { - repeated string urls = 1; -} diff --git a/akamai/proto/akamai_grpc.pb.go b/akamai/proto/akamai_grpc.pb.go deleted file mode 100644 index 94659a5e8a1..00000000000 --- a/akamai/proto/akamai_grpc.pb.go +++ /dev/null @@ -1,102 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// AkamaiPurgerClient is the client API for AkamaiPurger service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type AkamaiPurgerClient interface { - Purge(ctx context.Context, in *PurgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type akamaiPurgerClient struct { - cc grpc.ClientConnInterface -} - -func NewAkamaiPurgerClient(cc grpc.ClientConnInterface) AkamaiPurgerClient { - return &akamaiPurgerClient{cc} -} - -func (c *akamaiPurgerClient) Purge(ctx context.Context, in *PurgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/akamai.AkamaiPurger/Purge", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AkamaiPurgerServer is the server API for AkamaiPurger service. -// All implementations must embed UnimplementedAkamaiPurgerServer -// for forward compatibility -type AkamaiPurgerServer interface { - Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error) - mustEmbedUnimplementedAkamaiPurgerServer() -} - -// UnimplementedAkamaiPurgerServer must be embedded to have forward compatible implementations. 
-type UnimplementedAkamaiPurgerServer struct { -} - -func (UnimplementedAkamaiPurgerServer) Purge(context.Context, *PurgeRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Purge not implemented") -} -func (UnimplementedAkamaiPurgerServer) mustEmbedUnimplementedAkamaiPurgerServer() {} - -// UnsafeAkamaiPurgerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to AkamaiPurgerServer will -// result in compilation errors. -type UnsafeAkamaiPurgerServer interface { - mustEmbedUnimplementedAkamaiPurgerServer() -} - -func RegisterAkamaiPurgerServer(s grpc.ServiceRegistrar, srv AkamaiPurgerServer) { - s.RegisterService(&AkamaiPurger_ServiceDesc, srv) -} - -func _AkamaiPurger_Purge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PurgeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AkamaiPurgerServer).Purge(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akamai.AkamaiPurger/Purge", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AkamaiPurgerServer).Purge(ctx, req.(*PurgeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// AkamaiPurger_ServiceDesc is the grpc.ServiceDesc for AkamaiPurger service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var AkamaiPurger_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "akamai.AkamaiPurger", - HandlerType: (*AkamaiPurgerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Purge", - Handler: _AkamaiPurger_Purge_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akamai.proto", -} diff --git a/allowlist/main.go b/allowlist/main.go new file mode 100644 index 00000000000..b7a0e5c3557 --- /dev/null +++ b/allowlist/main.go @@ -0,0 +1,43 @@ +package allowlist + +import ( + "github.com/letsencrypt/boulder/strictyaml" +) + +// List holds a unique collection of items of type T. Membership can be checked +// by calling the Contains method. +type List[T comparable] struct { + members map[T]struct{} +} + +// NewList returns a *List[T] populated with the provided members of type T. All +// duplicate entries are ignored, ensuring uniqueness. +func NewList[T comparable](members []T) *List[T] { + l := &List[T]{members: make(map[T]struct{})} + for _, m := range members { + l.members[m] = struct{}{} + } + return l +} + +// NewFromYAML reads a YAML sequence of values of type T and returns a *List[T] +// containing those values. If data is empty, an empty (deny all) list is +// returned. If data cannot be parsed, an error is returned. +func NewFromYAML[T comparable](data []byte) (*List[T], error) { + if len(data) == 0 { + return NewList([]T{}), nil + } + + var entries []T + err := strictyaml.Unmarshal(data, &entries) + if err != nil { + return nil, err + } + return NewList(entries), nil +} + +// Contains reports whether the provided entry is a member of the list. 
+func (l *List[T]) Contains(entry T) bool { + _, ok := l.members[entry] + return ok +} diff --git a/allowlist/main_test.go b/allowlist/main_test.go new file mode 100644 index 00000000000..97bef54cbb0 --- /dev/null +++ b/allowlist/main_test.go @@ -0,0 +1,109 @@ +package allowlist + +import ( + "testing" +) + +func TestNewFromYAML(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + yamlData string + check []string + expectAnswers []bool + expectErr bool + }{ + { + name: "valid YAML", + yamlData: "- oak\n- maple\n- cherry", + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{true, false, true, true}, + expectErr: false, + }, + { + name: "empty YAML", + yamlData: "", + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{false, false, false, false}, + expectErr: false, + }, + { + name: "invalid YAML", + yamlData: "{ invalid_yaml", + check: []string{}, + expectAnswers: []bool{}, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + list, err := NewFromYAML[string]([]byte(tt.yamlData)) + if (err != nil) != tt.expectErr { + t.Fatalf("NewFromYAML() error = %v, expectErr = %v", err, tt.expectErr) + } + + if err == nil { + for i, item := range tt.check { + got := list.Contains(item) + if got != tt.expectAnswers[i] { + t.Errorf("Contains(%q) got %v, want %v", item, got, tt.expectAnswers[i]) + } + } + } + }) + } +} + +func TestNewList(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + members []string + check []string + expectAnswers []bool + }{ + { + name: "unique members", + members: []string{"oak", "maple", "cherry"}, + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{true, false, true, true}, + }, + { + name: "duplicate members", + members: []string{"oak", "maple", "cherry", "oak"}, + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{true, false, true, true}, + }, + { + name: "nil list", + members: nil, + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{false, false, false, false}, + }, + { + name: "empty list", + members: []string{}, + check: []string{"oak", "walnut", "maple", "cherry"}, + expectAnswers: []bool{false, false, false, false}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + list := NewList[string](tt.members) + for i, item := range tt.check { + got := list.Contains(item) + if got != tt.expectAnswers[i] { + t.Errorf("Contains(%q) got %v, want %v", item, got, tt.expectAnswers[i]) + } + } + }) + } +} diff --git a/bdns/dns.go b/bdns/dns.go index 65c6b6cb7e5..ea91a5c4349 100644 --- a/bdns/dns.go +++ b/bdns/dns.go @@ -2,602 +2,387 @@ package bdns import ( "context" - "encoding/base64" + "crypto/tls" "errors" "fmt" + "io" "net" + "net/http" "strconv" "strings" - "sync" "time" "github.com/jmhodges/clock" "github.com/miekg/dns" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/metrics" ) -func parseCidr(network string, comment string) net.IPNet { - _, net, err := net.ParseCIDR(network) - if err != nil { - panic(fmt.Sprintf("error parsing %s (%s): %s", network, comment, err)) - } - return *net +// Result is a wrapper around miekg/dns.Msg, but with all Resource Records from +// the Answer section which match the parameterized record type already pulled +// out for convenient access. 
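To make the new `allowlist` API concrete before moving on to the `bdns` changes, here is a brief usage sketch. It is illustrative only (not part of this diff); the YAML content and printed values are invented:

```go
package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/allowlist"
)

func main() {
	// A YAML sequence of strings, the shape NewFromYAML expects.
	raw := []byte("- example.com\n- example.net")

	list, err := allowlist.NewFromYAML[string](raw)
	if err != nil {
		panic(err) // real callers should propagate this error
	}

	fmt.Println(list.Contains("example.com")) // true
	fmt.Println(list.Contains("example.org")) // false

	// Empty input yields an empty, deny-all list.
	empty, _ := allowlist.NewFromYAML[string](nil)
	fmt.Println(empty.Contains("example.com")) // false
}
```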
+type Result[R dns.RR] struct { + *dns.Msg + CNames []*dns.CNAME + Final []R } -var ( - // Private CIDRs to ignore - privateNetworks = []net.IPNet{ - // RFC1918 - // 10.0.0.0/8 - { - IP: []byte{10, 0, 0, 0}, - Mask: []byte{255, 0, 0, 0}, - }, - // 172.16.0.0/12 - { - IP: []byte{172, 16, 0, 0}, - Mask: []byte{255, 240, 0, 0}, - }, - // 192.168.0.0/16 - { - IP: []byte{192, 168, 0, 0}, - Mask: []byte{255, 255, 0, 0}, - }, - // RFC5735 - // 127.0.0.0/8 - { - IP: []byte{127, 0, 0, 0}, - Mask: []byte{255, 0, 0, 0}, - }, - // RFC1122 Section 3.2.1.3 - // 0.0.0.0/8 - { - IP: []byte{0, 0, 0, 0}, - Mask: []byte{255, 0, 0, 0}, - }, - // RFC3927 - // 169.254.0.0/16 - { - IP: []byte{169, 254, 0, 0}, - Mask: []byte{255, 255, 0, 0}, - }, - // RFC 5736 - // 192.0.0.0/24 - { - IP: []byte{192, 0, 0, 0}, - Mask: []byte{255, 255, 255, 0}, - }, - // RFC 5737 - // 192.0.2.0/24 - { - IP: []byte{192, 0, 2, 0}, - Mask: []byte{255, 255, 255, 0}, - }, - // 198.51.100.0/24 - { - IP: []byte{198, 51, 100, 0}, - Mask: []byte{255, 255, 255, 0}, - }, - // 203.0.113.0/24 - { - IP: []byte{203, 0, 113, 0}, - Mask: []byte{255, 255, 255, 0}, - }, - // RFC 3068 - // 192.88.99.0/24 - { - IP: []byte{192, 88, 99, 0}, - Mask: []byte{255, 255, 255, 0}, - }, - // RFC 2544, Errata 423 - // 198.18.0.0/15 - { - IP: []byte{198, 18, 0, 0}, - Mask: []byte{255, 254, 0, 0}, - }, - // RFC 3171 - // 224.0.0.0/4 - { - IP: []byte{224, 0, 0, 0}, - Mask: []byte{240, 0, 0, 0}, - }, - // RFC 1112 - // 240.0.0.0/4 - { - IP: []byte{240, 0, 0, 0}, - Mask: []byte{240, 0, 0, 0}, - }, - // RFC 919 Section 7 - // 255.255.255.255/32 - { - IP: []byte{255, 255, 255, 255}, - Mask: []byte{255, 255, 255, 255}, - }, - // RFC 6598 - // 100.64.0.0./10 - { - IP: []byte{100, 64, 0, 0}, - Mask: []byte{255, 192, 0, 0}, - }, +// resultFromMsg returns a Result whose CNames and Final fields are populated +// from the underlying Msg's Answer field. +func resultFromMsg[R dns.RR](m *dns.Msg) *Result[R] { + var cnames []*dns.CNAME + var final []R + for _, rr := range m.Answer { + if a, ok := rr.(R); ok { + final = append(final, a) + } else if a, ok := rr.(*dns.CNAME); ok { + cnames = append(cnames, a) + } } - // Sourced from https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml - // where Global, Source, or Destination is False - privateV6Networks = []net.IPNet{ - parseCidr("::/128", "RFC 4291: Unspecified Address"), - parseCidr("::1/128", "RFC 4291: Loopback Address"), - parseCidr("::ffff:0:0/96", "RFC 4291: IPv4-mapped Address"), - parseCidr("100::/64", "RFC 6666: Discard Address Block"), - parseCidr("2001::/23", "RFC 2928: IETF Protocol Assignments"), - parseCidr("2001:2::/48", "RFC 5180: Benchmarking"), - parseCidr("2001:db8::/32", "RFC 3849: Documentation"), - parseCidr("2001::/32", "RFC 4380: TEREDO"), - parseCidr("fc00::/7", "RFC 4193: Unique-Local"), - parseCidr("fe80::/10", "RFC 4291: Section 2.5.6 Link-Scoped Unicast"), - parseCidr("ff00::/8", "RFC 4291: Section 2.7"), - // We disable validations to IPs under the 6to4 anycase prefix because - // there's too much risk of a malicious actor advertising the prefix and - // answering validations for a 6to4 host they do not control. - // https://community.letsencrypt.org/t/problems-validating-ipv6-against-host-running-6to4/18312/9 - parseCidr("2002::/16", "RFC 7526: 6to4 anycast prefix deprecated"), + + return &Result[R]{ + Msg: m, + CNames: cnames, + Final: final, } -) +} -// Client queries for DNS records +// Client can make A, AAAA, CAA, and TXT queries. 
The second return value of +// each method is the address of the resolver used to conduct the query, and +// should be populated even when returning an error. type Client interface { - LookupTXT(context.Context, string) (txts []string, err error) - LookupHost(context.Context, string) ([]net.IP, error) - LookupCAA(context.Context, string) ([]*dns.CAA, string, error) + LookupA(context.Context, string) (*Result[*dns.A], string, error) + LookupAAAA(context.Context, string) (*Result[*dns.AAAA], string, error) + LookupCAA(context.Context, string) (*Result[*dns.CAA], string, error) + LookupTXT(context.Context, string) (*Result[*dns.TXT], string, error) } -// impl represents a client that talks to an external resolver +// impl implements the Client interface via an underlying DNS exchanger. It +// rotates queries across multiple resolvers and tracks a variety of metrics. type impl struct { - dnsClient exchanger - servers ServerProvider - allowRestrictedAddresses bool - maxTries int - clk clock.Clock - log blog.Logger - - queryTime *prometheus.HistogramVec - totalLookupTime *prometheus.HistogramVec - timeoutCounter *prometheus.CounterVec - idMismatchCounter *prometheus.CounterVec + exchanger exchanger + servers ServerProvider + maxTries int + clk clock.Clock + log blog.Logger + + queryTime *prometheus.HistogramVec + totalLookupTime *prometheus.HistogramVec + timeoutCounter *prometheus.CounterVec } var _ Client = &impl{} -type exchanger interface { - Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) -} - -// New constructs a new DNS resolver object that utilizes the -// provided list of DNS servers for resolution. +// New constructs a new DNS resolver object that utilizes the provided list of +// DNS servers for resolution, and the provided tlsConfig to speak DoH to those +// servers. func New( readTimeout time.Duration, servers ServerProvider, stats prometheus.Registerer, clk clock.Clock, maxTries int, + userAgent string, log blog.Logger, + tlsConfig *tls.Config, ) Client { - dnsClient := new(dns.Client) - - // Set timeout for underlying net.Conn - dnsClient.ReadTimeout = readTimeout - dnsClient.Net = "udp" + // Clone the default transport because it comes with various settings that we + // like, which are different from the zero value of an `http.Transport`. Then + // set it to force HTTP/2, because Unbound will reject non-HTTP/2 DoH + // requests. 
+ transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConfig + transport.ForceAttemptHTTP2 = true + + exchanger := &dohExchanger{ + clk: clk, + hc: http.Client{ + Timeout: readTimeout, + Transport: transport, + }, + userAgent: userAgent, + } - queryTime := prometheus.NewHistogramVec( + queryTime := promauto.With(stats).NewHistogramVec( prometheus.HistogramOpts{ Name: "dns_query_time", Help: "Time taken to perform a DNS query", Buckets: metrics.InternetFacingBuckets, }, - []string{"qtype", "result", "authenticated_data", "resolver"}, + []string{"qtype", "result", "resolver"}, ) - totalLookupTime := prometheus.NewHistogramVec( + totalLookupTime := promauto.With(stats).NewHistogramVec( prometheus.HistogramOpts{ Name: "dns_total_lookup_time", Help: "Time taken to perform a DNS lookup, including all retried queries", Buckets: metrics.InternetFacingBuckets, }, - []string{"qtype", "result", "authenticated_data", "retries", "resolver"}, + []string{"qtype", "result", "resolver", "attempts"}, ) - timeoutCounter := prometheus.NewCounterVec( + timeoutCounter := promauto.With(stats).NewCounterVec( prometheus.CounterOpts{ Name: "dns_timeout", Help: "Counter of various types of DNS query timeouts", }, - []string{"qtype", "type", "resolver", "isTLD"}, + []string{"qtype", "result", "resolver", "isTLD"}, ) - idMismatchCounter := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "dns_id_mismatch", - Help: "Counter of DNS ErrId errors sliced by query type and resolver", - }, - []string{"qtype", "resolver"}, - ) - stats.MustRegister(queryTime, totalLookupTime, timeoutCounter, idMismatchCounter) - return &impl{ - dnsClient: dnsClient, - servers: servers, - allowRestrictedAddresses: false, - maxTries: maxTries, - clk: clk, - queryTime: queryTime, - totalLookupTime: totalLookupTime, - timeoutCounter: timeoutCounter, - idMismatchCounter: idMismatchCounter, - log: log, + if maxTries < 1 { + // Allowing negative or zero total attempts makes no sense, so default to 1. + maxTries = 1 } -} -// NewTest constructs a new DNS resolver object that utilizes the -// provided list of DNS servers for resolution and will allow loopback addresses. -// This constructor should *only* be called from tests (unit or integration). -func NewTest( - readTimeout time.Duration, - servers ServerProvider, - stats prometheus.Registerer, - clk clock.Clock, - maxTries int, - log blog.Logger) Client { - resolver := New(readTimeout, servers, stats, clk, maxTries, log) - resolver.(*impl).allowRestrictedAddresses = true - return resolver + return &impl{ + exchanger: exchanger, + servers: servers, + maxTries: maxTries, + clk: clk, + queryTime: queryTime, + totalLookupTime: totalLookupTime, + timeoutCounter: timeoutCounter, + log: log, + } } -// exchangeOne performs a single DNS exchange with a randomly chosen server -// out of the server list, returning the response, time, and error (if any). -// We assume that the upstream resolver requests and validates DNSSEC records -// itself. -func (dnsClient *impl) exchangeOne(ctx context.Context, hostname string, qtype uint16) (resp *dns.Msg, err error) { - m := new(dns.Msg) +// exchangeOne performs a single DNS exchange with a randomly chosen server out +// of the server list, returning the response, resolver used, and error (if +// any). If a response received indicates that the resolver encountered an error +// (such as an expired DNSSEC signature), that is converted into an error and +// returned. 
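As a reference point for the constructor above, a hedged sketch of wiring up the DoH-backed client follows. The `staticProvider` stub and all of the values are assumptions for illustration, and it presumes `ServerProvider` requires only an `Addrs` method (matching its use in this file); the real interface may carry additional methods:

```go
package example

import (
	"crypto/tls"
	"time"

	"github.com/jmhodges/clock"

	"github.com/letsencrypt/boulder/bdns"
	blog "github.com/letsencrypt/boulder/log"
	"github.com/letsencrypt/boulder/metrics"
)

// staticProvider is a stand-in ServerProvider that always returns a fixed
// set of resolver addresses. Assumption: ServerProvider only requires Addrs.
type staticProvider []string

func (s staticProvider) Addrs() ([]string, error) { return s, nil }

func newResolver(tlsConf *tls.Config, logger blog.Logger) bdns.Client {
	return bdns.New(
		5*time.Second,                      // read timeout for each DoH request
		staticProvider{"10.77.77.77:8343"}, // hypothetical DoH resolver address
		metrics.NoopRegisterer,             // Prometheus registerer for query metrics
		clock.New(),                        // wall clock used for latency measurements
		3,                                  // maxTries: total attempts across resolvers
		"boulder-dev/1.0",                  // hypothetical User-Agent sent on DoH requests
		logger,
		tlsConf, // TLS configuration used to authenticate the DoH server
	)
}
```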
+func (c *impl) exchangeOne(ctx context.Context, hostname string, qtype uint16) (*dns.Msg, string, error) { + req := new(dns.Msg) // Set question type - m.SetQuestion(dns.Fqdn(hostname), qtype) + req.SetQuestion(dns.Fqdn(hostname), qtype) // Set the AD bit in the query header so that the resolver knows that // we are interested in this bit in the response header. If this isn't // set the AD bit in the response is useless (RFC 6840 Section 5.7). // This has no security implications, it simply allows us to gather // metrics about the percentage of responses that are secured with // DNSSEC. - m.AuthenticatedData = true + req.AuthenticatedData = true // Tell the resolver that we're willing to receive responses up to 4096 bytes. // This happens sometimes when there are a very large number of CAA records // present. - m.SetEdns0(4096, false) + req.SetEdns0(4096, false) - servers, err := dnsClient.servers.Addrs() + servers, err := c.servers.Addrs() if err != nil { - return nil, fmt.Errorf("failed to list DNS servers: %w", err) + return nil, "", fmt.Errorf("failed to list DNS servers: %w", err) } - chosenServerIndex := 0 - chosenServer := servers[chosenServerIndex] - start := dnsClient.clk.Now() - client := dnsClient.dnsClient + // Prepare to increment a latency metric no matter whether we succeed or fail. + // The deferred function closes over resp, chosenServerIP, and tries, which + // are all modified in the loop below. + start := c.clk.Now() qtypeStr := dns.TypeToString[qtype] - tries := 1 + var ( + resp *dns.Msg + chosenServerIP string + tries int + ) defer func() { - result, authenticated := "failed", "" + result := "failed" if resp != nil { result = dns.RcodeToString[resp.Rcode] - authenticated = fmt.Sprintf("%t", resp.AuthenticatedData) } - dnsClient.totalLookupTime.With(prometheus.Labels{ - "qtype": qtypeStr, - "result": result, - "authenticated_data": authenticated, - "retries": strconv.Itoa(tries), - "resolver": chosenServer, - }).Observe(dnsClient.clk.Since(start).Seconds()) + c.totalLookupTime.With(prometheus.Labels{ + "qtype": qtypeStr, + "result": result, + "resolver": chosenServerIP, + "attempts": strconv.Itoa(tries), + }).Observe(c.clk.Since(start).Seconds()) }() - for { - ch := make(chan dnsResp, 1) - - go func() { - rsp, rtt, err := client.Exchange(m, chosenServer) - result, authenticated := "failed", "" - if rsp != nil { - result = dns.RcodeToString[rsp.Rcode] - authenticated = fmt.Sprintf("%t", rsp.AuthenticatedData) - } - if err != nil { - logDNSError(dnsClient.log, chosenServer, hostname, m, rsp, err) - if err == dns.ErrId { - dnsClient.idMismatchCounter.With(prometheus.Labels{ - "qtype": qtypeStr, - "resolver": chosenServer, - }).Inc() - } - } - dnsClient.queryTime.With(prometheus.Labels{ - "qtype": qtypeStr, - "result": result, - "authenticated_data": authenticated, - "resolver": chosenServer, - }).Observe(rtt.Seconds()) - ch <- dnsResp{m: rsp, err: err} - }() - select { - case <-ctx.Done(): - if ctx.Err() == context.DeadlineExceeded { - dnsClient.timeoutCounter.With(prometheus.Labels{ + + for i := range c.maxTries { + tries = i + 1 + chosenServer := servers[i%len(servers)] + + // Strip off the IP address part of the server address because + // we talk to the same server on multiple ports, and don't want + // to blow up the cardinality. + // Note: validateServerAddress() has already checked net.SplitHostPort() + // and ensures that chosenServer can't be a bare port, e.g. 
":1337" + chosenServerIP, _, err = net.SplitHostPort(chosenServer) + if err != nil { + return nil, chosenServer, err + } + + // Do a bare assignment (not :=) to populate the `resp` used by the defer above. + var rtt time.Duration + resp, rtt, err = c.exchanger.ExchangeContext(ctx, req, chosenServer) + + // Do some metrics handling before we do error handling. + result := "failed" + if resp != nil { + result = dns.RcodeToString[resp.Rcode] + } + c.queryTime.With(prometheus.Labels{ + "qtype": qtypeStr, + "result": result, + "resolver": chosenServerIP, + }).Observe(rtt.Seconds()) + + if err != nil { + c.log.Infof("logDNSError chosenServer=[%s] hostname=[%s] queryType=[%s] err=[%s]", chosenServer, hostname, qtypeStr, err) + + // Check if the error is a network timeout, rather than a local context + // timeout. If it is, retry instead of giving up. + var netErr net.Error + isRetryable := ctx.Err() == nil && errors.As(err, &netErr) && netErr.Timeout() + hasRetriesLeft := tries < c.maxTries + if isRetryable && hasRetriesLeft { + continue + } else if isRetryable && !hasRetriesLeft { + c.timeoutCounter.With(prometheus.Labels{ "qtype": qtypeStr, - "type": "deadline exceeded", - "resolver": chosenServer, - "isTLD": isTLD(hostname), + "result": "out of retries", + "resolver": chosenServerIP, + "isTLD": fmt.Sprintf("%t", !strings.Contains(hostname, ".")), }).Inc() - } else if ctx.Err() == context.Canceled { - dnsClient.timeoutCounter.With(prometheus.Labels{ + } else if errors.Is(err, context.DeadlineExceeded) { + c.timeoutCounter.With(prometheus.Labels{ "qtype": qtypeStr, - "type": "canceled", - "resolver": chosenServer, - "isTLD": isTLD(hostname), + "result": "deadline exceeded", + "resolver": chosenServerIP, + "isTLD": fmt.Sprintf("%t", !strings.Contains(hostname, ".")), }).Inc() - } else { - dnsClient.timeoutCounter.With(prometheus.Labels{ + } else if errors.Is(err, context.Canceled) { + c.timeoutCounter.With(prometheus.Labels{ "qtype": qtypeStr, - "type": "unknown", - "resolver": chosenServer, + "result": "canceled", + "resolver": chosenServerIP, + "isTLD": fmt.Sprintf("%t", !strings.Contains(hostname, ".")), }).Inc() } - err = ctx.Err() - return - case r := <-ch: - if r.err != nil { - var operr *net.OpError - ok := errors.As(r.err, &operr) - isRetryable := ok && operr.Temporary() - hasRetriesLeft := tries < dnsClient.maxTries - if isRetryable && hasRetriesLeft { - tries++ - // Chose a new server to retry the query with by incrementing the - // chosen server index modulo the number of servers. This ensures that - // if one dns server isn't available we retry with the next in the - // list. - chosenServerIndex = (chosenServerIndex + 1) % len(servers) - chosenServer = servers[chosenServerIndex] - continue - } else if isRetryable && !hasRetriesLeft { - dnsClient.timeoutCounter.With(prometheus.Labels{ - "qtype": qtypeStr, - "type": "out of retries", - "resolver": chosenServer, - "isTLD": isTLD(hostname), - }).Inc() - } - } - resp, err = r.m, r.err - return + + return nil, chosenServer, err } + + return resp, chosenServer, nil } + // It's impossible to get past the bottom of the loop: on the last attempt + // (when tries == c.maxTries), all paths lead to a return from inside the loop. + return nil, "", errors.New("unexpected loop escape in exchangeOne") } -// isTLD returns a simplified view of whether something is a TLD: does it have -// any dots in it? This returns true or false as a string, and is meant solely -// for Prometheus metrics. 
-func isTLD(hostname string) string { - if strings.Contains(hostname, ".") { - return "false" - } else { - return "true" +// LookupA sends a DNS query to find all A records associated with the provided +// hostname. +func (c *impl) LookupA(ctx context.Context, hostname string) (*Result[*dns.A], string, error) { + resp, resolver, err := c.exchangeOne(ctx, hostname, dns.TypeA) + err = wrapErr(dns.TypeA, hostname, resp, err) + if err != nil { + return nil, resolver, err } -} -type dnsResp struct { - m *dns.Msg - err error + return resultFromMsg[*dns.A](resp), resolver, nil } -// LookupTXT sends a DNS query to find all TXT records associated with -// the provided hostname which it returns along with the returned -// DNS authority section. -func (dnsClient *impl) LookupTXT(ctx context.Context, hostname string) ([]string, error) { - var txt []string - dnsType := dns.TypeTXT - r, err := dnsClient.exchangeOne(ctx, hostname, dnsType) +// LookupAAAA sends a DNS query to find all AAAA records associated with the +// provided hostname. +func (c *impl) LookupAAAA(ctx context.Context, hostname string) (*Result[*dns.AAAA], string, error) { + resp, resolver, err := c.exchangeOne(ctx, hostname, dns.TypeAAAA) + err = wrapErr(dns.TypeAAAA, hostname, resp, err) if err != nil { - return nil, &Error{dnsType, hostname, err, -1} + return nil, resolver, err } - if r.Rcode != dns.RcodeSuccess { - return nil, &Error{dnsType, hostname, nil, r.Rcode} + + return resultFromMsg[*dns.AAAA](resp), resolver, nil +} + +// LookupCAA sends a DNS query to find all CAA records associated with the +// provided hostname. +func (c *impl) LookupCAA(ctx context.Context, hostname string) (*Result[*dns.CAA], string, error) { + resp, resolver, err := c.exchangeOne(ctx, hostname, dns.TypeCAA) + + // Special case: when checking CAA for non-TLD names, treat NXDOMAIN as a + // successful response containing an empty set of records. This can come up in + // situations where records were provisioned for validation (e.g. TXT records + // for DNS-01 challenge) and then removed after validation but before CAA + // rechecking. But allow NXDOMAIN for TLDs to fall through to the error code + // below, so we don't issue for gTLDs that have been removed by ICANN. + if err == nil && resp.Rcode == dns.RcodeNameError && strings.Contains(hostname, ".") { + return resultFromMsg[*dns.CAA](resp), resolver, nil } - for _, answer := range r.Answer { - if answer.Header().Rrtype == dnsType { - if txtRec, ok := answer.(*dns.TXT); ok { - txt = append(txt, strings.Join(txtRec.Txt, "")) - } - } + err = wrapErr(dns.TypeCAA, hostname, resp, err) + if err != nil { + return nil, resolver, err } - return txt, err + return resultFromMsg[*dns.CAA](resp), resolver, nil } -func isPrivateV4(ip net.IP) bool { - for _, net := range privateNetworks { - if net.Contains(ip) { - return true - } +// LookupTXT sends a DNS query to find all TXT records associated with the +// provided hostname. +func (c *impl) LookupTXT(ctx context.Context, hostname string) (*Result[*dns.TXT], string, error) { + resp, resolver, err := c.exchangeOne(ctx, hostname, dns.TypeTXT) + err = wrapErr(dns.TypeTXT, hostname, resp, err) + if err != nil { + return nil, resolver, err } - return false + + return resultFromMsg[*dns.TXT](resp), resolver, nil } -func isPrivateV6(ip net.IP) bool { - for _, net := range privateV6Networks { - if net.Contains(ip) { - return true - } - } - return false +// exchanger represents an underlying DNS client. 
This interface exists solely +// so that its implementation can be swapped out in unit tests. +type exchanger interface { + ExchangeContext(ctx context.Context, m *dns.Msg, a string) (*dns.Msg, time.Duration, error) } -func (dnsClient *impl) lookupIP(ctx context.Context, hostname string, ipType uint16) ([]dns.RR, error) { - resp, err := dnsClient.exchangeOne(ctx, hostname, ipType) - if err != nil { - return nil, &Error{ipType, hostname, err, -1} - } - if resp.Rcode != dns.RcodeSuccess { - return nil, &Error{ipType, hostname, nil, resp.Rcode} - } - return resp.Answer, nil +// dohExchanger implements the exchanger interface. It routes all of its DNS +// queries over DoH, wrapping the request with the appropriate headers and +// unwrapping the response. +type dohExchanger struct { + clk clock.Clock + hc http.Client + userAgent string } -// LookupHost sends a DNS query to find all A and AAAA records associated with -// the provided hostname. This method assumes that the external resolver will -// chase CNAME/DNAME aliases and return relevant records. It will retry -// requests in the case of temporary network errors. It returns an error if -// both the A and AAAA lookups fail or are empty, but succeeds otherwise. -func (dnsClient *impl) LookupHost(ctx context.Context, hostname string) ([]net.IP, error) { - var recordsA, recordsAAAA []dns.RR - var errA, errAAAA error - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - recordsA, errA = dnsClient.lookupIP(ctx, hostname, dns.TypeA) - }() - wg.Add(1) - go func() { - defer wg.Done() - recordsAAAA, errAAAA = dnsClient.lookupIP(ctx, hostname, dns.TypeAAAA) - }() - wg.Wait() - - var addrsA []net.IP - if errA == nil { - for _, answer := range recordsA { - if answer.Header().Rrtype == dns.TypeA { - a, ok := answer.(*dns.A) - if ok && a.A.To4() != nil && (!isPrivateV4(a.A) || dnsClient.allowRestrictedAddresses) { - addrsA = append(addrsA, a.A) - } - } - } - if len(addrsA) == 0 { - errA = fmt.Errorf("no valid A records found for %s", hostname) - } +// ExchangeContext sends a DoH query to the provided DoH server and returns the response. +func (d *dohExchanger) ExchangeContext(ctx context.Context, query *dns.Msg, server string) (*dns.Msg, time.Duration, error) { + q, err := query.Pack() + if err != nil { + return nil, 0, err } - var addrsAAAA []net.IP - if errAAAA == nil { - for _, answer := range recordsAAAA { - if answer.Header().Rrtype == dns.TypeAAAA { - aaaa, ok := answer.(*dns.AAAA) - if ok && aaaa.AAAA.To16() != nil && (!isPrivateV6(aaaa.AAAA) || dnsClient.allowRestrictedAddresses) { - addrsAAAA = append(addrsAAAA, aaaa.AAAA) - } - } - } - if len(addrsAAAA) == 0 { - errAAAA = fmt.Errorf("no valid AAAA records found for %s", hostname) - } + // The default Unbound URL template + url := fmt.Sprintf("https://%s/dns-query", server) + req, err := http.NewRequestWithContext(ctx, "POST", url, strings.NewReader(string(q))) + if err != nil { + return nil, 0, err } - - if errA != nil && errAAAA != nil { - // Construct a new error from both underlying errors. We can only use %w for - // one of them, because the go error unwrapping protocol doesn't support - // branching. We don't use ProblemDetails and SubProblemDetails here, because - // this error will get wrapped in a DNSError and further munged by higher - // layers in the stack. 
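// [Editor's note] The constraint described in the comment above predates Go
// 1.20. Since then, fmt.Errorf accepts multiple %w verbs, and errors.Join
// wraps any number of errors, with errors.Is/errors.As inspecting every
// branch. A hypothetical sketch of what this return could have looked like
// (not part of this change, which deletes the code path entirely):
//
//	return nil, fmt.Errorf("%w; %w", errA, errAAAA)
//
// or equivalently:
//
//	return nil, errors.Join(errA, errAAAA)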
- return nil, fmt.Errorf("%w; %s", errA, errAAAA) + req.Header.Set("Content-Type", "application/dns-message") + req.Header.Set("Accept", "application/dns-message") + if len(d.userAgent) > 0 { + req.Header.Set("User-Agent", d.userAgent) } - return append(addrsA, addrsAAAA...), nil -} - -// LookupCAA sends a DNS query to find all CAA records associated with -// the provided hostname and the complete dig-style RR `response`. This -// response is quite verbose, however it's only populated when the CAA -// response is non-empty. -func (dnsClient *impl) LookupCAA(ctx context.Context, hostname string) ([]*dns.CAA, string, error) { - dnsType := dns.TypeCAA - r, err := dnsClient.exchangeOne(ctx, hostname, dnsType) + start := d.clk.Now() + resp, err := d.hc.Do(req) if err != nil { - return nil, "", &Error{dnsType, hostname, err, -1} + return nil, d.clk.Since(start), err } + defer resp.Body.Close() - if r.Rcode == dns.RcodeServerFailure { - return nil, "", &Error{dnsType, hostname, nil, r.Rcode} + if resp.StatusCode != http.StatusOK { + return nil, d.clk.Since(start), fmt.Errorf("doh: http status %d", resp.StatusCode) } - var CAAs []*dns.CAA - for _, answer := range r.Answer { - if caaR, ok := answer.(*dns.CAA); ok { - CAAs = append(CAAs, caaR) - } - } - var response string - if len(CAAs) > 0 { - response = r.String() + b, err := io.ReadAll(resp.Body) + if err != nil { + return nil, d.clk.Since(start), fmt.Errorf("doh: reading response body: %w", err) } - return CAAs, response, nil -} -// logDNSError logs the provided err result from making a query for hostname to -// the chosenServer. If the err is a `dns.ErrId` instance then the Base64 -// encoded bytes of the query (and if not-nil, the response) in wire format -// is logged as well. This function is called from exchangeOne only for the case -// where an error occurs querying a hostname that indicates a problem between -// the VA and the chosenServer. -func logDNSError( - logger blog.Logger, - chosenServer string, - hostname string, - msg, resp *dns.Msg, - underlying error) { - // We don't expect logDNSError to be called with a nil msg or err but - // if it happens return early. We allow resp to be nil. - if msg == nil || len(msg.Question) == 0 || underlying == nil { - return + response := new(dns.Msg) + err = response.Unpack(b) + if err != nil { + return nil, d.clk.Since(start), fmt.Errorf("doh: unpacking response: %w", err) } - queryType := dns.TypeToString[msg.Question[0].Qtype] - - // If the error indicates there was a query/response ID mismatch then we want - // to log more detail. 
- if underlying == dns.ErrId { - packedMsgBytes, err := msg.Pack() - if err != nil { - logger.Errf("logDNSError failed to pack msg: %v", err) - return - } - encodedMsg := base64.StdEncoding.EncodeToString(packedMsgBytes) - var encodedResp string - var respQname string - if resp != nil { - packedRespBytes, err := resp.Pack() - if err != nil { - logger.Errf("logDNSError failed to pack resp: %v", err) - return - } - encodedResp = base64.StdEncoding.EncodeToString(packedRespBytes) - if len(resp.Answer) > 0 && resp.Answer[0].Header() != nil { - respQname = resp.Answer[0].Header().Name - } - } - - logger.Infof( - "logDNSError ID mismatch chosenServer=[%s] hostname=[%s] respHostname=[%s] queryType=[%s] err=[%s] msg=[%s] resp=[%s]", - chosenServer, - hostname, - respQname, - queryType, - underlying, - encodedMsg, - encodedResp) - } else { - // Otherwise log a general DNS error - logger.Infof("logDNSError chosenServer=[%s] hostname=[%s] queryType=[%s] err=[%s]", - chosenServer, - hostname, - queryType, - underlying) - } + return response, d.clk.Since(start), nil } diff --git a/bdns/dns_test.go b/bdns/dns_test.go index 74701631758..be964aea14f 100644 --- a/bdns/dns_test.go +++ b/bdns/dns_test.go @@ -2,10 +2,15 @@ package bdns import ( "context" + "crypto/tls" + "crypto/x509" "errors" "fmt" + "io" "log" "net" + "net/http" + "net/url" "os" "regexp" "strings" @@ -14,16 +19,40 @@ import ( "time" "github.com/jmhodges/clock" + "github.com/miekg/dns" + "github.com/prometheus/client_golang/prometheus" + blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/metrics" "github.com/letsencrypt/boulder/test" - "github.com/miekg/dns" - "github.com/prometheus/client_golang/prometheus" ) const dnsLoopbackAddr = "127.0.0.1:4053" -func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) { +func mockDNSQuery(w http.ResponseWriter, httpReq *http.Request) { + if httpReq.Header.Get("Content-Type") != "application/dns-message" { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "client didn't send Content-Type: application/dns-message") + } + if httpReq.Header.Get("Accept") != "application/dns-message" { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "client didn't accept Content-Type: application/dns-message") + } + + requestBody, err := io.ReadAll(httpReq.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "reading body: %s", err) + } + httpReq.Body.Close() + + r := new(dns.Msg) + err = r.Unpack(requestBody) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "unpacking request: %s", err) + } + m := new(dns.Msg) m.SetReply(r) m.Compress = false @@ -53,19 +82,19 @@ func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) { if q.Name == "v6.letsencrypt.org." { record := new(dns.AAAA) record.Hdr = dns.RR_Header{Name: "v6.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0} - record.AAAA = net.ParseIP("::1") + record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1") appendAnswer(record) } if q.Name == "dualstack.letsencrypt.org." { record := new(dns.AAAA) record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0} - record.AAAA = net.ParseIP("::1") + record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1") appendAnswer(record) } if q.Name == "v4error.letsencrypt.org." 
{ record := new(dns.AAAA) record.Hdr = dns.RR_Header{Name: "v4error.letsencrypt.org.", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0} - record.AAAA = net.ParseIP("::1") + record.AAAA = net.ParseIP("2602:80a:6000:abad:cafe::1") appendAnswer(record) } if q.Name == "v6error.letsencrypt.org." { @@ -81,19 +110,19 @@ func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) { if q.Name == "cps.letsencrypt.org." { record := new(dns.A) record.Hdr = dns.RR_Header{Name: "cps.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0} - record.A = net.ParseIP("127.0.0.1") + record.A = net.ParseIP("64.112.117.1") appendAnswer(record) } if q.Name == "dualstack.letsencrypt.org." { record := new(dns.A) record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0} - record.A = net.ParseIP("127.0.0.1") + record.A = net.ParseIP("64.112.117.1") appendAnswer(record) } if q.Name == "v6error.letsencrypt.org." { record := new(dns.A) record.Hdr = dns.RR_Header{Name: "dualstack.letsencrypt.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0} - record.A = net.ParseIP("127.0.0.1") + record.A = net.ParseIP("64.112.117.1") appendAnswer(record) } if q.Name == "v4error.letsencrypt.org." { @@ -142,6 +171,9 @@ func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) { record.Flag = 1 appendAnswer(record) } + if q.Name == "gonetld." { + m.SetRcode(r, dns.RcodeNameError) + } case dns.TypeTXT: if q.Name == "split-txt.letsencrypt.org." { record := new(dns.TXT) @@ -166,45 +198,37 @@ func mockDNSQuery(w dns.ResponseWriter, r *dns.Msg) { } } - err := w.WriteMsg(m) + body, err := m.Pack() + if err != nil { + fmt.Fprintf(os.Stderr, "packing reply: %s\n", err) + } + w.Header().Set("Content-Type", "application/dns-message") + _, err = w.Write(body) if err != nil { panic(err) // running tests, so panic is OK } } func serveLoopResolver(stopChan chan bool) { - dns.HandleFunc(".", mockDNSQuery) - tcpServer := &dns.Server{ + m := http.NewServeMux() + m.HandleFunc("/dns-query", mockDNSQuery) + httpServer := &http.Server{ Addr: dnsLoopbackAddr, - Net: "tcp", + Handler: m, ReadTimeout: time.Second, WriteTimeout: time.Second, } - udpServer := &dns.Server{ - Addr: dnsLoopbackAddr, - Net: "udp", - ReadTimeout: time.Second, - WriteTimeout: time.Second, - } - go func() { - err := tcpServer.ListenAndServe() - if err != nil { - fmt.Println(err) - } - }() go func() { - err := udpServer.ListenAndServe() + cert := "../test/certs/ipki/localhost/cert.pem" + key := "../test/certs/ipki/localhost/key.pem" + err := httpServer.ListenAndServeTLS(cert, key) if err != nil { fmt.Println(err) } }() go func() { <-stopChan - err := tcpServer.Shutdown() - if err != nil { - log.Fatal(err) - } - err = udpServer.Shutdown() + err := httpServer.Shutdown(context.Background()) if err != nil { log.Fatal(err) } @@ -212,8 +236,8 @@ func serveLoopResolver(stopChan chan bool) { } func pollServer() { - backoff := time.Duration(200 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) + backoff := 200 * time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() ticker := time.NewTicker(backoff) @@ -232,7 +256,21 @@ func pollServer() { } } +// tlsConfig is used for the TLS config of client instances that talk to the +// DoH server set up in TestMain. 
+var tlsConfig *tls.Config + func TestMain(m *testing.M) { + root, err := os.ReadFile("../test/certs/ipki/minica.pem") + if err != nil { + log.Fatal(err) + } + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(root) + tlsConfig = &tls.Config{ + RootCAs: pool, + } + stop := make(chan bool, 1) serveLoopResolver(stop) pollServer() @@ -245,15 +283,22 @@ func TestDNSNoServers(t *testing.T) { staticProvider, err := NewStaticProvider([]string{}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Hour, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) + obj := New(time.Hour, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) - _, err = obj.LookupHost(context.Background(), "letsencrypt.org") + _, resolver, err := obj.LookupA(context.Background(), "letsencrypt.org") + test.AssertEquals(t, resolver, "") test.AssertError(t, err, "No servers") - _, err = obj.LookupTXT(context.Background(), "letsencrypt.org") + _, resolver, err = obj.LookupAAAA(context.Background(), "letsencrypt.org") + test.AssertEquals(t, resolver, "") test.AssertError(t, err, "No servers") - _, _, err = obj.LookupCAA(context.Background(), "letsencrypt.org") + _, resolver, err = obj.LookupTXT(context.Background(), "letsencrypt.org") + test.AssertEquals(t, resolver, "") + test.AssertError(t, err, "No servers") + + _, resolver, err = obj.LookupCAA(context.Background(), "letsencrypt.org") + test.AssertEquals(t, resolver, "") test.AssertError(t, err, "No servers") } @@ -261,140 +306,244 @@ func TestDNSOneServer(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) - - _, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org") + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) + _, resolver, err := obj.LookupA(context.Background(), "letsencrypt.org") test.AssertNotError(t, err, "No message") + test.AssertEquals(t, resolver, "127.0.0.1:4053") } func TestDNSDuplicateServers(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr, dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) - - _, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org") + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) + _, resolver, err := obj.LookupA(context.Background(), "letsencrypt.org") test.AssertNotError(t, err, "No message") + test.AssertEquals(t, resolver, "127.0.0.1:4053") } func TestDNSServFail(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) bad := "servfail.com" - _, err = obj.LookupTXT(context.Background(), bad) + _, _, err = obj.LookupTXT(context.Background(), "servfail.com") test.AssertError(t, err, "LookupTXT didn't return an error") - _, err = obj.LookupHost(context.Background(), bad) - test.AssertError(t, 
err, "LookupHost didn't return an error") + _, _, err = obj.LookupA(context.Background(), bad) + test.AssertError(t, err, "LookupA didn't return an error") - emptyCaa, _, err := obj.LookupCAA(context.Background(), bad) - test.Assert(t, len(emptyCaa) == 0, "Query returned non-empty list of CAA records") - test.AssertError(t, err, "LookupCAA should have returned an error") + _, _, err = obj.LookupAAAA(context.Background(), bad) + test.AssertError(t, err, "LookupAAAA didn't return an error") + + _, _, err = obj.LookupCAA(context.Background(), bad) + test.AssertError(t, err, "LookupCAA didn't return an error") } func TestDNSLookupTXT(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) - a, err := obj.LookupTXT(context.Background(), "letsencrypt.org") - t.Logf("A: %v", a) + _, _, err = obj.LookupTXT(context.Background(), "letsencrypt.org") test.AssertNotError(t, err, "No message") - a, err = obj.LookupTXT(context.Background(), "split-txt.letsencrypt.org") - t.Logf("A: %v ", a) + txt, _, err := obj.LookupTXT(context.Background(), "split-txt.letsencrypt.org") test.AssertNotError(t, err, "No message") - test.AssertEquals(t, len(a), 1) - test.AssertEquals(t, a[0], "abc") + test.AssertEquals(t, len(txt.Final), 1) + test.AssertEquals(t, strings.Join(txt.Final[0].Txt, ""), "abc") } -func TestDNSLookupHost(t *testing.T) { +func TestDNSLookupA(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) - - ip, err := obj.LookupHost(context.Background(), "servfail.com") - t.Logf("servfail.com - IP: %s, Err: %s", ip, err) - test.AssertError(t, err, "Server failure") - test.Assert(t, len(ip) == 0, "Should not have IPs") - - ip, err = obj.LookupHost(context.Background(), "nonexistent.letsencrypt.org") - t.Logf("nonexistent.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertError(t, err, "No valid A or AAAA records should error") - test.Assert(t, len(ip) == 0, "Should not have IPs") - - // Single IPv4 address - ip, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org") - t.Logf("cps.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 1, "Should have IP") - ip, err = obj.LookupHost(context.Background(), "cps.letsencrypt.org") - t.Logf("cps.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 1, "Should have IP") - - // Single IPv6 address - ip, err = obj.LookupHost(context.Background(), "v6.letsencrypt.org") - t.Logf("v6.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 1, "Should not have IPs") - - // Both IPv6 and IPv4 address - ip, err = obj.LookupHost(context.Background(), "dualstack.letsencrypt.org") - t.Logf("dualstack.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 2, "Should have 2 IPs") - expected := net.ParseIP("127.0.0.1") - test.Assert(t, 
ip[0].To4().Equal(expected), "wrong ipv4 address") - expected = net.ParseIP("::1") - test.Assert(t, ip[1].To16().Equal(expected), "wrong ipv6 address") - - // IPv6 error, IPv4 success - ip, err = obj.LookupHost(context.Background(), "v6error.letsencrypt.org") - t.Logf("v6error.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 1, "Should have 1 IP") - expected = net.ParseIP("127.0.0.1") - test.Assert(t, ip[0].To4().Equal(expected), "wrong ipv4 address") - - // IPv6 success, IPv4 error - ip, err = obj.LookupHost(context.Background(), "v4error.letsencrypt.org") - t.Logf("v4error.letsencrypt.org - IP: %s, Err: %s", ip, err) - test.AssertNotError(t, err, "Not an error to exist") - test.Assert(t, len(ip) == 1, "Should have 1 IP") - expected = net.ParseIP("::1") - test.Assert(t, ip[0].To16().Equal(expected), "wrong ipv6 address") - - // IPv6 error, IPv4 error - // Should return both the IPv4 error (Refused) and the IPv6 error (NotImplemented) - hostname := "dualstackerror.letsencrypt.org" - ip, err = obj.LookupHost(context.Background(), hostname) - t.Logf("%s - IP: %s, Err: %s", hostname, ip, err) - test.AssertError(t, err, "Should be an error") - test.AssertContains(t, err.Error(), "REFUSED looking up A for") - test.AssertContains(t, err.Error(), "NOTIMP looking up AAAA for") + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) + + for _, tc := range []struct { + name string + hostname string + wantIPs []net.IP + wantError string + }{ + { + name: "SERVFAIL", + hostname: "servfail.com", + wantError: "SERVFAIL looking up A for servfail.com", + }, + { + name: "No Records", + hostname: "nonexistent.letsencrypt.org", + wantIPs: nil, + }, + { + name: "Single IPv4", + hostname: "cps.letsencrypt.org", + wantIPs: []net.IP{net.ParseIP("64.112.117.1")}, + }, + { + name: "Single IPv6", + hostname: "v6.letsencrypt.org", + wantIPs: nil, + }, + { + name: "Both IPv6 and IPv4", + hostname: "dualstack.letsencrypt.org", + wantIPs: []net.IP{net.ParseIP("64.112.117.1")}, + }, + { + name: "IPv6 error and IPv4 success", + hostname: "v6error.letsencrypt.org", + wantIPs: []net.IP{net.ParseIP("64.112.117.1")}, + }, + { + name: "IPv6 success and IPv4 error", + hostname: "v4error.letsencrypt.org", + wantError: "NOTIMP looking up A for v4error.letsencrypt.org", + }, + { + name: "Both IPv6 and IPv4 error", + hostname: "dualstackerror.letsencrypt.org", + wantError: "REFUSED looking up A for dualstackerror.letsencrypt.org", + }, + } { + t.Run(tc.name, func(t *testing.T) { + res, resolver, err := obj.LookupA(context.Background(), tc.hostname) + + wantResolver := "127.0.0.1:4053" + if resolver != wantResolver { + t.Errorf("LookupA(%s) used resolver %q, but want %q", tc.hostname, resolver, wantResolver) + } + + if tc.wantError != "" { + if err == nil { + t.Fatalf("LookupA(%s) = success, but want error %q", tc.hostname, tc.wantError) + } + if !strings.Contains(err.Error(), tc.wantError) { + t.Errorf("LookupA(%s) = %q, but want error %q", tc.hostname, err, tc.wantError) + } + } else { + if err != nil { + t.Fatalf("LookupA(%s) = %q, but want success", tc.hostname, err) + } + if len(res.Final) != len(tc.wantIPs) { + t.Fatalf("LookupA(%s) returned %d addrs, but want %d", tc.hostname, len(res.Final), len(tc.wantIPs)) + } + for i := range len(tc.wantIPs) { + if !res.Final[i].A.Equal(tc.wantIPs[i]) { + t.Errorf("LookupA(%s) = %s, but want %s", tc.hostname, res.Final[i].A, tc.wantIPs[i]) + } + } 
+			}
+		})
+	}
 }
 
-func TestDNSNXDOMAIN(t *testing.T) {
+func TestDNSLookupAAAA(t *testing.T) {
 	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
 	test.AssertNotError(t, err, "Got error creating StaticProvider")
 
-	obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock())
+	obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
+	for _, tc := range []struct {
+		name      string
+		hostname  string
+		wantIPs   []net.IP
+		wantError string
+	}{
+		{
+			name:      "SERVFAIL",
+			hostname:  "servfail.com",
+			wantError: "SERVFAIL looking up AAAA for servfail.com",
+		},
+		{
+			name:     "No Records",
+			hostname: "nonexistent.letsencrypt.org",
+			wantIPs:  nil,
+		},
+		{
+			name:     "Single IPv4",
+			hostname: "cps.letsencrypt.org",
+			wantIPs:  nil,
+		},
+		{
+			name:     "Single IPv6",
+			hostname: "v6.letsencrypt.org",
+			wantIPs:  []net.IP{net.ParseIP("2602:80a:6000:abad:cafe::1")},
+		},
+		{
+			name:     "Both IPv6 and IPv4",
+			hostname: "dualstack.letsencrypt.org",
+			wantIPs:  []net.IP{net.ParseIP("2602:80a:6000:abad:cafe::1")},
+		},
+		{
+			name:      "IPv6 error and IPv4 success",
+			hostname:  "v6error.letsencrypt.org",
+			wantError: "NOTIMP looking up AAAA for v6error.letsencrypt.org",
+		},
+		{
+			name:     "IPv6 success and IPv4 error",
+			hostname: "v4error.letsencrypt.org",
+			wantIPs:  []net.IP{net.ParseIP("2602:80a:6000:abad:cafe::1")},
+		},
+		{
+			name:      "Both IPv6 and IPv4 error",
+			hostname:  "dualstackerror.letsencrypt.org",
+			wantError: "NOTIMP looking up AAAA for dualstackerror.letsencrypt.org",
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			res, resolver, err := obj.LookupAAAA(context.Background(), tc.hostname)
+
+			wantResolver := "127.0.0.1:4053"
+			if resolver != wantResolver {
+				t.Errorf("LookupAAAA(%s) used resolver %q, but want %q", tc.hostname, resolver, wantResolver)
+			}
+
+			if tc.wantError != "" {
+				if err == nil {
+					t.Fatalf("LookupAAAA(%s) = success, but want error %q", tc.hostname, tc.wantError)
+				}
+				if !strings.Contains(err.Error(), tc.wantError) {
+					t.Errorf("LookupAAAA(%s) = %q, but want error %q", tc.hostname, err, tc.wantError)
+				}
+			} else {
+				if err != nil {
+					t.Fatalf("LookupAAAA(%s) = %q, but want success", tc.hostname, err)
+				}
+				if len(res.Final) != len(tc.wantIPs) {
+					t.Fatalf("LookupAAAA(%s) returned %d addrs, but want %d", tc.hostname, len(res.Final), len(tc.wantIPs))
+				}
+				for i := range len(tc.wantIPs) {
+					if !res.Final[i].AAAA.Equal(tc.wantIPs[i]) {
+						t.Errorf("LookupAAAA(%s) = %s, but want %s", tc.hostname, res.Final[i].AAAA, tc.wantIPs[i])
+					}
+				}
+			}
+		})
+	}
+}
+
+func TestDNSNXDOMAIN(t *testing.T) {
+	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
+	test.AssertNotError(t, err, "Got error creating StaticProvider")
+
+	obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig)
 	hostname := "nxdomain.letsencrypt.org"
-	_, err = obj.LookupHost(context.Background(), hostname)
+
+	_, _, err = obj.LookupA(context.Background(), hostname)
 	test.AssertContains(t, err.Error(), "NXDOMAIN looking up A for")
+
+	_, _, err = obj.LookupAAAA(context.Background(), hostname)
 	test.AssertContains(t, err.Error(), "NXDOMAIN looking up AAAA for")
 
-	_, err = obj.LookupTXT(context.Background(), hostname)
-	expected := &Error{dns.TypeTXT, hostname, nil, dns.RcodeNameError}
+	_, _, err = obj.LookupTXT(context.Background(), hostname)
+	expected := Error{dns.TypeTXT, hostname, nil, dns.RcodeNameError, nil}
 	test.AssertDeepEquals(t, err, expected)
 }
 
@@
-402,12 +551,13 @@ func TestDNSLookupCAA(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - obj := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, blog.UseMock()) + obj := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 1, "", blog.UseMock(), tlsConfig) removeIDExp := regexp.MustCompile(" id: [[:digit:]]+") - caas, resp, err := obj.LookupCAA(context.Background(), "bracewel.net") + caas, resolver, err := obj.LookupCAA(context.Background(), "bracewel.net") test.AssertNotError(t, err, "CAA lookup failed") - test.Assert(t, len(caas) > 0, "Should have CAA records") + test.Assert(t, len(caas.Final) > 0, "Should have CAA records") + test.AssertEquals(t, resolver, "127.0.0.1:4053") expectedResp := `;; opcode: QUERY, status: NOERROR, id: XXXX ;; flags: qr rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 @@ -417,17 +567,22 @@ func TestDNSLookupCAA(t *testing.T) { ;; ANSWER SECTION: bracewel.net. 0 IN CAA 1 issue "letsencrypt.org" ` - test.AssertEquals(t, removeIDExp.ReplaceAllString(resp, " id: XXXX"), expectedResp) + test.AssertEquals(t, removeIDExp.ReplaceAllString(caas.String(), " id: XXXX"), expectedResp) - caas, resp, err = obj.LookupCAA(context.Background(), "nonexistent.letsencrypt.org") + caas, resolver, err = obj.LookupCAA(context.Background(), "nonexistent.letsencrypt.org") test.AssertNotError(t, err, "CAA lookup failed") - test.Assert(t, len(caas) == 0, "Shouldn't have CAA records") - expectedResp = "" - test.AssertEquals(t, resp, expectedResp) + test.Assert(t, len(caas.Final) == 0, "Shouldn't have CAA records") + test.AssertEquals(t, resolver, "127.0.0.1:4053") - caas, resp, err = obj.LookupCAA(context.Background(), "cname.example.com") + caas, resolver, err = obj.LookupCAA(context.Background(), "nxdomain.letsencrypt.org") test.AssertNotError(t, err, "CAA lookup failed") - test.Assert(t, len(caas) > 0, "Should follow CNAME to find CAA") + test.Assert(t, len(caas.Final) == 0, "Shouldn't have CAA records") + test.AssertEquals(t, resolver, "127.0.0.1:4053") + + caas, resolver, err = obj.LookupCAA(context.Background(), "cname.example.com") + test.AssertNotError(t, err, "CAA lookup failed") + test.Assert(t, len(caas.Final) > 0, "Should follow CNAME to find CAA") + test.AssertEquals(t, resolver, "127.0.0.1:4053") expectedResp = `;; opcode: QUERY, status: NOERROR, id: XXXX ;; flags: qr rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 @@ -437,38 +592,12 @@ bracewel.net. 0 IN CAA 1 issue "letsencrypt.org" ;; ANSWER SECTION: caa.example.com. 
0 IN CAA 1 issue "letsencrypt.org" ` - test.AssertEquals(t, removeIDExp.ReplaceAllString(resp, " id: XXXX"), expectedResp) -} + test.AssertEquals(t, removeIDExp.ReplaceAllString(caas.String(), " id: XXXX"), expectedResp) -func TestIsPrivateIP(t *testing.T) { - test.Assert(t, isPrivateV4(net.ParseIP("127.0.0.1")), "should be private") - test.Assert(t, isPrivateV4(net.ParseIP("192.168.254.254")), "should be private") - test.Assert(t, isPrivateV4(net.ParseIP("10.255.0.3")), "should be private") - test.Assert(t, isPrivateV4(net.ParseIP("172.16.255.255")), "should be private") - test.Assert(t, isPrivateV4(net.ParseIP("172.31.255.255")), "should be private") - test.Assert(t, !isPrivateV4(net.ParseIP("128.0.0.1")), "should be private") - test.Assert(t, !isPrivateV4(net.ParseIP("192.169.255.255")), "should not be private") - test.Assert(t, !isPrivateV4(net.ParseIP("9.255.0.255")), "should not be private") - test.Assert(t, !isPrivateV4(net.ParseIP("172.32.255.255")), "should not be private") - - test.Assert(t, isPrivateV6(net.ParseIP("::0")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("::1")), "should be private") - test.Assert(t, !isPrivateV6(net.ParseIP("::2")), "should not be private") - - test.Assert(t, isPrivateV6(net.ParseIP("fe80::1")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("febf::1")), "should be private") - test.Assert(t, !isPrivateV6(net.ParseIP("fec0::1")), "should not be private") - test.Assert(t, !isPrivateV6(net.ParseIP("feff::1")), "should not be private") - - test.Assert(t, isPrivateV6(net.ParseIP("ff00::1")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("ff10::1")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")), "should be private") - - test.Assert(t, isPrivateV6(net.ParseIP("2002::")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("0100::")), "should be private") - test.Assert(t, isPrivateV6(net.ParseIP("0100::0000:ffff:ffff:ffff:ffff")), "should be private") - test.Assert(t, !isPrivateV6(net.ParseIP("0100::0001:0000:0000:0000:0000")), "should be private") + _, resolver, err = obj.LookupCAA(context.Background(), "gonetld") + test.AssertError(t, err, "should fail for TLD NXDOMAIN") + test.AssertContains(t, err.Error(), "NXDOMAIN") + test.AssertEquals(t, resolver, "127.0.0.1:4053") } type testExchanger struct { @@ -479,7 +608,11 @@ type testExchanger struct { var errTooManyRequests = errors.New("too many requests") -func (te *testExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) { +func (te *testExchanger) ExchangeContext(ctx context.Context, m *dns.Msg, a string) (*dns.Msg, time.Duration, error) { + if ctx.Err() != nil { + return nil, 0, ctx.Err() + } + te.Lock() defer te.Unlock() msg := &dns.Msg{ @@ -495,10 +628,10 @@ func (te *testExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration } func TestRetry(t *testing.T) { - isTempErr := &net.OpError{Op: "read", Err: tempError(true)} - nonTempErr := &net.OpError{Op: "read", Err: tempError(false)} + isTimeoutErr := &url.Error{Op: "read", Err: testTimeoutError(true)} + nonTimeoutErr := &url.Error{Op: "read", Err: testTimeoutError(false)} servFailError := errors.New("DNS problem: server failure at resolver looking up TXT for example.com") - netError := errors.New("DNS problem: networking error looking up TXT for example.com") + timeoutFailError := errors.New("DNS 
problem: query timed out looking up TXT for example.com") type testCase struct { name string maxTries int @@ -528,28 +661,28 @@ func TestRetry(t *testing.T) { expected: servFailError, expectedCount: 1, }, - // Temporary err, then non-OpError stops at two tries + // Timeout err, then non-OpError stops at two tries { name: "err-then-non-operror", maxTries: 3, te: &testExchanger{ - errs: []error{isTempErr, errors.New("nope")}, + errs: []error{isTimeoutErr, errors.New("nope")}, }, expected: servFailError, expectedCount: 2, }, - // Temporary error given always + // Timeout error given always { - name: "persistent-temp-error", + name: "persistent-timeout-error", maxTries: 3, te: &testExchanger{ errs: []error{ - isTempErr, - isTempErr, - isTempErr, + isTimeoutErr, + isTimeoutErr, + isTimeoutErr, }, }, - expected: netError, + expected: timeoutFailError, expectedCount: 3, metricsAllRetries: 1, }, @@ -564,59 +697,59 @@ func TestRetry(t *testing.T) { expected: nil, expectedCount: 1, }, - // Temporary error given just once causes two tries + // Timeout error given just once causes two tries { - name: "single-temp-error", + name: "single-timeout-error", maxTries: 3, te: &testExchanger{ errs: []error{ - isTempErr, + isTimeoutErr, nil, }, }, expected: nil, expectedCount: 2, }, - // Temporary error given twice causes three tries + // Timeout error given twice causes three tries { - name: "double-temp-error", + name: "double-timeout-error", maxTries: 3, te: &testExchanger{ errs: []error{ - isTempErr, - isTempErr, + isTimeoutErr, + isTimeoutErr, nil, }, }, expected: nil, expectedCount: 3, }, - // Temporary error given thrice causes three tries and fails + // Timeout error given thrice causes three tries and fails { - name: "triple-temp-error", + name: "triple-timeout-error", maxTries: 3, te: &testExchanger{ errs: []error{ - isTempErr, - isTempErr, - isTempErr, + isTimeoutErr, + isTimeoutErr, + isTimeoutErr, }, }, - expected: netError, + expected: timeoutFailError, expectedCount: 3, metricsAllRetries: 1, }, - // temporary then non-Temporary error causes two retries + // timeout then non-timeout error causes two retries { - name: "temp-nontemp-error", + name: "timeout-nontimeout-error", maxTries: 3, te: &testExchanger{ errs: []error{ - isTempErr, - nonTempErr, + isTimeoutErr, + nonTimeoutErr, }, }, - expected: netError, + expected: servFailError, expectedCount: 2, }, } @@ -626,10 +759,10 @@ func TestRetry(t *testing.T) { staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - testClient := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), tc.maxTries, blog.UseMock()) + testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), tc.maxTries, "", blog.UseMock(), tlsConfig) dr := testClient.(*impl) - dr.dnsClient = tc.te - _, err = dr.LookupTXT(context.Background(), "example.com") + dr.exchanger = tc.te + _, _, err = dr.LookupTXT(context.Background(), "example.com") if err == errTooManyRequests { t.Errorf("#%d, sent more requests than the test case handles", i) } @@ -646,74 +779,64 @@ func TestRetry(t *testing.T) { test.AssertMetricWithLabelsEquals( t, dr.timeoutCounter, prometheus.Labels{ "qtype": "TXT", - "type": "out of retries", - "resolver": dnsLoopbackAddr, + "result": "out of retries", + "resolver": "127.0.0.1", "isTLD": "false", }, tc.metricsAllRetries) } }) } +} +func TestRetryMetrics(t *testing.T) { staticProvider, err := 
NewStaticProvider([]string{dnsLoopbackAddr}) test.AssertNotError(t, err, "Got error creating StaticProvider") - testClient := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, blog.UseMock()) + // This lookup should not be retried, because the error comes from the + // context itself being cancelled. It should never see the error in the + // testExchanger, because the fake exchanger (like the real http package) + // checks for cancellation before doing any work. + testClient := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, "", blog.UseMock(), tlsConfig) dr := testClient.(*impl) - dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}} - ctx, cancel := context.WithCancel(context.Background()) + dr.exchanger = &testExchanger{errs: []error{errors.New("oops")}} + ctx, cancel := context.WithCancel(t.Context()) cancel() - _, err = dr.LookupTXT(ctx, "example.com") + _, _, err = dr.LookupTXT(ctx, "example.com") if err == nil || err.Error() != "DNS problem: query timed out (and was canceled) looking up TXT for example.com" { t.Errorf("expected %s, got %s", context.Canceled, err) } + test.AssertMetricWithLabelsEquals( + t, dr.timeoutCounter, prometheus.Labels{ + "qtype": "TXT", + "result": "canceled", + "resolver": "127.0.0.1", + }, 1) - dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}} - ctx, cancel = context.WithTimeout(context.Background(), -10*time.Hour) + // Same as above, except rather than cancelling the context ourselves, we + // let the go runtime cancel it as a result of a deadline in the past. + testClient = New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 3, "", blog.UseMock(), tlsConfig) + dr = testClient.(*impl) + dr.exchanger = &testExchanger{errs: []error{errors.New("oops")}} + ctx, cancel = context.WithTimeout(t.Context(), -10*time.Hour) defer cancel() - _, err = dr.LookupTXT(ctx, "example.com") + _, _, err = dr.LookupTXT(ctx, "example.com") if err == nil || err.Error() != "DNS problem: query timed out looking up TXT for example.com" { t.Errorf("expected %s, got %s", context.DeadlineExceeded, err) } - - dr.dnsClient = &testExchanger{errs: []error{isTempErr, isTempErr, nil}} - ctx, deadlineCancel := context.WithTimeout(context.Background(), -10*time.Hour) - deadlineCancel() - _, err = dr.LookupTXT(ctx, "example.com") - if err == nil || - err.Error() != "DNS problem: query timed out looking up TXT for example.com" { - t.Errorf("expected %s, got %s", context.DeadlineExceeded, err) - } - test.AssertMetricWithLabelsEquals( t, dr.timeoutCounter, prometheus.Labels{ "qtype": "TXT", - "type": "canceled", - "resolver": dnsLoopbackAddr, + "result": "deadline exceeded", + "resolver": "127.0.0.1", }, 1) - - test.AssertMetricWithLabelsEquals( - t, dr.timeoutCounter, prometheus.Labels{ - "qtype": "TXT", - "type": "deadline exceeded", - "resolver": dnsLoopbackAddr, - }, 2) } -func TestIsTLD(t *testing.T) { - if isTLD("com") != "true" { - t.Errorf("expected 'com' to be a TLD, got %q", isTLD("com")) - } - if isTLD("example.com") != "false" { - t.Errorf("expected 'example.com' to not a TLD, got %q", isTLD("example.com")) - } -} +type testTimeoutError bool -type tempError bool - -func (t tempError) Temporary() bool { return bool(t) } -func (t tempError) Error() string { return fmt.Sprintf("Temporary: %t", t) } +func (t testTimeoutError) Timeout() bool { return bool(t) } +func (t testTimeoutError) Error() string { return fmt.Sprintf("Timeout: %t", t) } // 
rotateFailureExchanger is a dns.Exchange implementation that tracks a count // of the number of calls to `Exchange` for a given address in the `lookups` @@ -725,9 +848,9 @@ type rotateFailureExchanger struct { brokenAddresses map[string]bool } -// Exchange for rotateFailureExchanger tracks the `a` argument in `lookups` and -// if present in `brokenAddresses`, returns a temporary error. -func (e *rotateFailureExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time.Duration, error) { +// ExchangeContext for rotateFailureExchanger tracks the `a` argument in `lookups` and +// if present in `brokenAddresses`, returns a timeout error. +func (e *rotateFailureExchanger) ExchangeContext(_ context.Context, m *dns.Msg, a string) (*dns.Msg, time.Duration, error) { e.Lock() defer e.Unlock() @@ -736,8 +859,8 @@ func (e *rotateFailureExchanger) Exchange(m *dns.Msg, a string) (*dns.Msg, time. // If its a broken server, return a retryable error if e.brokenAddresses[a] { - isTempErr := &net.OpError{Op: "read", Err: tempError(true)} - return nil, 2 * time.Millisecond, isTempErr + isTimeoutErr := &url.Error{Op: "read", Err: testTimeoutError(true)} + return nil, 2 * time.Millisecond, isTimeoutErr } return m, 2 * time.Millisecond, nil @@ -758,10 +881,9 @@ func TestRotateServerOnErr(t *testing.T) { // working server staticProvider, err := NewStaticProvider(dnsServers) test.AssertNotError(t, err, "Got error creating StaticProvider") - fmt.Println(staticProvider.servers) maxTries := 5 - client := NewTest(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), maxTries, blog.UseMock()) + client := New(time.Second*10, staticProvider, metrics.NoopRegisterer, clock.NewFake(), maxTries, "", blog.UseMock(), tlsConfig) // Configure a mock exchanger that will always return a retryable error for // servers A and B. This will force server "[2606:4700:4700::1111]:53" to do @@ -773,15 +895,16 @@ func TestRotateServerOnErr(t *testing.T) { }, lookups: make(map[string]int), } - client.(*impl).dnsClient = mock + client.(*impl).exchanger = mock // Perform a bunch of lookups. We choose the initial server randomly. Any time // A or B is chosen there should be an error and a retry using the next server // in the list. Since we configured maxTries to be larger than the number of // servers *all* queries should eventually succeed by being retried against // server "[2606:4700:4700::1111]:53". - for i := 0; i < maxTries*2; i++ { - _, err := client.LookupTXT(context.Background(), "example.com") + for range maxTries * 2 { + _, resolver, err := client.LookupTXT(context.Background(), "example.com") + test.AssertEquals(t, resolver, "[2606:4700:4700::1111]:53") // Any errors are unexpected - server "[2606:4700:4700::1111]:53" should // have responded without error. 
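		// [Editor's note] The arithmetic behind this expectation: with three
		// configured servers, two of which always return a retryable timeout,
		// and maxTries = 5 > len(servers), deterministic rotation reaches the
		// one working server within at most two retries from any starting
		// index. That is why all maxTries*2 queries succeed, and why the final
		// lookup count for "[2606:4700:4700::1111]:53" is exactly maxTries*2.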
		test.AssertNotError(t, err, "Expected no error from eventual retry with functional server")
@@ -797,3 +920,44 @@
 	test.AssertEquals(t, mock.lookups["[2606:4700:4700::1111]:53"], maxTries*2)
 }
+
+type mockTimeoutURLError struct{}
+
+func (m *mockTimeoutURLError) Error() string { return "whoops, oh gosh" }
+func (m *mockTimeoutURLError) Timeout() bool { return true }
+
+type dohAlwaysRetryExchanger struct {
+	sync.Mutex
+	err error
+}
+
+func (dohE *dohAlwaysRetryExchanger) ExchangeContext(_ context.Context, m *dns.Msg, a string) (*dns.Msg, time.Duration, error) {
+	dohE.Lock()
+	defer dohE.Unlock()
+
+	timeoutURLerror := &url.Error{
+		Op:  "GET",
+		URL: "https://example.com",
+		Err: &mockTimeoutURLError{},
+	}
+
+	return nil, time.Second, timeoutURLerror
+}
+
+func TestDOHMetric(t *testing.T) {
+	staticProvider, err := NewStaticProvider([]string{dnsLoopbackAddr})
+	test.AssertNotError(t, err, "Got error creating StaticProvider")
+
+	testClient := New(time.Second*11, staticProvider, metrics.NoopRegisterer, clock.NewFake(), 0, "", blog.UseMock(), tlsConfig)
+	resolver := testClient.(*impl)
+	resolver.exchanger = &dohAlwaysRetryExchanger{err: &url.Error{Op: "read", Err: testTimeoutError(true)}}
+
+	// Starting out, we should count 0 "out of retries" errors.
+	test.AssertMetricWithLabelsEquals(t, resolver.timeoutCounter, prometheus.Labels{"qtype": "None", "result": "out of retries", "resolver": "127.0.0.1", "isTLD": "false"}, 0)
+
+	// Trigger the error.
+	_, _, _ = resolver.exchangeOne(context.Background(), "example.com", 0)
+
+	// Now, we should count 1 "out of retries" error.
+	test.AssertMetricWithLabelsEquals(t, resolver.timeoutCounter, prometheus.Labels{"qtype": "None", "result": "out of retries", "resolver": "127.0.0.1", "isTLD": "false"}, 1)
+}
diff --git a/bdns/mocks.go b/bdns/mocks.go
index 54635689096..d9c3b15ef0e 100644
--- a/bdns/mocks.go
+++ b/bdns/mocks.go
@@ -3,122 +3,29 @@ package bdns
 import (
 	"context"
 	"errors"
-	"fmt"
-	"net"
-	"os"
 
 	"github.com/miekg/dns"
-
-	blog "github.com/letsencrypt/boulder/log"
 )
 
 // MockClient is a mock
-type MockClient struct {
-	Log blog.Logger
-}
+type MockClient struct{}
 
 // LookupTXT is a mock
-func (mock *MockClient) LookupTXT(_ context.Context, hostname string) ([]string, error) {
-	if hostname == "_acme-challenge.servfail.com" {
-		return nil, fmt.Errorf("SERVFAIL")
-	}
-	if hostname == "_acme-challenge.good-dns01.com" {
-		// base64(sha256("LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDfowyjxAjEuX0"
-		// + "." + "9jg46WB3rR_AHD-EBXdN7cBkH1WOu0tA3M9fm21mqTI"))
-		// expected token + test account jwk thumbprint
-		return []string{"LPsIwTo7o8BoG0-vjCyGQGBWSVIPxI-i_X336eUOQZo"}, nil
-	}
-	if hostname == "_acme-challenge.wrong-dns01.com" {
-		return []string{"a"}, nil
-	}
-	if hostname == "_acme-challenge.wrong-many-dns01.com" {
-		return []string{"a", "b", "c", "d", "e"}, nil
-	}
-	if hostname == "_acme-challenge.long-dns01.com" {
-		return []string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, nil
-	}
-	if hostname == "_acme-challenge.no-authority-dns01.com" {
-		// base64(sha256("LoqXcYV8q5ONbJQxbmR7SCTNo3tiAXDfowyjxAjEuX0"
-		// + "." 
+ "9jg46WB3rR_AHD-EBXdN7cBkH1WOu0tA3M9fm21mqTI")) - // expected token + test account jwk thumbprint - return []string{"LPsIwTo7o8BoG0-vjCyGQGBWSVIPxI-i_X336eUOQZo"}, nil - } - // empty-txts.com always returns zero TXT records - if hostname == "_acme-challenge.empty-txts.com" { - return []string{}, nil - } - return []string{"hostname"}, nil -} - -// makeTimeoutError returns a a net.OpError for which Timeout() returns true. -func makeTimeoutError() *net.OpError { - return &net.OpError{ - Err: os.NewSyscallError("ugh timeout", timeoutError{}), - } +func (mock *MockClient) LookupTXT(_ context.Context, hostname string) (*Result[*dns.TXT], string, error) { + return nil, "MockClient", errors.New("unexpected LookupTXT call on test fake") } -type timeoutError struct{} - -func (t timeoutError) Error() string { - return "so sloooow" -} -func (t timeoutError) Timeout() bool { - return true +// LookupA is a fake +func (mock *MockClient) LookupA(_ context.Context, hostname string) (*Result[*dns.A], string, error) { + return nil, "MockClient", errors.New("unexpected LookupA call on test fake") } -// LookupHost is a mock -func (mock *MockClient) LookupHost(_ context.Context, hostname string) ([]net.IP, error) { - if hostname == "always.invalid" || - hostname == "invalid.invalid" { - return []net.IP{}, nil - } - if hostname == "always.timeout" { - return []net.IP{}, &Error{dns.TypeA, "always.timeout", makeTimeoutError(), -1} - } - if hostname == "always.error" { - err := &net.OpError{ - Op: "read", - Net: "udp", - Err: errors.New("some net error"), - } - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn(hostname), dns.TypeA) - m.AuthenticatedData = true - m.SetEdns0(4096, false) - logDNSError(mock.Log, "mock.server", hostname, m, nil, err) - return []net.IP{}, &Error{dns.TypeA, hostname, err, -1} - } - if hostname == "id.mismatch" { - err := dns.ErrId - m := new(dns.Msg) - m.SetQuestion(dns.Fqdn(hostname), dns.TypeA) - m.AuthenticatedData = true - m.SetEdns0(4096, false) - r := new(dns.Msg) - record := new(dns.A) - record.Hdr = dns.RR_Header{Name: dns.Fqdn(hostname), Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0} - record.A = net.ParseIP("127.0.0.1") - r.Answer = append(r.Answer, record) - logDNSError(mock.Log, "mock.server", hostname, m, r, err) - return []net.IP{}, &Error{dns.TypeA, hostname, err, -1} - } - // dual-homed host with an IPv6 and an IPv4 address - if hostname == "ipv4.and.ipv6.localhost" { - return []net.IP{ - net.ParseIP("::1"), - net.ParseIP("127.0.0.1"), - }, nil - } - if hostname == "ipv6.localhost" { - return []net.IP{ - net.ParseIP("::1"), - }, nil - } - ip := net.ParseIP("127.0.0.1") - return []net.IP{ip}, nil +// LookupAAAA is a fake +func (mock *MockClient) LookupAAAA(_ context.Context, hostname string) (*Result[*dns.AAAA], string, error) { + return nil, "MockClient", errors.New("unexpected LookupAAAA call on test fake") } -// LookupCAA returns mock records for use in tests. 
-func (mock *MockClient) LookupCAA(_ context.Context, domain string) ([]*dns.CAA, string, error) { - return nil, "", nil +// LookupCAA is a fake +func (mock *MockClient) LookupCAA(_ context.Context, domain string) (*Result[*dns.CAA], string, error) { + return nil, "MockClient", errors.New("unexpected LookupCAA call on test fake") } diff --git a/bdns/problem.go b/bdns/problem.go index ac972681209..8783743a568 100644 --- a/bdns/problem.go +++ b/bdns/problem.go @@ -2,8 +2,10 @@ package bdns import ( "context" + "errors" "fmt" "net" + "net/url" "github.com/miekg/dns" ) @@ -15,12 +17,90 @@ type Error struct { // Exactly one of rCode or underlying should be set. underlying error rCode int + + // Optional: If the resolver returned extended error information, it will be stored here. + // https://www.rfc-editor.org/rfc/rfc8914 + extended *dns.EDNS0_EDE +} + +// extendedDNSError returns non-nil if the input message contained an OPT RR +// with an EDE option. https://www.rfc-editor.org/rfc/rfc8914. +func extendedDNSError(msg *dns.Msg) *dns.EDNS0_EDE { + opt := msg.IsEdns0() + if opt != nil { + for _, opt := range opt.Option { + ede, ok := opt.(*dns.EDNS0_EDE) + if !ok { + continue + } + return ede + } + } + return nil +} + +// wrapErr returns a non-nil error if err is non-nil or if resp.Rcode is not dns.RcodeSuccess. +// The error includes appropriate details about the DNS query that failed. +func wrapErr(queryType uint16, hostname string, resp *dns.Msg, err error) error { + if err != nil { + return Error{ + recordType: queryType, + hostname: hostname, + underlying: err, + extended: nil, + } + } + if resp.Rcode != dns.RcodeSuccess { + return Error{ + recordType: queryType, + hostname: hostname, + rCode: resp.Rcode, + underlying: nil, + extended: extendedDNSError(resp), + } + } + return nil +} + +// A copy of miekg/dns's mapping of error codes to strings. We tweak it slightly so all DNSSEC-related +// errors say "DNSSEC" at the beginning. +// https://pkg.go.dev/github.com/miekg/dns#ExtendedErrorCodeToString +// Also note that not all of these codes can currently be emitted by Unbound. 
See Unbound's
+// announcement post for EDE: https://blog.nlnetlabs.nl/extended-dns-error-support-for-unbound/
+var extendedErrorCodeToString = map[uint16]string{
+	dns.ExtendedErrorCodeOther:                      "Other",
+	dns.ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "DNSSEC: Unsupported DNSKEY Algorithm",
+	dns.ExtendedErrorCodeUnsupportedDSDigestType:    "DNSSEC: Unsupported DS Digest Type",
+	dns.ExtendedErrorCodeStaleAnswer:                "Stale Answer",
+	dns.ExtendedErrorCodeForgedAnswer:               "Forged Answer",
+	dns.ExtendedErrorCodeDNSSECIndeterminate:        "DNSSEC: Indeterminate",
+	dns.ExtendedErrorCodeDNSBogus:                   "DNSSEC: Bogus",
+	dns.ExtendedErrorCodeSignatureExpired:           "DNSSEC: Signature Expired",
+	dns.ExtendedErrorCodeSignatureNotYetValid:       "DNSSEC: Signature Not Yet Valid",
+	dns.ExtendedErrorCodeDNSKEYMissing:              "DNSSEC: DNSKEY Missing",
+	dns.ExtendedErrorCodeRRSIGsMissing:              "DNSSEC: RRSIGs Missing",
+	dns.ExtendedErrorCodeNoZoneKeyBitSet:            "DNSSEC: No Zone Key Bit Set",
+	dns.ExtendedErrorCodeNSECMissing:                "DNSSEC: NSEC Missing",
+	dns.ExtendedErrorCodeCachedError:                "Cached Error",
+	dns.ExtendedErrorCodeNotReady:                   "Not Ready",
+	dns.ExtendedErrorCodeBlocked:                    "Blocked",
+	dns.ExtendedErrorCodeCensored:                   "Censored",
+	dns.ExtendedErrorCodeFiltered:                   "Filtered",
+	dns.ExtendedErrorCodeProhibited:                 "Prohibited",
+	dns.ExtendedErrorCodeStaleNXDOMAINAnswer:        "Stale NXDOMAIN Answer",
+	dns.ExtendedErrorCodeNotAuthoritative:           "Not Authoritative",
+	dns.ExtendedErrorCodeNotSupported:               "Not Supported",
+	dns.ExtendedErrorCodeNoReachableAuthority:       "No Reachable Authority",
+	dns.ExtendedErrorCodeNetworkError:               "Network Error between Resolver and Authority",
+	dns.ExtendedErrorCodeInvalidData:                "Invalid Data",
+}
 
 func (d Error) Error() string {
 	var detail, additional string
 	if d.underlying != nil {
-		if netErr, ok := d.underlying.(*net.OpError); ok {
+		var netErr *net.OpError
+		var urlErr *url.Error
+		if errors.As(d.underlying, &netErr) {
 			if netErr.Timeout() {
 				detail = detailDNSTimeout
 			} else {
@@ -28,9 +108,14 @@
 		}
 		// Note: we check d.underlying here even though `Timeout()` does this because the call to `netErr.Timeout()` above only happens for `*net.OpError` underlying types!
-		} else if d.underlying == context.DeadlineExceeded {
+		} else if errors.As(d.underlying, &urlErr) && urlErr.Timeout() {
+			// For DoH queries, we can get back a `*url.Error` that wraps the unexported type
+			// `http.httpError`. Unfortunately `http.httpError` doesn't wrap any errors (like
+			// context.DeadlineExceeded), so we can't check for those; instead we need to call Timeout().
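			// [Editor's note] Concretely, the shape being matched in this branch is
			// (an illustrative sketch, using the dohTimeoutError test helper defined
			// later in this diff in problem_test.go; the URL is made up):
			//
			//	err := &url.Error{Op: "Post", URL: "https://203.0.113.1/dns-query", Err: dohTimeoutError{}}
			//	var urlErr *url.Error
			//	errors.As(err, &urlErr) && urlErr.Timeout() // true -> detailDNSTimeout
			//
			// url.Error's Timeout() delegates to the wrapped error's own Timeout()
			// method, which is why a non-wrapping http.httpError can still be classified.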
+ detail = detailDNSTimeout + } else if errors.Is(d.underlying, context.DeadlineExceeded) { detail = detailDNSTimeout - } else if d.underlying == context.Canceled { + } else if errors.Is(d.underlying, context.Canceled) { detail = detailCanceled } else { detail = detailServerFailure @@ -43,8 +128,22 @@ func (d Error) Error() string { } else { detail = detailServerFailure } - return fmt.Sprintf("DNS problem: %s looking up %s for %s%s", detail, - dns.TypeToString[d.recordType], d.hostname, additional) + + if d.extended == nil { + return fmt.Sprintf("DNS problem: %s looking up %s for %s%s", detail, + dns.TypeToString[d.recordType], d.hostname, additional) + } + + summary := extendedErrorCodeToString[d.extended.InfoCode] + if summary == "" { + summary = fmt.Sprintf("Unknown Extended DNS Error code %d", d.extended.InfoCode) + } + result := fmt.Sprintf("DNS problem: looking up %s for %s: %s", + dns.TypeToString[d.recordType], d.hostname, summary) + if d.extended.ExtraText != "" { + result = result + ": " + d.extended.ExtraText + } + return result } const detailDNSTimeout = "query timed out" diff --git a/bdns/problem_test.go b/bdns/problem_test.go index 97ff0a64469..bdf7040b019 100644 --- a/bdns/problem_test.go +++ b/bdns/problem_test.go @@ -4,9 +4,12 @@ import ( "context" "errors" "net" + "net/url" "testing" "github.com/miekg/dns" + + "github.com/letsencrypt/boulder/test" ) func TestError(t *testing.T) { @@ -15,26 +18,41 @@ func TestError(t *testing.T) { expected string }{ { - &Error{dns.TypeA, "hostname", makeTimeoutError(), -1}, - "DNS problem: query timed out looking up A for hostname", - }, { - &Error{dns.TypeMX, "hostname", &net.OpError{Err: errors.New("some net error")}, -1}, + &Error{dns.TypeMX, "hostname", &net.OpError{Err: errors.New("some net error")}, -1, nil}, "DNS problem: networking error looking up MX for hostname", }, { - &Error{dns.TypeTXT, "hostname", nil, dns.RcodeNameError}, + &Error{dns.TypeTXT, "hostname", nil, dns.RcodeNameError, nil}, "DNS problem: NXDOMAIN looking up TXT for hostname - check that a DNS record exists for this domain", }, { - &Error{dns.TypeTXT, "hostname", context.DeadlineExceeded, -1}, + &Error{dns.TypeTXT, "hostname", context.DeadlineExceeded, -1, nil}, "DNS problem: query timed out looking up TXT for hostname", }, { - &Error{dns.TypeTXT, "hostname", context.Canceled, -1}, + &Error{dns.TypeTXT, "hostname", context.Canceled, -1, nil}, "DNS problem: query timed out (and was canceled) looking up TXT for hostname", }, { - &Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure}, + &Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure, nil}, "DNS problem: SERVFAIL looking up CAA for hostname - the domain's nameservers may be malfunctioning", }, { - &Error{dns.TypeA, "hostname", nil, dns.RcodeFormatError}, + &Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 1, ExtraText: "oh no"}}, + "DNS problem: looking up A for hostname: DNSSEC: Unsupported DNSKEY Algorithm: oh no", + }, { + &Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 6, ExtraText: ""}}, + "DNS problem: looking up A for hostname: DNSSEC: Bogus", + }, { + &Error{dns.TypeA, "hostname", nil, dns.RcodeServerFailure, &dns.EDNS0_EDE{InfoCode: 1337, ExtraText: "mysterious"}}, + "DNS problem: looking up A for hostname: Unknown Extended DNS Error code 1337: mysterious", + }, { + &Error{dns.TypeCAA, "hostname", nil, dns.RcodeServerFailure, nil}, + "DNS problem: SERVFAIL looking up CAA for hostname - the domain's nameservers may be 
malfunctioning",
+	}, {
+		&Error{dns.TypeA, "hostname", nil, dns.RcodeFormatError, nil},
 		"DNS problem: FORMERR looking up A for hostname",
+	}, {
+		&Error{dns.TypeA, "hostname", &url.Error{Op: "GET", URL: "https://example.com/", Err: dohTimeoutError{}}, -1, nil},
+		"DNS problem: query timed out looking up A for hostname",
 	},
 }
 	for _, tc := range testCases {
@@ -43,3 +61,30 @@ func TestError(t *testing.T) {
 		}
 	}
 }
+
+type dohTimeoutError struct{}
+
+func (dohTimeoutError) Error() string {
+	return "doh no"
+}
+
+func (dohTimeoutError) Timeout() bool {
+	return true
+}
+
+func TestWrapErr(t *testing.T) {
+	err := wrapErr(dns.TypeA, "hostname", &dns.Msg{
+		MsgHdr: dns.MsgHdr{Rcode: dns.RcodeSuccess},
+	}, nil)
+	test.AssertNotError(t, err, "expected success")
+
+	err = wrapErr(dns.TypeA, "hostname", &dns.Msg{
+		MsgHdr: dns.MsgHdr{Rcode: dns.RcodeRefused},
+	}, nil)
+	test.AssertError(t, err, "expected error")
+
+	err = wrapErr(dns.TypeA, "hostname", &dns.Msg{
+		MsgHdr: dns.MsgHdr{Rcode: dns.RcodeSuccess},
+	}, errors.New("oh no"))
+	test.AssertError(t, err, "expected error")
+}
diff --git a/bdns/servers.go b/bdns/servers.go
index bf9539c390d..19abc1e2fff 100644
--- a/bdns/servers.go
+++ b/bdns/servers.go
@@ -4,17 +4,20 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"math/rand"
+	"math/rand/v2"
 	"net"
+	"net/netip"
 	"strconv"
 	"sync"
 	"time"
 
 	"github.com/miekg/dns"
 	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/letsencrypt/boulder/cmd"
 )
 
-// serverProvider represents a type which can provide a list of addresses for
+// ServerProvider represents a type which can provide a list of addresses for
 // the bdns to use as DNS resolvers. Different implementations may provide
 // different strategies for providing addresses, and may provide different kinds
 // of addresses (e.g. host:port combos vs IP addresses).
@@ -52,17 +55,16 @@ func validateServerAddress(address string) error {
 	// Ensure the `port` portion of `address` is a valid port.
 	portNum, err := strconv.Atoi(port)
 	if err != nil {
-		return errors.New("port must be an integer: %s")
+		return fmt.Errorf("parsing port number: %s", err)
 	}
 	if portNum <= 0 || portNum > 65535 {
 		return errors.New("port must be an integer between 0 - 65535")
 	}
 	// Ensure the `host` portion of `address` is a valid FQDN or IP address.
-	IPv6 := net.ParseIP(host).To16()
-	IPv4 := net.ParseIP(host).To4()
+	_, err = netip.ParseAddr(host)
 	FQDN := dns.IsFqdn(dns.Fqdn(host))
-	if IPv6 == nil && IPv4 == nil && !FQDN {
+	if err != nil && !FQDN {
 		return errors.New("host is not an FQDN or IP address")
 	}
 	return nil
@@ -99,37 +101,115 @@ func (sp *staticProvider) Stop() {}
 // addresses, and refreshes it regularly using a goroutine started by its
 // constructor.
 type dynamicProvider struct {
-	// The domain name which should be used for DNS. Will be used as the basis of
-	// a SRV query to locate DNS services on this domain, which will in turn be
-	// used as the basis for A queries to cache IP addrs for those services.
-	name string
+	// dnsAuthority is the single <hostname|IPv4|[IPv6]>:<port> of the DNS
+	// server to be used for resolution of DNS backends. If the address contains
+	// a hostname it will be resolved via the system DNS. If the port is left
+	// unspecified it will default to '53'. If this field is left unspecified
+	// the system DNS will be used for resolution of DNS backends.
+ dnsAuthority string + // service is the service name to look up SRV records for within the domain. + // If this field is left unspecified 'dns' will be used as the service name. + service string + // proto is the IP protocol (tcp or udp) to look up SRV records for. + proto string + // domain is the name to look up SRV records within. + domain string // A map of IP addresses (results of A record lookups for SRV Targets) to // ports (Port fields in SRV records) associated with those addresses. addrs map[string][]uint16 // Other internal bookkeeping state. - cancel chan interface{} + cancel chan any mu sync.RWMutex refresh time.Duration updateCounter *prometheus.CounterVec } +// ParseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// +// Examples: +// - target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// - target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// - target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// - target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +// +// This function is copied from: +// https://github.com/grpc/grpc-go/blob/master/internal/resolver/dns/dns_resolver.go +// It has been minimally modified to fit our code style. +func ParseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errors.New("missing address") + } + ip := net.ParseIP(target) + if ip != nil { + // Target is an IPv4 or IPv6(without brackets) address. + return target, defaultPort, nil + } + host, port, err = net.SplitHostPort(target) + if err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. + // "[::1]:", this is an error. + return "", "", errors.New("missing port after port-separator colon") + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in + // ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + host, port, err = net.SplitHostPort(target + ":" + defaultPort) + if err == nil { + // Target doesn't have port. + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + var _ ServerProvider = &dynamicProvider{} // StartDynamicProvider constructs a new dynamicProvider and starts its // auto-update goroutine. The auto-update process queries DNS for SRV records // at refresh intervals and uses the resulting IP/port combos to populate the // list returned by Addrs. The update process ignores the Priority and Weight -// attributes of the SRV records. The given server name should be a full domain -// name like `example.com`, which will result in SRV queries for `_dns._udp.example.com`. -func StartDynamicProvider(server string, refresh time.Duration) (*dynamicProvider, error) { - if server == "" { - return nil, fmt.Errorf("no DNS domain name provided") +// attributes of the SRV records. +// +// `proto` is the IP protocol (tcp or udp) to look up SRV records for. 
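// Aside: a minimal, self-contained sketch of the ParseTarget behavior
// documented above, assuming this repository's module path for the import
// (ParseTarget is exported by this change). Brackets around an IPv6 host are
// stripped on parse and restored by net.JoinHostPort, matching the table in
// servers_test.go:

package main

import (
	"fmt"
	"net"

	"github.com/letsencrypt/boulder/bdns"
)

func main() {
	host, port, err := bdns.ParseTarget("[2606:4700:4700::1111]", "53")
	if err != nil {
		panic(err)
	}
	// host == "2606:4700:4700::1111", port == "53".
	fmt.Println(net.JoinHostPort(host, port)) // prints [2606:4700:4700::1111]:53
}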
+func StartDynamicProvider(c *cmd.DNSProvider, refresh time.Duration, proto string) (*dynamicProvider, error) { + if c.SRVLookup.Domain == "" { + return nil, fmt.Errorf("'domain' cannot be empty") + } + + service := c.SRVLookup.Service + if service == "" { + // Default to "dns" if no service is specified. This is the default + // service name for DNS servers. + service = "dns" } + + host, port, err := ParseTarget(c.DNSAuthority, "53") + if err != nil { + return nil, err + } + + dnsAuthority := net.JoinHostPort(host, port) + err = validateServerAddress(dnsAuthority) + if err != nil { + return nil, err + } + dp := dynamicProvider{ - name: server, - addrs: make(map[string][]uint16), - cancel: make(chan interface{}), - refresh: refresh, + dnsAuthority: dnsAuthority, + service: service, + proto: proto, + domain: c.SRVLookup.Domain, + addrs: make(map[string][]uint16), + cancel: make(chan any), + refresh: refresh, updateCounter: prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "dns_update", @@ -141,7 +221,7 @@ func StartDynamicProvider(server string, refresh time.Duration) (*dynamicProvide // Update once immediately, so we can know whether that was successful, then // kick off the long-running update goroutine. - err := dp.update() + err = dp.update() if err != nil { return nil, fmt.Errorf("failed to start dynamic provider: %w", err) } @@ -180,25 +260,36 @@ func (dp *dynamicProvider) update() error { ctx, cancel := context.WithTimeout(context.Background(), dp.refresh/2) defer cancel() - _, srvs, err := net.DefaultResolver.LookupSRV(ctx, "dns", "udp", dp.name) + resolver := &net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := &net.Dialer{} + return d.DialContext(ctx, network, dp.dnsAuthority) + }, + } + + // RFC 2782 formatted SRV record being queried e.g. "_service._proto.name." 
+ record := fmt.Sprintf("_%s._%s.%s.", dp.service, dp.proto, dp.domain) + + _, srvs, err := resolver.LookupSRV(ctx, dp.service, dp.proto, dp.domain) if err != nil { - return fmt.Errorf("failed to lookup SRV records for %q: %w", dp.name, err) + return fmt.Errorf("during SRV lookup of %q: %w", record, err) } if len(srvs) == 0 { - return fmt.Errorf("no SRV records found for %q", dp.name) + return fmt.Errorf("SRV lookup of %q returned 0 results", record) } addrPorts := make(map[string][]uint16) for _, srv := range srvs { - addrs, err := net.DefaultResolver.LookupHost(ctx, srv.Target) + addrs, err := resolver.LookupHost(ctx, srv.Target) if err != nil { - return fmt.Errorf("failed to resolve SRV Target %q: %w", srv.Target, err) + return fmt.Errorf("during A/AAAA lookup of target %q from SRV record %q: %w", srv.Target, record, err) } for _, addr := range addrs { joinedHostPort := net.JoinHostPort(addr, fmt.Sprint(srv.Port)) err := validateServerAddress(joinedHostPort) if err != nil { - return fmt.Errorf("invalid SRV addr %q: %w", joinedHostPort, err) + return fmt.Errorf("invalid addr %q from SRV record %q: %w", joinedHostPort, record, err) } addrPorts[addr] = append(addrPorts[addr], srv.Port) } @@ -216,7 +307,7 @@ func (dp *dynamicProvider) Addrs() ([]string, error) { var r []string dp.mu.RLock() for ip, ports := range dp.addrs { - port := fmt.Sprint(ports[rand.Intn(len(ports))]) + port := fmt.Sprint(ports[rand.IntN(len(ports))]) addr := net.JoinHostPort(ip, port) r = append(r, addr) } diff --git a/bdns/servers_test.go b/bdns/servers_test.go index 2ed108df138..5d17d8b07da 100644 --- a/bdns/servers_test.go +++ b/bdns/servers_test.go @@ -2,6 +2,8 @@ package bdns import ( "testing" + + "github.com/letsencrypt/boulder/test" ) func Test_validateServerAddress(t *testing.T) { @@ -60,3 +62,42 @@ func Test_validateServerAddress(t *testing.T) { }) } } + +func Test_resolveDNSAuthority(t *testing.T) { + type want struct { + host string + port string + } + tests := []struct { + name string + target string + want want + wantErr bool + }{ + {"IP4 with port", "10.10.10.10:53", want{"10.10.10.10", "53"}, false}, + {"IP4 without port", "10.10.10.10", want{"10.10.10.10", "53"}, false}, + {"IP6 with port and brackets", "[2606:4700:4700::1111]:53", want{"2606:4700:4700::1111", "53"}, false}, + {"IP6 without port", "2606:4700:4700::1111", want{"2606:4700:4700::1111", "53"}, false}, + {"IP6 with brackets without port", "[2606:4700:4700::1111]", want{"2606:4700:4700::1111", "53"}, false}, + {"hostname with port", "localhost:53", want{"localhost", "53"}, false}, + {"hostname without port", "localhost", want{"localhost", "53"}, false}, + {"only port", ":53", want{"localhost", "53"}, false}, + {"hostname with no port after colon", "localhost:", want{"", ""}, true}, + {"IP4 with no port after colon", "10.10.10.10:", want{"", ""}, true}, + {"IP6 with no port after colon", "[2606:4700:4700::1111]:", want{"", ""}, true}, + {"no hostname or port", "", want{"", ""}, true}, + {"invalid addr", "foo:bar:baz", want{"", ""}, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotHost, gotPort, gotErr := ParseTarget(tt.target, "53") + test.AssertEquals(t, gotHost, tt.want.host) + test.AssertEquals(t, gotPort, tt.want.port) + if tt.wantErr { + test.AssertError(t, gotErr, "expected error") + } else { + test.AssertNotError(t, gotErr, "unexpected error") + } + }) + } +} diff --git a/ca/ca.go b/ca/ca.go index fc97ef6615e..eb510a7b309 100644 --- a/ca/ca.go +++ b/ca/ca.go @@ -1,45 +1,47 @@ package ca import ( + 
"bytes" "context" + "crypto" "crypto/rand" + "crypto/sha256" "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" "encoding/hex" "errors" "fmt" "math/big" - "strings" - "time" + mrand "math/rand/v2" + "slices" - "github.com/beeker1121/goque" ct "github.com/google/certificate-transparency-go" cttls "github.com/google/certificate-transparency-go/tls" "github.com/jmhodges/clock" "github.com/miekg/pkcs11" "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + "google.golang.org/protobuf/types/known/timestamppb" capb "github.com/letsencrypt/boulder/ca/proto" "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" csrlib "github.com/letsencrypt/boulder/csr" berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/issuance" + "github.com/letsencrypt/boulder/linter" blog "github.com/letsencrypt/boulder/log" + rapb "github.com/letsencrypt/boulder/ra/proto" sapb "github.com/letsencrypt/boulder/sa/proto" ) -// Metrics for CA statistics -const ( - csrExtensionCategory = "category" - csrExtensionBasic = "basic" - csrExtensionTLSFeature = "tls-feature" - csrExtensionTLSFeatureInvalid = "tls-feature-invalid" - csrExtensionOther = "other" -) - type certificateType string const ( @@ -47,546 +49,539 @@ const ( certType = certificateType("certificate") ) -// Two maps of keys to Issuers. Lookup by PublicKeyAlgorithm is useful for -// determining which issuer to use to sign a given (pre)cert, based on its -// PublicKeyAlgorithm. Lookup by NameID is useful for looking up the appropriate -// issuer based on the issuer of a given (pre)certificate. -type issuerMaps struct { - byAlg map[x509.PublicKeyAlgorithm]*issuance.Issuer - byNameID map[issuance.IssuerNameID]*issuance.Issuer +// issuanceEvent is logged before and after issuance of precertificates and certificates. +// The `omitempty` fields are not always present. +// CSR, Precertificate, and Certificate are hex-encoded DER bytes to make it easier to +// ad-hoc search for sequences or OIDs in logs. Other data, like public key within CSR, +// is logged as base64 because it doesn't have interesting DER structure. +type issuanceEvent struct { + Requester int64 + OrderID int64 + Profile string + Issuer string + IssuanceRequest *issuance.IssuanceRequest + CSR string `json:",omitempty"` + Result issuanceEventResult } -// certificateAuthorityImpl represents a CA that signs certificates. -// It can sign OCSP responses as well, but only via delegation to an ocspImpl. -type certificateAuthorityImpl struct { - capb.UnimplementedCertificateAuthorityServer - capb.UnimplementedOCSPGeneratorServer - sa sapb.StorageAuthorityCertificateClient - pa core.PolicyAuthority - ocsp *ocspImpl - issuers issuerMaps - - // This is temporary, and will be used for testing and slow roll-out - // of ECDSA issuance, but will then be removed. 
- ecdsaAllowList *ECDSAAllowList - prefix int // Prepended to the serial number - validityPeriod time.Duration - backdate time.Duration - maxNames int - keyPolicy goodkey.KeyPolicy - orphanQueue *goque.Queue - clk clock.Clock - log blog.Logger - signatureCount *prometheus.CounterVec - csrExtensionCount *prometheus.CounterVec - orphanCount *prometheus.CounterVec - adoptedOrphanCount *prometheus.CounterVec - signErrorCount *prometheus.CounterVec +// issuanceEventResult exists just to lend some extra structure to the +// issuanceEvent struct above. +type issuanceEventResult struct { + Precertificate string `json:",omitempty"` + Certificate string `json:",omitempty"` } -// makeIssuerMaps processes a list of issuers into a set of maps, mapping -// nearly-unique identifiers of those issuers to the issuers themselves. Note -// that, if two issuers have the same nearly-unique ID, the *latter* one in -// the input list "wins". -func makeIssuerMaps(issuers []*issuance.Issuer) (issuerMaps, error) { - issuersByAlg := make(map[x509.PublicKeyAlgorithm]*issuance.Issuer, 2) - issuersByNameID := make(map[issuance.IssuerNameID]*issuance.Issuer, len(issuers)) - for _, issuer := range issuers { - for _, alg := range issuer.Algs() { - // TODO(#5259): Enforce that there is only one issuer for each algorithm, - // instead of taking the first issuer for each algorithm type. - if issuersByAlg[alg] == nil { - issuersByAlg[alg] = issuer - } - } - issuersByNameID[issuer.Cert.NameID()] = issuer +// caMetrics holds various metrics which are shared between caImpl and crlImpl. +type caMetrics struct { + signatureCount *prometheus.CounterVec + signErrorCount *prometheus.CounterVec + lintErrorCount prometheus.Counter + certificates *prometheus.CounterVec +} + +func NewCAMetrics(stats prometheus.Registerer) *caMetrics { + signatureCount := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "signatures", + Help: "Number of signatures", + }, []string{"purpose", "issuer"}) + + signErrorCount := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "signature_errors", + Help: "A counter of signature errors labelled by error type", + }, []string{"type"}) + + lintErrorCount := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "lint_errors", + Help: "Number of issuances that were halted by linting errors", + }) + + certificates := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "certificates", + Help: "Number of certificates issued", + }, []string{"profile"}) + + return &caMetrics{signatureCount, signErrorCount, lintErrorCount, certificates} +} + +func (m *caMetrics) noteSignError(err error) { + var pkcs11Error pkcs11.Error + if errors.As(err, &pkcs11Error) { + m.signErrorCount.WithLabelValues("HSM").Inc() } - return issuerMaps{issuersByAlg, issuersByNameID}, nil } +// certificateAuthorityImpl represents a CA that signs certificates. +type certificateAuthorityImpl struct { + capb.UnsafeCertificateAuthorityServer + sa sapb.StorageAuthorityCertificateClient + sctClient rapb.SCTProviderClient + pa core.PolicyAuthority + issuers []*issuance.Issuer + profiles map[string]*issuance.Profile + + // The prefix is prepended to the serial number. 
+ prefix byte + maxNames int + keyPolicy goodkey.KeyPolicy + clk clock.Clock + log blog.Logger + metrics *caMetrics + tracer trace.Tracer +} + +var _ capb.CertificateAuthorityServer = (*certificateAuthorityImpl)(nil) + // NewCertificateAuthorityImpl creates a CA instance that can sign certificates -// from any number of issuance.Issuers according to their profiles, and can sign -// OCSP (via delegation to an ocspImpl and its issuers). +// from any number of issuance.Issuers and for any number of profiles. func NewCertificateAuthorityImpl( sa sapb.StorageAuthorityCertificateClient, + sctService rapb.SCTProviderClient, pa core.PolicyAuthority, - ocsp *ocspImpl, - boulderIssuers []*issuance.Issuer, - ecdsaAllowList *ECDSAAllowList, - certExpiry time.Duration, - certBackdate time.Duration, - serialPrefix int, + issuers []*issuance.Issuer, + profiles map[string]*issuance.Profile, + serialPrefix byte, maxNames int, keyPolicy goodkey.KeyPolicy, - orphanQueue *goque.Queue, logger blog.Logger, - stats prometheus.Registerer, - signatureCount *prometheus.CounterVec, - signErrorCount *prometheus.CounterVec, + metrics *caMetrics, clk clock.Clock, ) (*certificateAuthorityImpl, error) { - var ca *certificateAuthorityImpl - var err error + if serialPrefix < 0x01 || serialPrefix > 0x7f { + return nil, errors.New("serial prefix must be between 0x01 (1) and 0x7f (127)") + } - // TODO(briansmith): Make the backdate setting mandatory after the - // production ca.json has been updated to include it. Until then, manually - // default to 1h, which is the backdating duration we currently use. - if certBackdate == 0 { - certBackdate = time.Hour + if len(issuers) == 0 { + return nil, errors.New("must have at least one issuer") } - if serialPrefix <= 0 || serialPrefix >= 256 { - err = errors.New("Must have a positive non-zero serial prefix less than 256 for CA.") - return nil, err + if len(profiles) == 0 { + return nil, errors.New("must have at least one certificate profile") } - issuers, err := makeIssuerMaps(boulderIssuers) - if err != nil { - return nil, err + + issuableKeys := make(map[x509.PublicKeyAlgorithm]bool) + issuableProfiles := make(map[string]bool) + for _, issuer := range issuers { + if issuer.IsActive() && len(issuer.Profiles()) == 0 { + return nil, fmt.Errorf("issuer %q is active but has no profiles", issuer.Name()) + } + + for _, profile := range issuer.Profiles() { + _, ok := profiles[profile] + if !ok { + return nil, fmt.Errorf("issuer %q lists profile %q, which is not configured", issuer.Name(), profile) + } + issuableProfiles[profile] = true + } + + issuableKeys[issuer.KeyType()] = true } - csrExtensionCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "csr_extensions", - Help: "Number of CSRs with extensions of the given category", - }, - []string{csrExtensionCategory}) - stats.MustRegister(csrExtensionCount) - - orphanCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "orphans", - Help: "Number of orphaned certificates labelled by type (precert, cert)", - }, - []string{"type"}) - stats.MustRegister(orphanCount) - - adoptedOrphanCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "adopted_orphans", - Help: "Number of orphaned certificates adopted from the orphan queue by type (precert, cert)", - }, - []string{"type"}) - stats.MustRegister(adoptedOrphanCount) - - ca = &certificateAuthorityImpl{ - sa: sa, - pa: pa, - ocsp: ocsp, - issuers: issuers, - validityPeriod: certExpiry, - backdate: certBackdate, - prefix: serialPrefix, - maxNames: 
maxNames, - keyPolicy: keyPolicy, - orphanQueue: orphanQueue, - log: logger, - signatureCount: signatureCount, - csrExtensionCount: csrExtensionCount, - orphanCount: orphanCount, - adoptedOrphanCount: adoptedOrphanCount, - signErrorCount: signErrorCount, - clk: clk, - ecdsaAllowList: ecdsaAllowList, - } - - return ca, nil -} + for profile := range profiles { + if !issuableProfiles[profile] { + return nil, fmt.Errorf("profile %q configured, but no issuer lists it", profile) + } + } -// noteSignError is called after operations that may cause a PKCS11 signing error. -func (ca *certificateAuthorityImpl) noteSignError(err error) { - var pkcs11Error *pkcs11.Error - if errors.As(err, &pkcs11Error) { - ca.signErrorCount.WithLabelValues("HSM").Inc() + for _, keyAlg := range []x509.PublicKeyAlgorithm{x509.ECDSA, x509.RSA} { + if !issuableKeys[keyAlg] { + return nil, fmt.Errorf("no %s issuers configured", keyAlg) + } } -} -var ocspStatusToCode = map[string]int{ - "good": ocsp.Good, - "revoked": ocsp.Revoked, - "unknown": ocsp.Unknown, + return &certificateAuthorityImpl{ + sa: sa, + sctClient: sctService, + pa: pa, + issuers: issuers, + profiles: profiles, + prefix: serialPrefix, + maxNames: maxNames, + keyPolicy: keyPolicy, + log: logger, + metrics: metrics, + tracer: otel.GetTracerProvider().Tracer("github.com/letsencrypt/boulder/ca"), + clk: clk, + }, nil } -func (ca *certificateAuthorityImpl) IssuePrecertificate(ctx context.Context, issueReq *capb.IssueCertificateRequest) (*capb.IssuePrecertificateResponse, error) { - // issueReq.orderID may be zero, for ACMEv1 requests. - if core.IsAnyNilOrZero(issueReq, issueReq.Csr, issueReq.RegistrationID) { +// IssueCertificate is the gRPC handler responsible for the entire [issuance +// cycle]. It takes as input just a CSR and a profile name. It generates the +// unique serial number locally, and uses the profile and the CA's clock to +// generate the validity period. It writes the serial to the database to prevent +// duplicate use of serials, generates and stores the *linting* precertificate +// as a record of what we intended to issue, contacts the SCTService (currently +// an RA instance) to retrieve SCTs, and finally generates and saves the final +// certificate. +// +// [issuance cycle]: +// https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md +func (ca *certificateAuthorityImpl) IssueCertificate(ctx context.Context, req *capb.IssueCertificateRequest) (*capb.IssueCertificateResponse, error) { + // Step 1: Locally process the gRPC request and its embedded CSR to extract + // the relevant information, like the pubkey and SANs. Also generate + // some metadata from scratch, such as the serial and validity period. 
+ if core.IsAnyNilOrZero(req, req.RegistrationID, req.OrderID, req.CertProfileName, req.Csr) { return nil, berrors.InternalServerError("Incomplete issue certificate request") } - serialBigInt, validity, err := ca.generateSerialNumberAndValidity() + if ca.sctClient == nil { + return nil, errors.New("IssueCertificate called with a nil SCT service") + } + + profile, ok := ca.profiles[req.CertProfileName] + if !ok { + return nil, fmt.Errorf("incapable of using a profile named %q", req.CertProfileName) + } + + notBefore, notAfter := profile.GenerateValidity(ca.clk.Now()) + + csr, err := x509.ParseCertificateRequest(req.Csr) if err != nil { return nil, err } - serialHex := core.SerialToString(serialBigInt) - regID := issueReq.RegistrationID - nowNanos := ca.clk.Now().UnixNano() - expiresNanos := validity.NotAfter.UnixNano() - _, err = ca.sa.AddSerial(ctx, &sapb.AddSerialRequest{ - Serial: serialHex, - RegID: regID, - Created: nowNanos, - Expires: expiresNanos, - }) + err = csrlib.VerifyCSR(ctx, csr, ca.maxNames, &ca.keyPolicy, ca.pa) if err != nil { return nil, err } - precertDER, ocspResp, issuer, err := ca.issuePrecertificateInner(ctx, issueReq, serialBigInt, validity) + issuer, err := ca.pickIssuer(req.CertProfileName, csr.PublicKeyAlgorithm) if err != nil { return nil, err } - issuerID := issuer.Cert.NameID() - req := &sapb.AddCertificateRequest{ - Der: precertDER, - RegID: regID, - Ocsp: ocspResp.Response, - Issued: nowNanos, - IssuerID: int64(issuerID), + if issuer.Cert.NotAfter.Before(notAfter) { + err = berrors.InternalServerError("cannot issue a certificate that expires after the issuer certificate") + return nil, err } - _, err = ca.sa.AddPrecertificate(ctx, req) + subjectKeyId, err := generateSKID(csr.PublicKey) if err != nil { - ca.orphanCount.With(prometheus.Labels{"type": "precert"}).Inc() - err = berrors.InternalServerError(err.Error()) - // Note: This log line is parsed by cmd/orphan-finder. If you make any - // changes here, you should make sure they are reflected in orphan-finder. - ca.log.AuditErrf("Failed RPC to store at SA, orphaning precertificate: serial=[%s], cert=[%s], issuerID=[%d], regID=[%d], orderID=[%d], err=[%v]", - serialHex, hex.EncodeToString(precertDER), issuerID, issueReq.RegistrationID, issueReq.OrderID, err) - if ca.orphanQueue != nil { - ca.queueOrphan(&orphanedCert{ - DER: precertDER, - RegID: regID, - OCSPResp: ocspResp.Response, - Precert: true, - IssuerID: int64(issuerID), - }) - } - return nil, err + return nil, fmt.Errorf("computing subject key ID: %w", err) } - return &capb.IssuePrecertificateResponse{ - DER: precertDER, - }, nil -} + dnsNames, ipAddresses, err := identifier.FromCSR(csr).ToValues() + if err != nil { + return nil, err + } -// IssueCertificateForPrecertificate takes a precertificate and a set -// of SCTs for that precertificate and uses the signer to create and -// sign a certificate from them. The poison extension is removed and a -// SCT list extension is inserted in its place. Except for this and the -// signature the certificate exactly matches the precertificate. After -// the certificate is signed a OCSP response is generated and the -// response and certificate are stored in the database. -// -// It's critical not to sign two different final certificates for the same -// precertificate. This can happen, for instance, if the caller provides a -// different set of SCTs on subsequent calls to IssueCertificateForPrecertificate. -// We rely on the RA not to call IssueCertificateForPrecertificate twice for the -// same serial. 
This is accomplished by the fact that -// IssueCertificateForPrecertificate is only ever called in a straight-through -// RPC path without retries. If there is any error, including a networking -// error, the whole certificate issuance attempt fails and any subsequent -// issuance will use a different serial number. -// -// We also check that the provided serial number does not already exist as a -// final certificate, but this is just a belt-and-suspenders measure, since -// there could be race conditions where two goroutines are issuing for the same -// serial number at the same time. -func (ca *certificateAuthorityImpl) IssueCertificateForPrecertificate(ctx context.Context, req *capb.IssueCertificateForPrecertificateRequest) (*corepb.Certificate, error) { - // issueReq.orderID may be zero, for ACMEv1 requests. - if core.IsAnyNilOrZero(req, req.DER, req.SCTs, req.RegistrationID) { - return nil, berrors.InternalServerError("Incomplete cert for precertificate request") + var ipStrings []string + for _, ip := range csr.IPAddresses { + ipStrings = append(ipStrings, ip.String()) } - precert, err := x509.ParseCertificate(req.DER) + serialBigInt, err := ca.generateSerialNumber() if err != nil { return nil, err } + serialHex := core.SerialToString(serialBigInt) + + // Step 2: Persist the serial and minimal metadata, to ensure that we never + // duplicate a serial. + _, err = ca.sa.AddSerial(ctx, &sapb.AddSerialRequest{ + Serial: serialHex, + RegID: req.RegistrationID, + Created: timestamppb.New(ca.clk.Now()), + Expires: timestamppb.New(notAfter), + }) + if err != nil { + return nil, fmt.Errorf("persisting serial to database: %w", err) + } + + // Step 3: Issue the linting precert, persist it to the database, and then + // issue the real precert. + precertReq := &issuance.IssuanceRequest{ + PublicKey: issuance.MarshalablePublicKey{PublicKey: csr.PublicKey}, + SubjectKeyId: subjectKeyId, + Serial: serialBigInt.Bytes(), + NotBefore: notBefore, + NotAfter: notAfter, + CommonName: csrlib.CNFromCSR(csr), + DNSNames: dnsNames, + IPAddresses: ipAddresses, + IncludeCTPoison: true, + } + + _, span := ca.tracer.Start(ctx, "issuance", trace.WithAttributes( + attribute.String("serial", serialHex), + attribute.String("issuer", issuer.Name()), + attribute.String("certProfileName", req.CertProfileName), + attribute.StringSlice("names", csr.DNSNames), + attribute.StringSlice("ipAddresses", ipStrings), + )) + defer span.End() + + lintPrecertDER, issuanceToken, err := issuer.Prepare(profile, precertReq) + if err != nil { + ca.log.AuditErr("Preparing precert failed", err, map[string]any{"serial": serialHex}) + if errors.Is(err, linter.ErrLinting) { + ca.metrics.lintErrorCount.Inc() + } + return nil, fmt.Errorf("failed to prepare precertificate signing: %w", err) + } + + // Note: we write the linting certificate bytes to this table, rather than the precertificate + // (which we audit log but do not put in the database). This is to ensure that even if there is + // an error immediately after signing the precertificate, we have a record in the DB of what we + // intended to sign, and can do revocations based on that. See #6807. + // The name of the SA method ("AddPrecertificate") is a historical artifact. 
+ _, err = ca.sa.AddPrecertificate(context.Background(), &sapb.AddCertificateRequest{ + Der: lintPrecertDER, + RegID: req.RegistrationID, + Issued: timestamppb.New(ca.clk.Now()), + IssuerNameID: int64(issuer.NameID()), + }) + if err != nil { + return nil, fmt.Errorf("persisting linting precert to database: %w", err) + } + + ca.log.AuditInfo("Signing precert", issuanceEvent{ + Requester: req.RegistrationID, + OrderID: req.OrderID, + Profile: req.CertProfileName, + Issuer: issuer.Name(), + IssuanceRequest: precertReq, + CSR: hex.EncodeToString(csr.Raw), + }) + + precertDER, err := issuer.Issue(issuanceToken) + if err != nil { + ca.metrics.noteSignError(err) + ca.log.AuditErr("Signing precert failed", err, map[string]any{"serial": serialHex}) + return nil, fmt.Errorf("failed to sign precertificate: %w", err) + } + ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(precertType), "issuer": issuer.Name()}).Inc() + + ca.log.AuditInfo("Signing precert success", issuanceEvent{ + Requester: req.RegistrationID, + OrderID: req.OrderID, + Profile: req.CertProfileName, + Issuer: issuer.Name(), + IssuanceRequest: precertReq, + Result: issuanceEventResult{Precertificate: hex.EncodeToString(precertDER)}, + }) - serialHex := core.SerialToString(precert.SerialNumber) - if _, err = ca.sa.GetCertificate(ctx, &sapb.Serial{Serial: serialHex}); err == nil { - err = berrors.InternalServerError("issuance of duplicate final certificate requested: %s", serialHex) - ca.log.AuditErr(err.Error()) + err = tbsCertIsDeterministic(lintPrecertDER, precertDER) + if err != nil { return nil, err - } else if !errors.Is(err, berrors.NotFound) { - return nil, fmt.Errorf("error checking for duplicate issuance of %s: %s", serialHex, err) } + + precert, err := x509.ParseCertificate(precertDER) + if err != nil { + return nil, fmt.Errorf("parsing precertificate: %w", err) + } + + // Step 4: Get SCTs for inclusion in the final certificate. + sctResp, err := ca.sctClient.GetSCTs(ctx, &rapb.SCTRequest{PrecertDER: precertDER}) + if err != nil { + return nil, fmt.Errorf("getting SCTs: %w", err) + } + var scts []ct.SignedCertificateTimestamp - for _, sctBytes := range req.SCTs { + for _, singleSCTBytes := range sctResp.SctDER { var sct ct.SignedCertificateTimestamp - _, err = cttls.Unmarshal(sctBytes, &sct) + _, err = cttls.Unmarshal(singleSCTBytes, &sct) if err != nil { return nil, err } scts = append(scts, sct) } - issuer, ok := ca.issuers.byNameID[issuance.GetIssuerNameID(precert)] - if !ok { - return nil, berrors.InternalServerError("no issuer found for Issuer Name %s", precert.Issuer) + // Step 5: Issue and save the final certificate. + // + // Given a precertificate, a set of SCTs for that precertificate, and the same + // issuer and profile which were used to generate that precert, generate a + // linting final certificate, then sign a final certificate using a real + // issuer. The poison extension is removed from the precertificate and a SCT + // list extension is inserted in its place. Except for this and the signature + // the final certificate exactly matches the precertificate. + // + // It's critical not to sign two different final certificates for the same + // precertificate. That's why this code is inline: the only way to reach this + // point is to already have generated a unique serial and unique precert; if + // any of the previous steps returned an error, then the whole certificate + // issuance attempt fails and any subsequent attempt to reach this code will + // generate a new serial. 
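// Aside: conceptually, the precert-to-cert step below swaps the CT poison
// extension for an SCT-list extension while leaving everything else intact.
// A hedged sketch of that swap over raw pkix extensions; swapPoisonForSCTs
// and sctListDER are hypothetical names, and the OIDs are per RFC 6962
// s3.1/s3.3:

package main

import (
	"crypto/x509/pkix"
	"encoding/asn1"
)

var (
	oidCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
	oidSCTList  = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
)

func swapPoisonForSCTs(exts []pkix.Extension, sctListDER []byte) []pkix.Extension {
	out := make([]pkix.Extension, 0, len(exts))
	for _, ext := range exts {
		if ext.Id.Equal(oidCTPoison) {
			// Replace the poison in place so extension order is preserved.
			out = append(out, pkix.Extension{Id: oidSCTList, Value: sctListDER})
			continue
		}
		out = append(out, ext)
	}
	return out
}

func main() {}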
+ certReq, err := issuance.RequestFromPrecert(precert, scts) + if err != nil { + return nil, err } - issuanceReq, err := issuance.RequestFromPrecert(precert, scts) + lintCertDER, issuanceToken, err := issuer.Prepare(profile, certReq) if err != nil { - return nil, err + ca.log.AuditErr("Preparing cert failed", err, map[string]any{"serial": serialHex}) + return nil, fmt.Errorf("failed to prepare certificate signing: %w", err) } - certDER, err := issuer.Issue(issuanceReq) + + ca.log.AuditInfo("Signing cert", issuanceEvent{ + Requester: req.RegistrationID, + OrderID: req.OrderID, + Profile: req.CertProfileName, + Issuer: issuer.Name(), + IssuanceRequest: certReq, + }) + + certDER, err := issuer.Issue(issuanceToken) + if err != nil { + ca.metrics.noteSignError(err) + ca.log.AuditErr("Signing cert failed", err, map[string]any{"serial": serialHex}) + return nil, fmt.Errorf("failed to sign certificate: %w", err) + } + ca.metrics.signatureCount.With(prometheus.Labels{"purpose": string(certType), "issuer": issuer.Name()}).Inc() + ca.metrics.certificates.With(prometheus.Labels{"profile": req.CertProfileName}).Inc() + + ca.log.AuditInfo("Signing cert success", issuanceEvent{ + Requester: req.RegistrationID, + OrderID: req.OrderID, + Profile: req.CertProfileName, + Issuer: issuer.Name(), + IssuanceRequest: certReq, + Result: issuanceEventResult{Certificate: hex.EncodeToString(certDER)}, + }) + + err = tbsCertIsDeterministic(lintCertDER, certDER) if err != nil { return nil, err } - ca.signatureCount.With(prometheus.Labels{"purpose": string(certType), "issuer": issuer.Name()}).Inc() - ca.log.AuditInfof("Signing success: serial=[%s] regID=[%d] names=[%s] csr=[%s] certificate=[%s]", - serialHex, req.RegistrationID, strings.Join(precert.DNSNames, ", "), hex.EncodeToString(req.DER), - hex.EncodeToString(certDER)) - err = ca.storeCertificate(ctx, req.RegistrationID, req.OrderID, precert.SerialNumber, certDER, int64(issuer.Cert.NameID())) + + _, err = ca.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ + Der: certDER, + RegID: req.RegistrationID, + Issued: timestamppb.New(ca.clk.Now()), + }) if err != nil { - return nil, err + ca.log.AuditErr("Storing cert failed", err, map[string]any{"serial": serialHex}) + return nil, fmt.Errorf("persisting cert to database: %w", err) } - return &corepb.Certificate{ - RegistrationID: req.RegistrationID, - Serial: core.SerialToString(precert.SerialNumber), - Der: certDER, - Digest: core.Fingerprint256(certDER), - Issued: precert.NotBefore.UnixNano(), - Expires: precert.NotAfter.UnixNano(), - }, nil + + return &capb.IssueCertificateResponse{DER: certDER}, nil } -type validity struct { - NotBefore time.Time - NotAfter time.Time +// pickIssuer returns an issuer which is willing to issue certificates for the +// given profile and public key algorithm. If no such issuer exists, it returns +// an error. If multiple such issuers exist, it selects one at random. 
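// Aside: the selection strategy described above, reduced to a self-contained
// sketch. pickRandom and the pool contents are hypothetical; the real method
// below additionally filters on IsActive, KeyType, and Profiles before
// choosing.

package main

import (
	"errors"
	"fmt"
	"math/rand/v2"
)

func pickRandom[T any](pool []T) (T, error) {
	var zero T
	if len(pool) == 0 {
		return zero, errors.New("empty pool")
	}
	// A uniform random choice spreads issuance load across equivalent issuers.
	return pool[rand.IntN(len(pool))], nil
}

func main() {
	issuer, err := pickRandom([]string{"int-e1", "int-e2"})
	fmt.Println(issuer, err)
}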
+func (ca *certificateAuthorityImpl) pickIssuer(profileName string, keyAlg x509.PublicKeyAlgorithm) (*issuance.Issuer, error) { + var pool []*issuance.Issuer + for _, issuer := range ca.issuers { + if !issuer.IsActive() { + continue + } + if issuer.KeyType() != keyAlg { + continue + } + if !slices.Contains(issuer.Profiles(), profileName) { + continue + } + pool = append(pool, issuer) + } + + if len(pool) == 0 { + return nil, fmt.Errorf("no issuer found for profile %q and key algorithm %s", profileName, keyAlg) + } + + return pool[mrand.IntN(len(pool))], nil } -func (ca *certificateAuthorityImpl) generateSerialNumberAndValidity() (*big.Int, validity, error) { +// generateSerialNumber produces a big.Int which has more than 64 bits of +// entropy and has the CA's configured one-byte prefix. +func (ca *certificateAuthorityImpl) generateSerialNumber() (*big.Int, error) { // We want 136 bits of random number, plus an 8-bit instance id prefix. const randBits = 136 serialBytes := make([]byte, randBits/8+1) - serialBytes[0] = byte(ca.prefix) + serialBytes[0] = ca.prefix _, err := rand.Read(serialBytes[1:]) if err != nil { err = berrors.InternalServerError("failed to generate serial: %s", err) - ca.log.AuditErrf("Serial randomness failed, err=[%v]", err) - return nil, validity{}, err + ca.log.AuditErr("Serial randomness failed", err, nil) + return nil, err } serialBigInt := big.NewInt(0) serialBigInt = serialBigInt.SetBytes(serialBytes) - notBefore := ca.clk.Now().Add(-ca.backdate) - validity := validity{ - NotBefore: notBefore, - NotAfter: notBefore.Add(ca.validityPeriod - time.Second), - } - - return serialBigInt, validity, nil + return serialBigInt, nil } -func (ca *certificateAuthorityImpl) issuePrecertificateInner(ctx context.Context, issueReq *capb.IssueCertificateRequest, serialBigInt *big.Int, validity validity) ([]byte, *capb.OCSPResponse, *issuance.Issuer, error) { - csr, err := x509.ParseCertificateRequest(issueReq.Csr) +// generateSKID computes the Subject Key Identifier using one of the methods in +// RFC 7093 Section 2 Additional Methods for Generating Key Identifiers: +// The keyIdentifier [may be] composed of the leftmost 160-bits of the +// SHA-256 hash of the value of the BIT STRING subjectPublicKey +// (excluding the tag, length, and number of unused bits). +func generateSKID(pk crypto.PublicKey) ([]byte, error) { + pkBytes, err := x509.MarshalPKIXPublicKey(pk) if err != nil { - return nil, nil, nil, err - } - - err = csrlib.VerifyCSR(ctx, csr, ca.maxNames, &ca.keyPolicy, ca.pa) - if err != nil { - ca.log.AuditErr(err.Error()) - // VerifyCSR returns berror instances that can be passed through as-is - // without wrapping. - return nil, nil, nil, err - } - - var issuer *issuance.Issuer - var ok bool - if issueReq.IssuerNameID == 0 { - // Use the issuer which corresponds to the algorithm of the public key - // contained in the CSR, unless we have an allowlist of registration IDs - // for ECDSA, in which case switch all not-allowed accounts to RSA issuance. 
- alg := csr.PublicKeyAlgorithm - if alg == x509.ECDSA && !features.Enabled(features.ECDSAForAll) && ca.ecdsaAllowList != nil && !ca.ecdsaAllowList.permitted(issueReq.RegistrationID) { - alg = x509.RSA - } - issuer, ok = ca.issuers.byAlg[alg] - if !ok { - return nil, nil, nil, berrors.InternalServerError("no issuer found for public key algorithm %s", csr.PublicKeyAlgorithm) - } - } else { - issuer, ok = ca.issuers.byNameID[issuance.IssuerNameID(issueReq.IssuerNameID)] - if !ok { - return nil, nil, nil, berrors.InternalServerError("no issuer found for IssuerNameID %d", issueReq.IssuerNameID) - } + return nil, err } - if issuer.Cert.NotAfter.Before(validity.NotAfter) { - err = berrors.InternalServerError("cannot issue a certificate that expires after the issuer certificate") - ca.log.AuditErr(err.Error()) - return nil, nil, nil, err + var pkixPublicKey struct { + Algo pkix.AlgorithmIdentifier + BitString asn1.BitString } - - serialHex := core.SerialToString(serialBigInt) - - // Generate ocsp response before issuing precertificate - ocspResp, err := ca.ocsp.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{ - Serial: serialHex, - IssuerID: int64(issuer.Cert.NameID()), - Status: string(core.OCSPStatusGood), - }) - if err != nil { - err = berrors.InternalServerError(err.Error()) - ca.log.AuditInfof("OCSP Signing for precertificate failure: serial=[%s] err=[%s]", serialHex, err) - return nil, nil, nil, err - } - - ca.log.AuditInfof("Signing: serial=[%s] regID=[%d] names=[%s] csr=[%s]", - serialHex, issueReq.RegistrationID, strings.Join(csr.DNSNames, ", "), hex.EncodeToString(csr.Raw)) - certDER, err := issuer.Issue(&issuance.IssuanceRequest{ - PublicKey: csr.PublicKey, - Serial: serialBigInt.Bytes(), - CommonName: csr.Subject.CommonName, - DNSNames: csr.DNSNames, - IncludeCTPoison: true, - IncludeMustStaple: issuance.ContainsMustStaple(csr.Extensions), - NotBefore: validity.NotBefore, - NotAfter: validity.NotAfter, - }) - ca.noteSignError(err) - if err != nil { - err = berrors.InternalServerError("failed to sign certificate: %s", err) - ca.log.AuditErrf("Signing failed: serial=[%s] err=[%v]", serialHex, err) - return nil, nil, nil, err + if _, err := asn1.Unmarshal(pkBytes, &pkixPublicKey); err != nil { + return nil, err } - ca.signatureCount.With(prometheus.Labels{"purpose": string(precertType), "issuer": issuer.Name()}).Inc() - - ca.log.AuditInfof("Signing success: serial=[%s] regID=[%d] names=[%s] csr=[%s] precertificate=[%s]", - serialHex, issueReq.RegistrationID, strings.Join(csr.DNSNames, ", "), hex.EncodeToString(csr.Raw), - hex.EncodeToString(certDER)) - return certDER, ocspResp, issuer, nil + skid := sha256.Sum256(pkixPublicKey.BitString.Bytes) + return skid[0:20:20], nil } -func (ca *certificateAuthorityImpl) storeCertificate( - ctx context.Context, - regID int64, - orderID int64, - serialBigInt *big.Int, - certDER []byte, - issuerID int64) error { - var err error - _, err = ca.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: regID, - Issued: ca.clk.Now().UnixNano(), - }) - if err != nil { - ca.orphanCount.With(prometheus.Labels{"type": "cert"}).Inc() - err = berrors.InternalServerError(err.Error()) - // Note: This log line is parsed by cmd/orphan-finder. If you make any - // changes here, you should make sure they are reflected in orphan-finder. 
-	ca.log.AuditErrf("Failed RPC to store at SA, orphaning certificate: serial=[%s] cert=[%s] err=[%v], regID=[%d], orderID=[%d]",
-		core.SerialToString(serialBigInt), hex.EncodeToString(certDER), err, regID, orderID)
-	if ca.orphanQueue != nil {
-		ca.queueOrphan(&orphanedCert{
-			DER:      certDER,
-			RegID:    regID,
-			IssuerID: issuerID,
-		})
+// tbsCertIsDeterministic verifies that the x509.CreateCertificate signing
+// operation is deterministic and produced identical DER bytes between the given
+// lint certificate and leaf certificate. If the DER byte equality check fails
+// it's misissuance, but it's better to know about the problem sooner than
+// later. The caller is responsible for passing the appropriate valid
+// certificate bytes in the correct position.
+func tbsCertIsDeterministic(lintCertBytes []byte, leafCertBytes []byte) error {
+	if core.IsAnyNilOrZero(lintCertBytes, leafCertBytes) {
+		return fmt.Errorf("lintCertBytes or leafCertBytes were nil")
+	}
+
+	// extractTBSCertBytes is a partial copy of //crypto/x509/parser.go to
+	// extract the RawTBSCertificate field from given DER bytes. It returns
+	// the RawTBSCertificate field bytes or an error if the given bytes
+	// cannot be parsed. This is far more performant than parsing the entire
+	// *Certificate structure with x509.ParseCertificate().
+	//
+	// RFC 5280, Section 4.1
+	//	Certificate  ::=  SEQUENCE  {
+	//		tbsCertificate       TBSCertificate,
+	//		signatureAlgorithm   AlgorithmIdentifier,
+	//		signatureValue       BIT STRING }
+	//
+	//	TBSCertificate  ::=  SEQUENCE  {
+	//		..
+	extractTBSCertBytes := func(inputDERBytes *[]byte) ([]byte, error) {
+		input := cryptobyte.String(*inputDERBytes)
+
+		// Extract the Certificate bytes
+		if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
+			return nil, errors.New("malformed certificate")
 		}
-		return err
-	}
-	return nil
-}
 
-type orphanedCert struct {
-	DER      []byte
-	OCSPResp []byte
-	RegID    int64
-	Precert  bool
-	IssuerID int64
-}
-
-func (ca *certificateAuthorityImpl) queueOrphan(o *orphanedCert) {
-	if _, err := ca.orphanQueue.EnqueueObject(o); err != nil {
-		ca.log.AuditErrf("failed to queue orphan for integration: %s", err)
-	}
-}
+		var tbs cryptobyte.String
+		// Extract the TBSCertificate bytes from the Certificate bytes
+		if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
+			return nil, errors.New("malformed tbs certificate")
+		}
 
-// OrphanIntegrationLoop runs a loop executing integrateOrphans and then waiting a minute.
-// It is split out into a separate function called directly by boulder-ca in order to make
-// testing the orphan queue functionality somewhat more simple.
-func (ca *certificateAuthorityImpl) OrphanIntegrationLoop() {
-	for {
-		err := ca.integrateOrphan()
-		if err != nil {
-			if err == goque.ErrEmpty {
-				time.Sleep(time.Minute)
-				continue
-			}
-			ca.log.AuditErrf("failed to integrate orphaned certs: %s", err)
-			time.Sleep(time.Second)
+		if tbs.Empty() {
+			return nil, errors.New("parsed RawTBSCertificate field was empty")
 		}
+
+		return tbs, nil
 	}
-}
 
-// integrateOrpan removes an orphan from the queue and adds it to the database. The
-// item isn't dequeued until it is actually added to the database to prevent items from
-// being lost if the CA is restarted between the item being dequeued and being added to
-// the database. It calculates the issuance time by subtracting the backdate period from
-// the notBefore time.
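// Aside: a self-contained sketch of the cryptobyte technique used by
// extractTBSCertBytes above. A throwaway self-signed certificate stands in
// for the lint/leaf pair; the two ReadASN1 calls mirror the closure's
// parsing. Note that ReadASN1 strips the outer tag and length bytes, which
// is fine because the result is only ever compared against another
// extraction done the same way.

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"errors"
	"fmt"
	"math/big"
	"time"

	"golang.org/x/crypto/cryptobyte"
	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func tbsBytes(der []byte) ([]byte, error) {
	input := cryptobyte.String(der)
	// Step into the outer Certificate SEQUENCE...
	if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) {
		return nil, errors.New("malformed certificate")
	}
	// ...then read out the TBSCertificate SEQUENCE that leads it.
	var tbs cryptobyte.String
	if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) {
		return nil, errors.New("malformed tbs certificate")
	}
	return tbs, nil
}

func main() {
	// Build a throwaway self-signed certificate just to have valid input.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "sketch"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)

	a, _ := tbsBytes(der)
	b, _ := tbsBytes(der)
	fmt.Println(bytes.Equal(a, b)) // true: same DER, same TBSCertificate
}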
-func (ca *certificateAuthorityImpl) integrateOrphan() error { - item, err := ca.orphanQueue.Peek() + lintRawTBSCert, err := extractTBSCertBytes(&lintCertBytes) if err != nil { - if err == goque.ErrEmpty { - return goque.ErrEmpty - } - return fmt.Errorf("failed to peek into orphan queue: %s", err) + return fmt.Errorf("while extracting lint TBS cert: %w", err) } - var orphan orphanedCert - if err = item.ToObject(&orphan); err != nil { - return fmt.Errorf("failed to marshal orphan: %s", err) - } - cert, err := x509.ParseCertificate(orphan.DER) + + leafRawTBSCert, err := extractTBSCertBytes(&leafCertBytes) if err != nil { - return fmt.Errorf("failed to parse orphan: %s", err) - } - // When calculating the `NotBefore` at issuance time, we subtracted - // ca.backdate. Now, to calculate the actual issuance time from the NotBefore, - // we reverse the process and add ca.backdate. - issued := cert.NotBefore.Add(ca.backdate) - if orphan.Precert { - _, err = ca.sa.AddPrecertificate(context.Background(), &sapb.AddCertificateRequest{ - Der: orphan.DER, - RegID: orphan.RegID, - Ocsp: orphan.OCSPResp, - Issued: issued.UnixNano(), - IssuerID: orphan.IssuerID, - }) - if err != nil && !errors.Is(err, berrors.Duplicate) { - return fmt.Errorf("failed to store orphaned precertificate: %s", err) - } - } else { - _, err = ca.sa.AddCertificate(context.Background(), &sapb.AddCertificateRequest{ - Der: orphan.DER, - RegID: orphan.RegID, - Issued: issued.UnixNano(), - }) - if err != nil && !errors.Is(err, berrors.Duplicate) { - return fmt.Errorf("failed to store orphaned certificate: %s", err) - } - } - if _, err = ca.orphanQueue.Dequeue(); err != nil { - return fmt.Errorf("failed to dequeue integrated orphaned certificate: %s", err) + return fmt.Errorf("while extracting leaf TBS cert: %w", err) } - ca.log.AuditInfof("Incorporated orphaned certificate: serial=[%s] cert=[%s] regID=[%d]", - core.SerialToString(cert.SerialNumber), hex.EncodeToString(orphan.DER), orphan.RegID) - typ := "cert" - if orphan.Precert { - typ = "precert" + + if !bytes.Equal(lintRawTBSCert, leafRawTBSCert) { + return fmt.Errorf("mismatch between lintCert and leafCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintRawTBSCert, leafRawTBSCert) } - ca.adoptedOrphanCount.With(prometheus.Labels{"type": typ}).Inc() - return nil -} -// GenerateOCSP is simply a passthrough to ocspImpl.GenerateOCSP so that other -// services which need to talk to the CA anyway can do so without configuring -// two separate gRPC service backends. 
-func (ca *certificateAuthorityImpl) GenerateOCSP(ctx context.Context, req *capb.GenerateOCSPRequest) (*capb.OCSPResponse, error) { - return ca.ocsp.GenerateOCSP(ctx, req) + return nil } diff --git a/ca/ca_test.go b/ca/ca_test.go index 09653a7217d..77c0c4bebb2 100644 --- a/ca/ca_test.go +++ b/ca/ca_test.go @@ -1,8 +1,8 @@ package ca import ( + "bytes" "context" - "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -11,33 +11,34 @@ import ( "encoding/asn1" "errors" "fmt" - "io/ioutil" "math/big" + "os" "strings" "testing" "time" - "github.com/beeker1121/goque" ct "github.com/google/certificate-transparency-go" cttls "github.com/google/certificate-transparency-go/tls" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/jmhodges/clock" + "github.com/miekg/pkcs11" "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" berrors "github.com/letsencrypt/boulder/errors" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/issuance" - "github.com/letsencrypt/boulder/linter" blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/must" "github.com/letsencrypt/boulder/policy" + rapb "github.com/letsencrypt/boulder/ra/proto" sapb "github.com/letsencrypt/boulder/sa/proto" "github.com/letsencrypt/boulder/test" ) @@ -54,12 +55,6 @@ var ( // * Includes an extensionRequest attribute for a well-formed TLS Feature extension MustStapleCSR = mustRead("./testdata/must_staple.der.csr") - // CSR generated by Go: - // * Random public key - // * CN = not-example.com - // * Includes extensionRequest attributes for *two* must-staple extensions - DuplicateMustStapleCSR = mustRead("./testdata/duplicate_must_staple.der.csr") - // CSR generated by Go: // * Random public key // * CN = not-example.com @@ -96,159 +91,87 @@ var ( // OIDExtensionSCTList is defined in RFC 6962 s3.3. OIDExtensionSCTList = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} - - // The "certificate-for-precertificate" tests use the precertificate from a - // previous "precertificate" test, in order to verify that the CA is - // stateless with respect to these two operations, since a separate CA - // object instance will be used for generating each. Consequently, the - // "precertificate" tests must be before the "certificate-for-precertificate" - // tests in this list, and we cannot run these sub-tests concurrently. - // - // In order to test the case where the same CA object is used for issuing - // both the precertificate and the certificate, we'd need to contort - // |TestIssueCertificate| quite a bit, and since it isn't clear that that - // would be useful, we've avoided adding that case, at least for now. - issuanceModes = []IssuanceMode{ - {name: "precertificate", issueCertificateForPrecertificate: false}, - {name: "certificate-for-precertificate", issueCertificateForPrecertificate: true}, - } ) -const arbitraryRegID int64 = 1001 -const yamlLoadErrMsg = "Error loading YAML bytes for ECDSA allow list:" - -// Useful key and certificate files. 
-const caKeyFile = "../test/test-ca.key"
-const caCertFile = "../test/test-ca.pem"
-const caCertFile2 = "../test/test-ca2.pem"
-
 func mustRead(path string) []byte {
-	b, err := ioutil.ReadFile(path)
-	if err != nil {
-		panic(fmt.Sprintf("unable to read %#v: %s", path, err))
-	}
-	return b
-}
-
-type testCtx struct {
-	pa             core.PolicyAuthority
-	ocsp           *ocspImpl
-	certExpiry     time.Duration
-	certBackdate   time.Duration
-	serialPrefix   int
-	maxNames       int
-	boulderIssuers []*issuance.Issuer
-	keyPolicy      goodkey.KeyPolicy
-	fc             clock.FakeClock
-	stats          prometheus.Registerer
-	signatureCount *prometheus.CounterVec
-	signErrorCount *prometheus.CounterVec
-	logger         *blog.Mock
-}
-
-type mockSA struct {
-	certificate core.Certificate
+	return must.Do(os.ReadFile(path))
 }
 
-func (m *mockSA) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*sapb.AddCertificateResponse, error) {
-	m.certificate.DER = req.Der
-	return nil, nil
-}
-
-func (m *mockSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
-	return &emptypb.Empty{}, nil
-}
-
-func (m *mockSA) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
-	return &emptypb.Empty{}, nil
+// caArgs is a container for all of the arguments to
+// NewCertificateAuthorityImpl. It exists so that tests can easily build a
+// default certificateAuthorityImpl, but can also easily customize that object
+// to exercise various behaviors. The expected usage flow is:
+//
+//	cargs := newCAArgs(t)
+//	cargs.foo = someOverride
+//	ca := cargs.make()
+//
+// Its fields should remain identical to the NewCertificateAuthorityImpl args.
+type caArgs struct {
+	sa           sapb.StorageAuthorityCertificateClient
+	sctService   rapb.SCTProviderClient
+	pa           core.PolicyAuthority
+	issuers      []*issuance.Issuer
+	profiles     map[string]*issuance.Profile
+	serialPrefix byte
+	maxNames     int
+	keyPolicy    goodkey.KeyPolicy
+	logger       *blog.Mock
+	metrics      *caMetrics
+	clk          clock.FakeClock
 }
 
-func (m *mockSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) {
-	return nil, berrors.NotFoundError("cannot find the cert")
-}
+// newCAArgs returns a caArgs populated with reasonable default values for testing.
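// Aside: the override flow described above, sketched as an in-package test.
// The test name is hypothetical; the serialPrefix value is chosen to trip
// the constructor's range check in ca.go (prefixes must be between 0x01 and
// 0x7f).

func TestSerialPrefixOverride(t *testing.T) {
	cargs := newCAArgs(t)
	cargs.serialPrefix = 0x00
	_, err := cargs.make()
	test.AssertError(t, err, "expected constructor to reject serial prefix 0x00")
}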
+func newCAArgs(t *testing.T) *caArgs { + features.Reset() -func (m *mockSA) GetPrecertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - return nil, berrors.NotFoundError("cannot find the precert") -} + fc := clock.NewFake() + fc.Add(1 * time.Hour) -var caKey crypto.Signer -var caCert *issuance.Certificate -var caCert2 *issuance.Certificate -var ctx = context.Background() + pa, err := policy.New(map[identifier.IdentifierType]bool{"dns": true}, nil, blog.NewMock()) + test.AssertNotError(t, err, "Couldn't create PA") + err = pa.LoadIdentPolicyFile("../test/ident-policy.yaml") + test.AssertNotError(t, err, "Couldn't set identifier policy") -func init() { - var err error - caCert, caKey, err = issuance.LoadIssuer(issuance.IssuerLoc{ - File: caKeyFile, - CertFile: caCertFile, + legacy, err := issuance.NewProfile(issuance.ProfileConfig{ + MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 90}, + MaxValidityBackdate: config.Duration{Duration: time.Hour}, + IgnoredLints: []string{"w_subject_common_name_included"}, }) - if err != nil { - panic(fmt.Sprintf("Unable to load %q and %q: %s", caKeyFile, caCertFile, err)) - } - caCert2, err = issuance.LoadCertificate(caCertFile2) - if err != nil { - panic(fmt.Sprintf("Unable to parse %q: %s", caCertFile2, err)) + test.AssertNotError(t, err, "Loading test profile") + modern, err := issuance.NewProfile(issuance.ProfileConfig{ + OmitCommonName: true, + OmitKeyEncipherment: true, + OmitClientAuth: true, + OmitSKID: true, + MaxValidityPeriod: config.Duration{Duration: time.Hour * 24 * 6}, + MaxValidityBackdate: config.Duration{Duration: time.Hour}, + IgnoredLints: []string{"w_ext_subject_key_identifier_missing_sub_cert"}, + }) + test.AssertNotError(t, err, "Loading test profile") + profiles := map[string]*issuance.Profile{ + "legacy": legacy, + "modern": modern, } -} -func setup(t *testing.T) *testCtx { - features.Reset() - fc := clock.NewFake() - fc.Add(1 * time.Hour) - - pa, err := policy.New(nil) - test.AssertNotError(t, err, "Couldn't create PA") - err = pa.SetHostnamePolicyFile("../test/hostname-policy.yaml") - test.AssertNotError(t, err, "Couldn't set hostname policy") - - boulderProfile := func(rsa, ecdsa bool) *issuance.Profile { - res, _ := issuance.NewProfile( - issuance.ProfileConfig{ - AllowMustStaple: true, - AllowCTPoison: true, - AllowSCTList: true, - AllowCommonName: true, - Policies: []issuance.PolicyInformation{ - {OID: "2.23.140.1.2.1"}, - }, - MaxValidityPeriod: cmd.ConfigDuration{Duration: time.Hour * 8760}, - MaxValidityBackdate: cmd.ConfigDuration{Duration: time.Hour}, - }, - issuance.IssuerConfig{ - UseForECDSALeaves: ecdsa, - UseForRSALeaves: rsa, - IssuerURL: "http://not-example.com/issuer-url", - OCSPURL: "http://not-example.com/ocsp", - CRLURL: "http://not-example.com/crl", + issuers := make([]*issuance.Issuer, 4) + for i, name := range []string{"int-r3", "int-r4", "int-e1", "int-e2"} { + issuers[i], err = issuance.LoadIssuer(issuance.IssuerConfig{ + IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name), + CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name), + CRLShards: 10, + Location: issuance.IssuerLoc{ + File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name), + CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name), }, - ) - return res - } - boulderLinter, _ := linter.New(caCert.Certificate, caKey, []string{"n_subject_common_name_included"}) - boulderLinter2, _ := linter.New(caCert2.Certificate, caKey, 
[]string{"n_subject_common_name_included"}) - boulderIssuers := []*issuance.Issuer{ - // Must list ECDSA-only issuer first, so it is the default for ECDSA. - { - Cert: caCert2, - Signer: caKey, - Profile: boulderProfile(false, true), - Linter: boulderLinter2, - Clk: fc, - }, - { - Cert: caCert, - Signer: caKey, - Profile: boulderProfile(true, true), - Linter: boulderLinter, - Clk: fc, - }, + Profiles: []string{"legacy", "modern"}, + }, fc) + test.AssertNotError(t, err, "Couldn't load test issuer") } - keyPolicy := goodkey.KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - } + keyPolicy, err := goodkey.NewPolicy(nil, nil) + test.AssertNotError(t, err, "Failed to create test keypolicy") + signatureCount := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "signatures", @@ -259,440 +182,186 @@ func setup(t *testing.T) *testCtx { Name: "signature_errors", Help: "A counter of signature errors labelled by error type", }, []string{"type"}) - - ocsp, err := NewOCSPImpl( - boulderIssuers, - time.Hour, - 0, - time.Second, - blog.NewMock(), - metrics.NoopRegisterer, - signatureCount, - signErrorCount, - fc, - ) - test.AssertNotError(t, err, "Failed to create ocsp impl") - - return &testCtx{ - pa: pa, - ocsp: ocsp, - certExpiry: 8760 * time.Hour, - certBackdate: time.Hour, - serialPrefix: 17, - maxNames: 2, - boulderIssuers: boulderIssuers, - keyPolicy: keyPolicy, - fc: fc, - stats: metrics.NoopRegisterer, - signatureCount: signatureCount, - signErrorCount: signErrorCount, - logger: blog.NewMock(), + lintErrorCount := prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "lint_errors", + Help: "Number of issuances that were halted by linting errors", + }) + certificatesCount := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "certificates", + Help: "Number of certificates issued", + }, []string{"profile"}) + cametrics := &caMetrics{signatureCount, signErrorCount, lintErrorCount, certificatesCount} + + return &caArgs{ + sa: &mockSA{}, + sctService: &mockSCTService{}, + pa: pa, + issuers: issuers, + profiles: profiles, + serialPrefix: 0x11, + maxNames: 2, + keyPolicy: keyPolicy, + logger: blog.NewMock(), + metrics: cametrics, + clk: fc, } } -func TestFailNoSerialPrefix(t *testing.T) { - testCtx := setup(t) - - _, err := NewCertificateAuthorityImpl( - nil, - nil, - nil, - nil, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - 0, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - nil, - nil, - testCtx.fc) - test.AssertError(t, err, "CA should have failed with no SerialPrefix") +// make passes all of the caArgs' fields to the NewCertificateAuthorityImpl +// constructor and returns the result. 
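+// Routing every test through this single call site means a change to the
+// constructor's signature only has to be reflected here, not in each test.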
+func (c *caArgs) make() (*certificateAuthorityImpl, error) { + return NewCertificateAuthorityImpl( + c.sa, c.sctService, c.pa, c.issuers, c.profiles, c.serialPrefix, + c.maxNames, c.keyPolicy, c.logger, c.metrics, c.clk) } -type TestCertificateIssuance struct { - ca *certificateAuthorityImpl - sa *mockSA - req *x509.CertificateRequest - mode IssuanceMode - certDER []byte - cert *x509.Certificate -} +type mockSA struct{} -type IssuanceMode struct { - name string - issueCertificateForPrecertificate bool +func (m *mockSA) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil } -func TestIssuePrecertificate(t *testing.T) { - testCases := []struct { - name string - csr []byte - subTest func(t *testing.T, i *TestCertificateIssuance) - }{ - {"IssuePrecertificate", CNandSANCSR, issueCertificateSubTestIssuePrecertificate}, - {"ValidityUsesCAClock", CNandSANCSR, issueCertificateSubTestValidityUsesCAClock}, - {"ProfileSelectionRSA", CNandSANCSR, issueCertificateSubTestProfileSelectionRSA}, - {"ProfileSelectionECDSA", ECDSACSR, issueCertificateSubTestProfileSelectionECDSA}, - {"MustStaple", MustStapleCSR, issueCertificateSubTestMustStaple}, - {"MustStapleDuplicate", DuplicateMustStapleCSR, issueCertificateSubTestMustStaple}, - {"UnknownExtension", UnsupportedExtensionCSR, issueCertificateSubTestUnknownExtension}, - {"CTPoisonExtension", CTPoisonExtensionCSR, issueCertificateSubTestCTPoisonExtension}, - {"CTPoisonExtensionEmpty", CTPoisonExtensionEmptyCSR, issueCertificateSubTestCTPoisonExtension}, - } - - for _, testCase := range testCases { - // The loop through |issuanceModes| must be inside the loop through - // |testCases| because the "certificate-for-precertificate" tests use - // the precertificates previously generated from the preceding - // "precertificate" test. See also the comment above |issuanceModes|. - for _, mode := range issuanceModes { - ca, sa := issueCertificateSubTestSetup(t) - - t.Run(fmt.Sprintf("%s - %s", mode.name, testCase.name), func(t *testing.T) { - req, err := x509.ParseCertificateRequest(testCase.csr) - test.AssertNotError(t, err, "Certificate request failed to parse") - - issueReq := &capb.IssueCertificateRequest{Csr: testCase.csr, RegistrationID: arbitraryRegID} - - var certDER []byte - response, err := ca.IssuePrecertificate(ctx, issueReq) - - test.AssertNotError(t, err, "Failed to issue precertificate") - certDER = response.DER - - cert, err := x509.ParseCertificate(certDER) - test.AssertNotError(t, err, "Certificate failed to parse") - - poisonExtension := findExtension(cert.Extensions, OIDExtensionCTPoison) - test.AssertEquals(t, true, poisonExtension != nil) - if poisonExtension != nil { - test.AssertEquals(t, poisonExtension.Critical, true) - test.AssertDeepEquals(t, poisonExtension.Value, []byte{0x05, 0x00}) // ASN.1 DER NULL - } - - i := TestCertificateIssuance{ - ca: ca, - sa: sa, - req: req, - mode: mode, - certDER: certDER, - cert: cert, - } - - testCase.subTest(t, &i) - }) - } - } +func (m *mockSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil } -func makeECDSAAllowListBytes(regID int64) []byte { - regIDBytes := []byte(fmt.Sprintf("%d", regID)) - contents := []byte(` -- `) - return append(contents, regIDBytes...) 
+func (m *mockSA) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil } -func issueCertificateSubTestSetup(t *testing.T) (*certificateAuthorityImpl, *mockSA) { - testCtx := setup(t) - sa := &mockSA{} - ca, err := NewCertificateAuthorityImpl( - sa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - &ECDSAAllowList{}, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - return ca, sa +func (m *mockSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + return nil, berrors.NotFoundError("cannot find the cert") } -func issueCertificateSubTestIssuePrecertificate(t *testing.T, i *TestCertificateIssuance) { - cert := i.cert +func (m *mockSA) GetLintPrecertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + return nil, berrors.NotFoundError("cannot find the precert") +} - test.AssertEquals(t, cert.Subject.CommonName, "not-example.com") +type mockSCTService struct{} - if len(cert.DNSNames) == 1 { - if cert.DNSNames[0] != "not-example.com" { - t.Errorf("Improper list of domain names %v", cert.DNSNames) - } - t.Errorf("Improper list of domain names %v", cert.DNSNames) +func (m mockSCTService) GetSCTs(ctx context.Context, sctRequest *rapb.SCTRequest, _ ...grpc.CallOption) (*rapb.SCTResponse, error) { + sct := ct.SignedCertificateTimestamp{ + SCTVersion: 0, + Timestamp: 2020, + Signature: ct.DigitallySigned{ + Signature: []byte{0}, + }, } - if len(cert.Subject.Country) > 0 { - t.Errorf("Subject contained unauthorized values: %v", cert.Subject) + sctBytes, err := cttls.Marshal(sct) + if err != nil { + return nil, err } -} -func issueCertificateSubTestValidityUsesCAClock(t *testing.T, i *TestCertificateIssuance) { - test.AssertEquals(t, i.cert.NotBefore, i.ca.clk.Now().Add(-1*i.ca.backdate)) - test.AssertEquals(t, i.cert.NotAfter.Add(time.Second).Sub(i.cert.NotBefore), i.ca.validityPeriod) + return &rapb.SCTResponse{SctDER: [][]byte{sctBytes}}, nil } -// Test issuing when multiple issuers are present. -func TestMultipleIssuers(t *testing.T) { - testCtx := setup(t) - sa := &mockSA{} - ca, err := NewCertificateAuthorityImpl( - sa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to remake CA") - - // Test that an RSA CSR gets issuance from the RSA issuer, caCert. - issuedCert, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID}) - test.AssertNotError(t, err, "Failed to issue certificate") - cert, err := x509.ParseCertificate(issuedCert.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - err = cert.CheckSignatureFrom(caCert2.Certificate) - test.AssertNotError(t, err, "Certificate failed signature validation") - - // Test that an ECDSA CSR gets issuance from the ECDSA issuer, caCert2. 
- issuedCert, err = ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID}) - test.AssertNotError(t, err, "Failed to issue certificate") - cert, err = x509.ParseCertificate(issuedCert.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - err = cert.CheckSignatureFrom(caCert2.Certificate) - test.AssertNotError(t, err, "Certificate failed signature validation") -} - -func TestECDSAAllowList(t *testing.T) { - req := &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID} +func TestNewCertificateAuthorityImpl_BadSerialPrefix(t *testing.T) { + t.Parallel() + cargs := newCAArgs(t) - // With allowlist containing arbitraryRegID, issuance should come from ECDSA issuer. - ca, _ := issueCertificateSubTestSetup(t) - contents := makeECDSAAllowListBytes(int64(arbitraryRegID)) - err := ca.ecdsaAllowList.Update(contents) - if err != nil { - t.Errorf("%s %s", yamlLoadErrMsg, err) - t.FailNow() - } - result, err := ca.IssuePrecertificate(ctx, req) - test.AssertNotError(t, err, "Failed to issue certificate") - cert, err := x509.ParseCertificate(result.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - test.AssertByteEquals(t, cert.RawIssuer, caCert2.RawSubject) - - // Attempts to update the allow list with malformed YAML should - // fail, but the allowlist should still contain arbitraryRegID, so - // issuance should come from ECDSA issuer - malformed_yaml := []byte(` -)(\/=`) - err = ca.ecdsaAllowList.Update(malformed_yaml) - test.AssertError(t, err, "Update method accepted malformed YAML") - result, err = ca.IssuePrecertificate(ctx, req) - test.AssertNotError(t, err, "Failed to issue certificate after Update was called with malformed YAML") - cert, err = x509.ParseCertificate(result.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - test.AssertByteEquals(t, cert.RawIssuer, caCert2.RawSubject) - - // With allowlist not containing arbitraryRegID, issuance should fall back to RSA issuer. - contents = makeECDSAAllowListBytes(int64(2002)) - err = ca.ecdsaAllowList.Update(contents) - if err != nil { - t.Errorf("%s %s", yamlLoadErrMsg, err) - t.FailNow() + cargs.serialPrefix = 0x00 + _, err := cargs.make() + if err == nil { + t.Errorf("NewCertificateAuthorityImpl(serialPrefix=0x00) succeeded, but want error") } - result, err = ca.IssuePrecertificate(ctx, req) - test.AssertNotError(t, err, "Failed to issue certificate") - cert, err = x509.ParseCertificate(result.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - test.AssertByteEquals(t, cert.RawIssuer, caCert.RawSubject) - - // With empty allowlist but ECDSAForAll enabled, issuance should come from ECDSA issuer. - ca, _ = issueCertificateSubTestSetup(t) - _ = features.Set(map[string]bool{"ECDSAForAll": true}) - defer features.Reset() - result, err = ca.IssuePrecertificate(ctx, req) - test.AssertNotError(t, err, "Failed to issue certificate") - cert, err = x509.ParseCertificate(result.DER) - test.AssertNotError(t, err, "Certificate failed to parse") - test.AssertByteEquals(t, cert.RawIssuer, caCert2.RawSubject) -} -func TestInvalidCSRs(t *testing.T) { - testCases := []struct { - name string - csrPath string - check func(t *testing.T, ca *certificateAuthorityImpl, sa *mockSA) - errorMessage string - errorType berrors.ErrorType - }{ - // Test that the CA rejects CSRs that have no names. - // - // CSR generated by Go: - // * Random RSA public key. 
- // * CN = [none] - // * DNSNames = [none] - {"RejectNoHostnames", "./testdata/no_names.der.csr", nil, "Issued certificate with no names", berrors.BadCSR}, - - // Test that the CA rejects CSRs that have too many names. - // - // CSR generated by Go: - // * Random public key - // * CN = [none] - // * DNSNames = not-example.com, www.not-example.com, mail.example.com - {"RejectTooManyHostnames", "./testdata/too_many_names.der.csr", nil, "Issued certificate with too many names", berrors.BadCSR}, - - // Test that the CA rejects CSRs that have public keys that are too short. - // - // CSR generated by Go: - // * Random public key -- 512 bits long - // * CN = (none) - // * DNSNames = not-example.com, www.not-example.com, mail.not-example.com - {"RejectShortKey", "./testdata/short_key.der.csr", nil, "Issued a certificate with too short a key.", berrors.BadCSR}, - - // CSR generated by Go: - // * Random RSA public key. - // * CN = aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com - // * DNSNames = [none] - {"RejectLongCommonName", "./testdata/long_cn.der.csr", nil, "Issued a certificate with a CN over 64 bytes.", berrors.BadCSR}, - - // CSR generated by OpenSSL: - // Edited signature to become invalid. - {"RejectWrongSignature", "./testdata/invalid_signature.der.csr", nil, "Issued a certificate based on a CSR with an invalid signature.", berrors.BadCSR}, + cargs.serialPrefix = 0x80 + _, err = cargs.make() + if err == nil { + t.Errorf("NewCertificateAuthorityImpl(serialPrefix=0x80) succeeded, but want error") } +} - for _, testCase := range testCases { - testCtx := setup(t) - sa := &mockSA{} - ca, err := NewCertificateAuthorityImpl( - sa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - t.Run(testCase.name, func(t *testing.T) { - serializedCSR := mustRead(testCase.csrPath) - issueReq := &capb.IssueCertificateRequest{Csr: serializedCSR, RegistrationID: arbitraryRegID} - _, err = ca.IssuePrecertificate(ctx, issueReq) +func TestNewCertificateAuthorityImpl_InsufficientIssuers(t *testing.T) { + t.Parallel() + cargs := newCAArgs(t) + origIssuers := cargs.issuers - test.AssertErrorIs(t, err, testCase.errorType) - test.AssertMetricWithLabelsEquals(t, ca.signatureCount, prometheus.Labels{"purpose": "cert"}, 0) + for _, tc := range []struct { + name string + issuers []*issuance.Issuer + wantErr string + }{ + { + name: "no issuers", + issuers: nil, + wantErr: "at least one issuer", + }, + { + name: "ecdsa only", + issuers: origIssuers[2:], + wantErr: "no RSA issuers configured", + }, + { + name: "rsa only", + issuers: origIssuers[:2], + wantErr: "no ECDSA issuers configured", + }, + } { + t.Run(tc.name, func(t *testing.T) { + cargs.issuers = tc.issuers + _, err := cargs.make() + if err == nil { + t.Fatalf("NewCertificateAuthorityImpl(%s) succeeded, but want error", tc.name) + } - test.AssertError(t, err, testCase.errorMessage) - if testCase.check != nil { - testCase.check(t, ca, sa) + if !strings.Contains(err.Error(), tc.wantErr) { + t.Fatalf("NewCertificateAuthorityImpl(%s) = %q, but want %q", tc.name, err, tc.wantErr) } }) } } -func TestRejectValidityTooLong(t *testing.T) { - testCtx := setup(t) - sa := &mockSA{} - ca, err := NewCertificateAuthorityImpl( - sa, - testCtx.pa, - testCtx.ocsp, - 
testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - nil, - nil, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - // This time is a few minutes before the notAfter in testdata/ca_cert.pem - future, err := time.Parse(time.RFC3339, "2025-02-10T00:30:00Z") - - test.AssertNotError(t, err, "Failed to parse time") - testCtx.fc.Set(future) - // Test that the CA rejects CSRs that would expire after the intermediate cert - _, err = ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID}) - test.AssertError(t, err, "Cannot issue a certificate that expires after the intermediate certificate") - test.AssertErrorIs(t, err, berrors.InternalServer) -} +func TestNewCertificateAuthorityImpl_InsufficientProfiles(t *testing.T) { + t.Parallel() + cargs := newCAArgs(t) + cargs.profiles = nil + + _, err := cargs.make() + if err == nil { + t.Fatalf("NewCertificateAuthorityImpl(profiles=nil) succeeded, but want error") + } -func issueCertificateSubTestProfileSelectionRSA(t *testing.T, i *TestCertificateIssuance) { - // Certificates for RSA keys should be marked as usable for signatures and encryption. - expectedKeyUsage := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment - t.Logf("expected key usage %v, got %v", expectedKeyUsage, i.cert.KeyUsage) - test.AssertEquals(t, i.cert.KeyUsage, expectedKeyUsage) + wantErr := "at least one certificate profile" + if !strings.Contains(err.Error(), wantErr) { + t.Fatalf("NewCertificateAuthorityImpl(profiles=nil) = %q, but want %q", err, wantErr) + } } -func issueCertificateSubTestProfileSelectionECDSA(t *testing.T, i *TestCertificateIssuance) { - // Certificates for ECDSA keys should be marked as usable for only signatures. - expectedKeyUsage := x509.KeyUsageDigitalSignature - t.Logf("expected key usage %v, got %v", expectedKeyUsage, i.cert.KeyUsage) - test.AssertEquals(t, i.cert.KeyUsage, expectedKeyUsage) +// recordingSA keeps track of the serial, precertificate, and certificate which +// are written to it. We use recordingSA only for the _HappyPath test because +// it's a pain to mitigate the data-races inherent in writing to it from many +// parallel subtests. 
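+// Each field records only the most recent request of its kind, which is
+// sufficient because the happy-path test issues exactly one certificate per
+// recordingSA instance.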
+type recordingSA struct { + serial *sapb.AddSerialRequest + precertificate *sapb.AddCertificateRequest + certificate *sapb.AddCertificateRequest } -func countMustStaple(t *testing.T, cert *x509.Certificate) (count int) { - oidTLSFeature := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} - mustStapleFeatureValue := []byte{0x30, 0x03, 0x02, 0x01, 0x05} - for _, ext := range cert.Extensions { - if ext.Id.Equal(oidTLSFeature) { - test.Assert(t, !ext.Critical, "Extension was marked critical") - test.AssertByteEquals(t, ext.Value, mustStapleFeatureValue) - count++ - } - } - return count +func (m *recordingSA) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + m.serial = req + return &emptypb.Empty{}, nil } -func issueCertificateSubTestMustStaple(t *testing.T, i *TestCertificateIssuance) { - test.AssertMetricWithLabelsEquals(t, i.ca.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1) - test.AssertEquals(t, countMustStaple(t, i.cert), 1) +func (m *recordingSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + m.precertificate = req + return &emptypb.Empty{}, nil } -func issueCertificateSubTestUnknownExtension(t *testing.T, i *TestCertificateIssuance) { - test.AssertMetricWithLabelsEquals(t, i.ca.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1) +func (m *recordingSA) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + m.certificate = req + return &emptypb.Empty{}, nil +} - // NOTE: The hard-coded value here will have to change over time as Boulder - // adds new (unrequested) extensions to certificates. - expectedExtensionCount := 10 - test.AssertEquals(t, len(i.cert.Extensions), expectedExtensionCount) +func (m *recordingSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + return nil, berrors.NotFoundError("cannot find the cert") } -func issueCertificateSubTestCTPoisonExtension(t *testing.T, i *TestCertificateIssuance) { - test.AssertMetricWithLabelsEquals(t, i.ca.signatureCount, prometheus.Labels{"purpose": "precertificate"}, 1) +func (m *recordingSA) GetLintPrecertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + return nil, berrors.NotFoundError("cannot find the precert") } func findExtension(extensions []pkix.Extension, id asn1.ObjectIdentifier) *pkix.Extension { @@ -704,86 +373,15 @@ func findExtension(extensions []pkix.Extension, id asn1.ObjectIdentifier) *pkix. return nil } -func makeSCTs() ([][]byte, error) { - sct := ct.SignedCertificateTimestamp{ - SCTVersion: 0, - Timestamp: 2020, - Signature: ct.DigitallySigned{ - Signature: []byte{0}, - }, - } - sctBytes, err := cttls.Marshal(sct) +// deserializeSCTList deserializes a list of SCTs. 
+// Forked from github.com/cloudflare/cfssl/helpers +func deserializeSCTList(sctListExtensionValue []byte) ([]ct.SignedCertificateTimestamp, error) { + var serializedSCTList []byte + _, err := asn1.Unmarshal(sctListExtensionValue, &serializedSCTList) if err != nil { return nil, err } - return [][]byte{sctBytes}, err -} - -func TestIssueCertificateForPrecertificate(t *testing.T) { - testCtx := setup(t) - sa := &mockSA{} - ca, err := NewCertificateAuthorityImpl( - sa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID, OrderID: 0} - precert, err := ca.IssuePrecertificate(ctx, &issueReq) - test.AssertNotError(t, err, "Failed to issue precert") - parsedPrecert, err := x509.ParseCertificate(precert.DER) - test.AssertNotError(t, err, "Failed to parse precert") - - // Check for poison extension - poisonExtension := findExtension(parsedPrecert.Extensions, OIDExtensionCTPoison) - test.AssertNotNil(t, poisonExtension, "Couldn't find CTPoison extension") - test.AssertEquals(t, poisonExtension.Critical, true) - test.AssertDeepEquals(t, poisonExtension.Value, []byte{0x05, 0x00}) // ASN.1 DER NULL - - sctBytes, err := makeSCTs() - if err != nil { - t.Fatal(err) - } - - test.AssertNotError(t, err, "Failed to marshal SCT") - cert, err := ca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{ - DER: precert.DER, - SCTs: sctBytes, - RegistrationID: arbitraryRegID, - OrderID: 0, - }) - test.AssertNotError(t, err, "Failed to issue cert from precert") - parsedCert, err := x509.ParseCertificate(cert.Der) - test.AssertNotError(t, err, "Failed to parse cert") - - // Check for SCT list extension - sctListExtension := findExtension(parsedCert.Extensions, OIDExtensionSCTList) - test.AssertNotNil(t, sctListExtension, "Couldn't find SCTList extension") - test.AssertEquals(t, sctListExtension.Critical, false) - var rawValue []byte - _, err = asn1.Unmarshal(sctListExtension.Value, &rawValue) - test.AssertNotError(t, err, "Failed to unmarshal extension value") - sctList, err := deserializeSCTList(rawValue) - test.AssertNotError(t, err, "Failed to deserialize SCT list") - test.Assert(t, len(sctList) == 1, fmt.Sprintf("Wrong number of SCTs, wanted: 1, got: %d", len(sctList))) -} -// deserializeSCTList deserializes a list of SCTs. -// Forked from github.com/cloudflare/cfssl/helpers -func deserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) { var sctList ctx509.SignedCertificateTimestampList rest, err := cttls.Unmarshal(serializedSCTList, &sctList) if err != nil { @@ -807,315 +405,645 @@ func deserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimesta return list, nil } -// dupeSA returns a non-error to GetCertificate in order to simulate a request -// to issue a final certificate with a duplicate serial. 
-type dupeSA struct { - mockSA -} +func TestIssueCertificate_HappyPath(t *testing.T) { + for _, profile := range []string{"legacy", "modern"} { + for _, tc := range []struct { + name string + csr []byte + }{ + { + name: "RSA", + csr: CNandSANCSR, + }, + { + name: "ECDSA", + csr: ECDSACSR, + }, + { + name: "unrecognized extension", + csr: UnsupportedExtensionCSR, + }, + { + name: "poison extension", + csr: CTPoisonExtensionCSR, + }, + { + name: "malformed poison extension", + csr: CTPoisonExtensionEmptyCSR, + }, + { + // Rejection of CSRs that request Must-Staple happens in the RA. + name: "must staple extension", + csr: mustRead("./testdata/must_staple.der.csr"), + }, + } { + t.Run(tc.name+"/"+profile, func(t *testing.T) { + t.Parallel() + + // Use our own CA for each of these parallel subtests, because we plan + // to inspect the serial, precert, and final cert stored in the mock SA. + // Also so we can assert that certain metrics have specific values. + cargs := newCAArgs(t) + sa := &recordingSA{} + cargs.sa = sa + ca, err := cargs.make() + if err != nil { + t.Fatalf("making test ca: %s", err) + } -func (m *dupeSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - return nil, nil -} + res, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{ + RegistrationID: 1, OrderID: 1, + Csr: tc.csr, CertProfileName: profile, + }) + if err != nil { + t.Fatalf("IssueCertificate(%s) = %q, but want success", tc.name, err) + } -// getCertErrorSA always returns an error for GetCertificate -type getCertErrorSA struct { - mockSA -} + test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "precertificate", "status": "success"}, 1) + test.AssertMetricWithLabelsEquals(t, ca.metrics.signatureCount, prometheus.Labels{"purpose": "certificate", "status": "success"}, 1) + + if sa.serial.RegID != 1 { + t.Errorf("want serial to be associated with acct %d, but got %d", 1, sa.serial.RegID) + } + + storedPrecert, err := x509.ParseCertificate(sa.precertificate.Der) + if err != nil { + t.Fatalf("parsing precert: %s", err) + } + + poisonExtension := findExtension(storedPrecert.Extensions, OIDExtensionCTPoison) + if poisonExtension == nil { + t.Fatal("failed to find ctpoison extension") + } + + if !poisonExtension.Critical { + t.Error("precertificate ctpoison extension must be critical") + } + + if !bytes.Equal(poisonExtension.Value, []byte{0x05, 0x00}) { // ASN.1 DER NULL + t.Errorf("precertificate poison extension has value %x, but want %x", poisonExtension.Value, []byte{0x05, 0x00}) + } + + storedCert, err := x509.ParseCertificate(sa.certificate.Der) + if err != nil { + t.Fatalf("parsing cert: %s", err) + } + + sctExtension := findExtension(storedCert.Extensions, OIDExtensionSCTList) + if sctExtension == nil { + t.Fatal("failed to find sctList extension") + } -func (m *getCertErrorSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - return nil, fmt.Errorf("i don't like it") + if sctExtension.Critical { + t.Error("sctList extension must not be critical") + } + + sctList, err := deserializeSCTList(sctExtension.Value) + if err != nil { + t.Fatalf("parsing sctList extension: %s", err) + } + + if len(sctList) != 1 { + t.Errorf("got %d SCTs, but want 1", len(sctList)) + } + + cert, err := x509.ParseCertificate(res.DER) + if err != nil { + t.Fatalf("parsing returned cert: %s", err) + } + + if (sa.serial.Serial != 
core.SerialToString(storedPrecert.SerialNumber)) ||
+				(sa.serial.Serial != core.SerialToString(storedCert.SerialNumber)) ||
+				(sa.serial.Serial != core.SerialToString(cert.SerialNumber)) {
+				t.Errorf("expected all serials to match")
+			}
+
+			if !bytes.Equal(res.DER, sa.certificate.Der) {
+				t.Errorf("Expected stored and returned cert to be identical")
+			}
+		})
+	}
+	}
 }
 
-func TestIssueCertificateForPrecertificateDuplicateSerial(t *testing.T) {
-	testCtx := setup(t)
-	sa := &dupeSA{}
-	ca, err := NewCertificateAuthorityImpl(
-		sa,
-		testCtx.pa,
-		testCtx.ocsp,
-		testCtx.boulderIssuers,
-		nil,
-		testCtx.certExpiry,
-		testCtx.certBackdate,
-		testCtx.serialPrefix,
-		testCtx.maxNames,
-		testCtx.keyPolicy,
-		nil,
-		testCtx.logger,
-		testCtx.stats,
-		testCtx.signatureCount,
-		testCtx.signErrorCount,
-		testCtx.fc)
-	test.AssertNotError(t, err, "Failed to create CA")
-
-	sctBytes, err := makeSCTs()
+func TestIssueCertificate_BadCSR(t *testing.T) {
+	t.Parallel()
+
+	ca, err := newCAArgs(t).make()
 	if err != nil {
-		t.Fatal(err)
+		t.Fatalf("making test ca: %s", err)
 	}
 
-	issueReq := capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID, OrderID: 0}
-	precert, err := ca.IssuePrecertificate(ctx, &issueReq)
-	test.AssertNotError(t, err, "Failed to issue precert")
-	_, err = ca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{
-		DER:            precert.DER,
-		SCTs:           sctBytes,
-		RegistrationID: arbitraryRegID,
-		OrderID:        0,
-	})
-	if err == nil {
-		t.Error("Expected error issuing duplicate serial but got none.")
+	for _, tc := range []struct {
+		name    string
+		csrPath string
+	}{
+		{
+			name:    "no names",
+			csrPath: "./testdata/no_names.der.csr",
+		},
+		{
+			name:    "too many names",
+			csrPath: "./testdata/too_many_names.der.csr",
+		},
+		{
+			name:    "short key",
+			csrPath: "./testdata/short_key.der.csr",
+		},
+		{
+			name:    "bad key algorithm",
+			csrPath: "./testdata/bad_algorithm.der.csr",
+		},
+		{
+			name:    "invalid signature",
+			csrPath: "./testdata/invalid_signature.der.csr",
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			_, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{
+				RegistrationID: 1, OrderID: 1,
+				Csr: mustRead(tc.csrPath), CertProfileName: "legacy",
+			})
+			if err == nil {
+				t.Fatalf("IssueCertificate(%q) succeeded, but want error", tc.csrPath)
+			}
+			if !errors.Is(err, berrors.BadCSR) {
+				t.Fatalf("IssueCertificate(%q) = %T, but want %T", tc.csrPath, err, berrors.BadCSR)
+			}
+		})
 	}
-	if !strings.Contains(err.Error(), "issuance of duplicate final certificate requested") {
-		t.Errorf("Wrong type of error issuing duplicate serial. Expected 'issuance of duplicate', got '%s'", err)
+}
+
+func TestIssueCertificate_ValidPastIssuer(t *testing.T) {
+	t.Parallel()
+	cargs := newCAArgs(t)
+
+	// Limit ourselves to only having one ECDSA issuer, just in case they have
+	// different notAfter dates.
+	cargs.issuers = cargs.issuers[:3]
+
+	// Jump to a time just moments before the test issuer expires.
+	future := cargs.issuers[2].Cert.Certificate.NotAfter.Add(-1 * time.Hour)
+	cargs.clk.Set(future)
+
+	ca, err := cargs.make()
+	if err != nil {
+		t.Fatalf("making test ca: %s", err)
 	}
 
-	// Now check what happens if there is an error (e.g. timeout) while checking
-	// for the duplicate.
- errorsa := &getCertErrorSA{} - errorca, err := NewCertificateAuthorityImpl( - errorsa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - _, err = errorca.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{ - DER: precert.DER, - SCTs: sctBytes, - RegistrationID: arbitraryRegID, - OrderID: 0, + _, err = ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{ + RegistrationID: 1, OrderID: 1, + Csr: ECDSACSR, CertProfileName: "legacy", }) if err == nil { - t.Fatal("Expected error issuing duplicate serial but got none.") + t.Fatalf("IssueCertificate(notAfter > issuer.notAfter) succeeded, but want error") } - if !strings.Contains(err.Error(), "error checking for duplicate") { - t.Fatalf("Wrong type of error issuing duplicate serial. Expected 'error checking for duplicate', got '%s'", err) + if !errors.Is(err, berrors.InternalServer) { + t.Fatalf("IssueCertificate(notAfter > issuer.notAfter) = %T, but want %T", err, berrors.InternalServer) } } -type queueSA struct { - mockSA +func TestIssueCertificate_InvalidProfile(t *testing.T) { + t.Parallel() - fail bool - duplicate bool + ca, err := newCAArgs(t).make() + if err != nil { + t.Fatalf("making test ca: %s", err) + } - issued time.Time - issuedPrecert time.Time -} + for _, tc := range []struct { + name string + profile string + wantErr string + }{ + { + name: "no profile", + profile: "", + wantErr: "Incomplete issue certificate request", + }, + { + name: "unrecognized profile", + profile: "doesnotexist", + wantErr: "incapable of using a profile named", + }, + { + name: "invalid profile name", + profile: "🤓", + wantErr: "incapable of using a profile named", + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() -func (qsa *queueSA) AddCertificate(_ context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*sapb.AddCertificateResponse, error) { - if qsa.fail { - return nil, errors.New("bad") - } else if qsa.duplicate { - return nil, berrors.DuplicateError("is a dupe") + _, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{ + RegistrationID: 1, OrderID: 1, + Csr: ECDSACSR, CertProfileName: tc.profile, + }) + if err == nil { + t.Fatalf("IssueCertificate(profile=%q) succeeded, but want error", tc.profile) + } + if !strings.Contains(err.Error(), tc.wantErr) { + t.Fatalf("IssueCertificate(profile=%q) = %q, but want %q", tc.profile, err, tc.wantErr) + } + }) } - qsa.issued = time.Unix(0, req.Issued).UTC() - return nil, nil } -func (qsa *queueSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - if qsa.fail { - return nil, errors.New("bad") - } else if qsa.duplicate { - return nil, berrors.DuplicateError("is a dupe") +func TestIssueCertificate_ProfileSelection(t *testing.T) { + t.Parallel() + + ca, err := newCAArgs(t).make() + if err != nil { + t.Fatalf("making test ca: %s", err) } - qsa.issuedPrecert = time.Unix(0, req.Issued).UTC() - return nil, nil -} -// TestPrecertOrphanQueue tests that IssuePrecertificate writes precertificates -// to the orphan queue if storage fails, and that `integrateOrphan` later -// successfully writes those precertificates to the database. 
To do this, it -// uses the `queueSA` mock, which allows us to flip on and off a "fail" bit that -// decides whether it errors in response to storage requests. -func TestPrecertOrphanQueue(t *testing.T) { - tmpDir := t.TempDir() - orphanQueue, err := goque.OpenQueue(tmpDir) - test.AssertNotError(t, err, "Failed to open orphaned certificate queue") - - qsa := &queueSA{fail: true} - testCtx := setup(t) - fakeNow := time.Date(2019, 9, 20, 0, 0, 0, 0, time.UTC) - testCtx.fc.Set(fakeNow) - ca, err := NewCertificateAuthorityImpl( - qsa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - orphanQueue, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - err = ca.integrateOrphan() - if err != goque.ErrEmpty { - t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err) + for _, tc := range []struct { + profile string + wantValidity time.Duration + }{ + { + profile: "legacy", + wantValidity: 90 * 24 * time.Hour, + }, + { + profile: "modern", + wantValidity: 6 * 24 * time.Hour, + }, + } { + t.Run(tc.profile, func(t *testing.T) { + t.Parallel() + + res, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{ + RegistrationID: 1, OrderID: 1, + Csr: ECDSACSR, CertProfileName: tc.profile, + }) + if err != nil { + t.Fatalf("IssueCertificate(profile=%q) = %q, but want success", tc.profile, err) + } + + cert, err := x509.ParseCertificate(res.DER) + if err != nil { + t.Fatalf("parsing certificate: %s", err) + } + + // We use the validity period as a proxy for detecting whether the correct + // profile was selected and used, since we know that the validity period + // differs between the two test profiles. 
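+			// Per RFC 5280, a certificate's validity period is inclusive of
+			// both its notBefore and notAfter bounds, hence the extra second
+			// added here.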
+			validity := cert.NotAfter.Add(time.Second).Sub(cert.NotBefore)
+			if validity != tc.wantValidity {
+				t.Errorf("IssueCertificate(profile=%q) = validity %d, but want %d", tc.profile, validity, tc.wantValidity)
+			}
+		})
+	}
+}
 
-	_, err = ca.IssuePrecertificate(context.Background(), &capb.IssueCertificateRequest{
-		RegistrationID: 1,
-		OrderID:        1,
-		Csr:            CNandSANCSR,
-	})
-	test.AssertError(t, err, "Expected IssuePrecertificate to fail with `qsa.fail = true`")
+func TestIssueCertificate_IssuerSelection(t *testing.T) {
+	t.Parallel()
+	cargs := newCAArgs(t)
+	origIssuers := cargs.issuers
 
-	matches := testCtx.logger.GetAllMatching(`orphaning precertificate.* regID=\[1\], orderID=\[1\]`)
-	if len(matches) != 1 {
-		t.Errorf("no log line, or incorrect log line for orphaned precertificate:\n%s",
-			strings.Join(testCtx.logger.GetAllMatching(".*"), "\n"))
+	ca, err := cargs.make()
+	if err != nil {
+		t.Fatalf("making test ca: %s", err)
 	}
-	test.AssertMetricWithLabelsEquals(
-		t, ca.orphanCount, prometheus.Labels{"type": "precert"}, 1)
 
+	for _, tc := range []struct {
+		name        string
+		csr         []byte
+		wantIssuers []*issuance.Issuer
+		wantKUs     x509.KeyUsage
+	}{
+		{
+			name:        "ECDSA",
+			csr:         ECDSACSR,
+			wantIssuers: origIssuers[2:],
+			wantKUs:     x509.KeyUsageDigitalSignature,
+		},
+		{
+			name:        "RSA",
+			csr:         CNandSANCSR,
+			wantIssuers: origIssuers[:2],
+			wantKUs:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			res, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{
+				RegistrationID: 1, OrderID: 1,
+				Csr: tc.csr, CertProfileName: "legacy",
+			})
+			if err != nil {
+				t.Fatalf("IssueCertificate(csr=%q) = %q, but want success", tc.name, err)
+			}
+
+			cert, err := x509.ParseCertificate(res.DER)
+			if err != nil {
+				t.Fatalf("parsing certificate: %s", err)
+			}
+
+			if cert.KeyUsage != tc.wantKUs {
+				t.Errorf("IssueCertificate(csr=%q) has KU %v, but want %v", tc.name, cert.KeyUsage, tc.wantKUs)
+			}
+
+			validated := false
+			for _, issuer := range tc.wantIssuers {
+				err = cert.CheckSignatureFrom(issuer.Cert.Certificate)
+				if err == nil {
+					validated = true
+					break
+				}
+			}
+			if !validated {
+				t.Errorf("IssueCertificate(csr=%q) issued from unexpected issuer %q", tc.name, cert.Issuer.CommonName)
+			}
+		})
+	}
+}
 
-	qsa.fail = false
-	err = ca.integrateOrphan()
-	test.AssertNotError(t, err, "integrateOrphan failed")
-	if !qsa.issuedPrecert.Equal(fakeNow) {
-		t.Errorf("expected issued time to be %s, got %s", fakeNow, qsa.issuedPrecert)
+func TestIssueCertificate_UnpredictableIssuance(t *testing.T) {
+	ca, err := newCAArgs(t).make()
+	if err != nil {
+		t.Fatalf("creating test ca: %s", err)
 	}
-	err = ca.integrateOrphan()
-	if err != goque.ErrEmpty {
-		t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err)
+
+	// Issue the same (ECDSA-keyed) certificate 20 times. At least one issuance
+	// should come from each of the two active ECDSA issuers (int-e1 and int-e2).
+	// With 20 trials, the probability that all 20 issuances come from the same
+	// issuer is 2 * 0.5 ^ 20 = 0.5 ^ 19 ~= 1.9e-6, about 2 in a million, so we
+	// do not consider this test to be flaky.
+ seenE1 := false + seenE2 := false + for range 20 { + res, err := ca.IssueCertificate(t.Context(), &capb.IssueCertificateRequest{ + RegistrationID: 1, OrderID: 1, + Csr: ECDSACSR, CertProfileName: "legacy", + }) + if err != nil { + t.Fatalf("issuing certificate: %s", err) + } + + cert, err := x509.ParseCertificate(res.DER) + if err != nil { + t.Fatalf("parsing certificate: %s", err) + } + + if strings.Contains(cert.Issuer.CommonName, "E1") { + seenE1 = true + } else if strings.Contains(cert.Issuer.CommonName, "E2") { + seenE2 = true + } else { + t.Fatalf("Issued certificate from unexpected issuer") + } } - test.AssertMetricWithLabelsEquals( - t, ca.adoptedOrphanCount, prometheus.Labels{"type": "precert"}, 1) + if !seenE1 { + t.Error("Expected at least one issuance from active issuer E1") + } + if !seenE2 { + t.Error("Expected at least one issuance from active issuer E2") + } } -func TestOrphanQueue(t *testing.T) { - tmpDir := t.TempDir() - orphanQueue, err := goque.OpenQueue(tmpDir) - test.AssertNotError(t, err, "Failed to open orphaned certificate queue") - - qsa := &queueSA{fail: true} - testCtx := setup(t) - fakeNow, err := time.Parse("Mon Jan 2 15:04:05 2006", "Mon Jan 2 15:04:05 2006") +// TestPickIssuer tests the various deterministic cases, ensuring that the +// function properly respects the issuers' key algorithms and profiles. The test +// cases here are somewhat tightly coupled to the profiles populated by +// newCAArgs; this full coverage is to ensure that pickIssuer doesn't have an +// off-by-one error or similar bug lurking in it. +// +// The non-deterministic case is covered by TestIssueCertificate_UnpredictableIssuance. +func TestPickIssuer(t *testing.T) { + t.Parallel() + + ca, err := newCAArgs(t).make() if err != nil { - t.Fatal(err) + t.Fatalf("creating test ca: %s", err) } - testCtx.fc.Set(fakeNow) - ca, err := NewCertificateAuthorityImpl( - qsa, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - orphanQueue, - testCtx.logger, - testCtx.stats, - nil, - nil, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - - err = ca.integrateOrphan() - if err != goque.ErrEmpty { - t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err) + + for _, tc := range []struct { + name string + profile string + keyAlg x509.PublicKeyAlgorithm + wantErr bool + }{ + { + name: "unrecognized profile", + profile: "doesnotexist", + keyAlg: x509.ECDSA, + wantErr: true, + }, + { + name: "unrecognized key algorithm", + profile: "modern", + keyAlg: x509.Ed25519, + wantErr: true, + }, + { + name: "recognized/legacy+ecdsa", + profile: "legacy", + keyAlg: x509.ECDSA, + }, + { + name: "recognized/legacy+rsa", + profile: "legacy", + keyAlg: x509.RSA, + }, + { + name: "recognized/modern+ecdsa", + profile: "modern", + keyAlg: x509.ECDSA, + }, + { + name: "recognized/modern+rsa", + profile: "modern", + keyAlg: x509.RSA, + }, + } { + t.Run(tc.name, func(t *testing.T) { + _, err := ca.pickIssuer(tc.profile, tc.keyAlg) + if err == nil && tc.wantErr { + t.Errorf("pickIssuer(%s, %s) = success, but want error", tc.profile, tc.keyAlg) + } else if err != nil && !tc.wantErr { + t.Errorf("pickIssuer(%s, %s) = %s, but want success", tc.profile, tc.keyAlg, err) + } + }) } +} - // generate basic test cert - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "Failed to generate test key") - tmpl := &x509.Certificate{ - SerialNumber: 
big.NewInt(1), - DNSNames: []string{"test.invalid"}, - NotBefore: fakeNow.Add(-time.Hour), +func TestPickIssuer_Inactive(t *testing.T) { + t.Parallel() + cargs := newCAArgs(t) + + // Load our own set of issuers, but with half of them inactive. + var issuers []*issuance.Issuer + for i, name := range []string{"int-r3", "int-r4", "int-e1", "int-e2"} { + var profiles []string + if i%2 == 0 { + profiles = []string{"legacy", "modern"} + } + issuer, err := issuance.LoadIssuer(issuance.IssuerConfig{ + IssuerURL: fmt.Sprintf("http://not-example.com/i/%s", name), + CRLURLBase: fmt.Sprintf("http://not-example.com/c/%s/", name), + CRLShards: 10, + Location: issuance.IssuerLoc{ + File: fmt.Sprintf("../test/hierarchy/%s.key.pem", name), + CertFile: fmt.Sprintf("../test/hierarchy/%s.cert.pem", name), + }, + Profiles: profiles, + }, cargs.clk) + if err != nil { + t.Fatalf("loading test issuer: %s", err) + } + issuers = append(issuers, issuer) } - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, k.Public(), k) - test.AssertNotError(t, err, "Failed to generate test cert") - err = ca.storeCertificate( - context.Background(), - 1, - 1, - tmpl.SerialNumber, - certDER, - 1, - ) - test.AssertError(t, err, "storeCertificate didn't fail when AddCertificate failed") - - qsa.fail = false - err = ca.integrateOrphan() - test.AssertNotError(t, err, "integrateOrphan failed") - if !qsa.issued.Equal(fakeNow) { - t.Errorf("expected issued time to be %s, got %s", fakeNow, qsa.issued) + cargs.issuers = issuers + + ca, err := cargs.make() + if err != nil { + t.Fatalf("creating test ca: %s", err) } - err = ca.integrateOrphan() - if err != goque.ErrEmpty { - t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err) + + // Calling pickIssuer should never return one of the inactive issuers. 
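+	// Only int-r3 and int-e1 (the even-indexed issuers above) were given
+	// profiles, so they are the only active candidates for any profile.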
+ for range 20 { + issuer, err := ca.pickIssuer("modern", x509.ECDSA) + if err != nil { + t.Fatalf("pickIssuer(modern, ECDSA) = %s, but want success", err) + } + if strings.Contains(issuer.Name(), "E2") { + t.Errorf("pickIssuer(modern, ECDSA) = E2, but only want E1") + } + } + for range 20 { + issuer, err := ca.pickIssuer("modern", x509.RSA) + if err != nil { + t.Fatalf("pickIssuer(modern, RSA) = %s, but want success", err) + } + if strings.Contains(issuer.Name(), "R4") { + t.Errorf("pickIssuer(modern, RSA) = R4, but only want R3") + } } +} - // test with a duplicate cert - ca.queueOrphan(&orphanedCert{ - DER: certDER, - OCSPResp: []byte{}, - RegID: 1, - }) +func TestNoteSignError(t *testing.T) { + testCtx := newCAArgs(t) + metrics := testCtx.metrics + + err := fmt.Errorf("wrapped non-signing error: %w", errors.New("oops")) + metrics.noteSignError(err) + test.AssertMetricWithLabelsEquals(t, metrics.signErrorCount, prometheus.Labels{"type": "HSM"}, 0) + + err = fmt.Errorf("wrapped signing error: %w", pkcs11.Error(5)) + metrics.noteSignError(err) + test.AssertMetricWithLabelsEquals(t, metrics.signErrorCount, prometheus.Labels{"type": "HSM"}, 1) +} + +func TestGenerateSKID(t *testing.T) { + t.Parallel() + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "Error generating key") - qsa.duplicate = true - err = ca.integrateOrphan() - test.AssertNotError(t, err, "integrateOrphan failed with duplicate cert") - if !qsa.issued.Equal(fakeNow) { - t.Errorf("expected issued time to be %s, got %s", fakeNow, qsa.issued) + sha256skid, err := generateSKID(key.Public()) + test.AssertNotError(t, err, "Error generating SKID") + test.AssertEquals(t, len(sha256skid), 20) + test.AssertEquals(t, cap(sha256skid), 20) + features.Reset() +} + +func TestVerifyTBSCertIsDeterministic(t *testing.T) { + t.Parallel() + + // Create first keypair and cert + testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "unable to generate ECDSA private key") + template := &x509.Certificate{ + NotAfter: time.Now().Add(1 * time.Hour), + DNSNames: []string{"example.com"}, + SerialNumber: big.NewInt(1), } - err = ca.integrateOrphan() - if err != goque.ErrEmpty { - t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err) + certDer1, err := x509.CreateCertificate(rand.Reader, template, template, &testKey.PublicKey, testKey) + test.AssertNotError(t, err, "unable to create certificate") + + // Create second keypair and cert + testKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "unable to generate ECDSA private key") + template2 := &x509.Certificate{ + NotAfter: time.Now().Add(2 * time.Hour), + DNSNames: []string{"example.net"}, + SerialNumber: big.NewInt(2), } + certDer2, err := x509.CreateCertificate(rand.Reader, template2, template2, &testKey2.PublicKey, testKey2) + test.AssertNotError(t, err, "unable to create certificate") - // add cert to queue, and recreate queue to make sure it still has the cert - qsa.fail = true - qsa.duplicate = false - err = ca.storeCertificate( - context.Background(), - 1, - 1, - tmpl.SerialNumber, - certDER, - 1, - ) - test.AssertError(t, err, "storeCertificate didn't fail when AddCertificate failed") - err = orphanQueue.Close() - test.AssertNotError(t, err, "Failed to close the queue cleanly") - orphanQueue, err = goque.OpenQueue(tmpDir) - test.AssertNotError(t, err, "Failed to open orphaned certificate queue") - defer func() { _ = orphanQueue.Close() }() - ca.orphanQueue = 
orphanQueue
-
-	qsa.fail = false
-	err = ca.integrateOrphan()
-	test.AssertNotError(t, err, "integrateOrphan failed")
-	if !qsa.issued.Equal(fakeNow) {
-		t.Errorf("expected issued time to be %s, got %s", fakeNow, qsa.issued)
+	testCases := []struct {
+		name          string
+		lintCertBytes []byte
+		leafCertBytes []byte
+		errorSubstr   string
+	}{
+		{
+			name:          "Both nil",
+			lintCertBytes: nil,
+			leafCertBytes: nil,
+			errorSubstr:   "were nil",
+		},
+		{
+			name:          "Missing a value, invalid input",
+			lintCertBytes: nil,
+			leafCertBytes: []byte{0x6, 0x6, 0x6},
+			errorSubstr:   "were nil",
+		},
+		{
+			name:          "Missing a value, valid input",
+			lintCertBytes: nil,
+			leafCertBytes: certDer1,
+			errorSubstr:   "were nil",
+		},
+		{
+			name:          "Mismatched bytes, invalid input",
+			lintCertBytes: []byte{0x6, 0x6, 0x6},
+			leafCertBytes: []byte{0x1, 0x2, 0x3},
+			errorSubstr:   "malformed certificate",
+		},
+		{
+			name:          "Mismatched bytes, invalider input",
+			lintCertBytes: certDer1,
+			leafCertBytes: []byte{0x1, 0x2, 0x3},
+			errorSubstr:   "malformed certificate",
+		},
+		{
+			// This case is an example of when a linting cert's DER bytes are
+			// mismatched compared to the precert or final cert created from
+			// that linting cert's DER bytes.
+			name:          "Mismatched bytes, valid input",
+			lintCertBytes: certDer1,
+			leafCertBytes: certDer2,
+			errorSubstr:   "mismatch between",
+		},
+		{
+			// Take this with a grain of salt since this test is not actually
+			// creating a linting certificate and performing two
+			// x509.CreateCertificate() calls like
+			// ca.IssueCertificateForPrecertificate and
+			// ca.issuePrecertificateInner do. However, we're still going to
+			// verify the equality.
+			name:          "Valid",
+			lintCertBytes: certDer1,
+			leafCertBytes: certDer1,
+		},
 	}
-	err = ca.integrateOrphan()
-	if err != goque.ErrEmpty {
-		t.Fatalf("Unexpected error, wanted %q, got %q", goque.ErrEmpty, err)
+
+	for _, testCase := range testCases {
+		t.Run(testCase.name, func(t *testing.T) {
+			t.Parallel()
+			err := tbsCertIsDeterministic(testCase.lintCertBytes, testCase.leafCertBytes)
+			if testCase.errorSubstr != "" {
+				test.AssertError(t, err, "your lack of errors is disturbing")
+				test.AssertContains(t, err.Error(), testCase.errorSubstr)
+			} else {
+				test.AssertNotError(t, err, "unexpected error")
+			}
+		})
 	}
 }
diff --git a/ca/crl.go b/ca/crl.go
new file mode 100644
index 00000000000..22cc5deb1d4
--- /dev/null
+++ b/ca/crl.go
@@ -0,0 +1,211 @@
+package ca
+
+import (
+	"crypto/sha256"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
+	"github.com/letsencrypt/boulder/core"
+	corepb "github.com/letsencrypt/boulder/core/proto"
+	bcrl "github.com/letsencrypt/boulder/crl"
+	"github.com/letsencrypt/boulder/issuance"
+	blog "github.com/letsencrypt/boulder/log"
+)
+
+type crlImpl struct {
+	capb.UnsafeCRLGeneratorServer
+	issuers   map[issuance.NameID]*issuance.Issuer
+	profile   *issuance.CRLProfile
+	maxLogLen int
+	log       blog.Logger
+	metrics   *caMetrics
+}
+
+var _ capb.CRLGeneratorServer = (*crlImpl)(nil)
+
+// NewCRLImpl returns a new object which fulfils the ca.proto CRLGenerator
+// interface. It uses the list of issuers to determine what issuers it can
+// issue CRLs from. profileConfig sets the validity period (inclusive) and
+// maximum backdate of the resulting CRLs.
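+// maxLogLen caps the length of each audit log line that enumerates the
+// revoked serials in a shard; longer lists are split across multiple lines.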
+func NewCRLImpl( + issuers []*issuance.Issuer, + profileConfig issuance.CRLProfileConfig, + maxLogLen int, + logger blog.Logger, + metrics *caMetrics, +) (*crlImpl, error) { + issuerMap := make(map[issuance.NameID]*issuance.Issuer) + for _, issuer := range issuers { + nameID := issuer.NameID() + _, found := issuerMap[nameID] + if found { + return nil, fmt.Errorf("got two issuers with same NameID (%q)", issuer.Name()) + } + issuerMap[nameID] = issuer + } + + profile, err := issuance.NewCRLProfile(profileConfig) + if err != nil { + return nil, fmt.Errorf("loading CRL profile: %w", err) + } + + return &crlImpl{ + issuers: issuerMap, + profile: profile, + maxLogLen: maxLogLen, + log: logger, + metrics: metrics, + }, nil +} + +func (ci *crlImpl) GenerateCRL(stream grpc.BidiStreamingServer[capb.GenerateCRLRequest, capb.GenerateCRLResponse]) error { + var issuer *issuance.Issuer + var req *issuance.CRLRequest + rcs := make([]x509.RevocationListEntry, 0) + + for { + in, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return err + } + + switch payload := in.Payload.(type) { + case *capb.GenerateCRLRequest_Metadata: + if req != nil { + return errors.New("got more than one metadata message") + } + + req, err = ci.metadataToRequest(payload.Metadata) + if err != nil { + return err + } + + var ok bool + issuer, ok = ci.issuers[issuance.NameID(payload.Metadata.IssuerNameID)] + if !ok { + return fmt.Errorf("got unrecognized IssuerNameID: %d", payload.Metadata.IssuerNameID) + } + + case *capb.GenerateCRLRequest_Entry: + rc, err := ci.entryToRevokedCertificate(payload.Entry) + if err != nil { + return err + } + + rcs = append(rcs, *rc) + + default: + return errors.New("got empty or malformed message in input stream") + } + } + + if req == nil { + return errors.New("no crl metadata received") + } + + // Compute a unique ID for this issuer-number-shard combo, to tie together all + // the audit log lines related to its issuance. 
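+	// Because the checksum input (issuer NameID, CRL number, shard) is
+	// deterministic, re-generations of the same shard share a logID and can
+	// be correlated across runs.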
+	logID := blog.LogLineChecksum(fmt.Sprintf("%d", issuer.NameID()) + req.Number.String() + fmt.Sprintf("%d", req.Shard))
+	ci.log.AuditInfo("Signing CRL", map[string]any{
+		"logID":      logID,
+		"issuer":     issuer.Cert.Subject.CommonName,
+		"number":     req.Number.String(),
+		"shard":      req.Shard,
+		"thisUpdate": req.ThisUpdate.Format(time.RFC3339),
+		"numEntries": len(rcs),
+	})
+
+	if len(rcs) > 0 {
+		builder := strings.Builder{}
+		for i := range len(rcs) {
+			fmt.Fprintf(&builder, "\"%x:%d\",", rcs[i].SerialNumber.Bytes(), rcs[i].ReasonCode)
+
+			if builder.Len() >= ci.maxLogLen {
+				ci.log.AuditInfo("Signing CRL entries", map[string]string{
+					"logID":   logID,
+					"entries": fmt.Sprintf("[%s]", strings.TrimSuffix(builder.String(), ",")),
+				})
+				builder = strings.Builder{}
+			}
+		}
+		ci.log.AuditInfo("Signing CRL entries", map[string]any{
+			"logID":   logID,
+			"entries": fmt.Sprintf("[%s]", strings.TrimSuffix(builder.String(), ",")),
+		})
+	}
+
+	req.Entries = rcs
+
+	crlBytes, err := issuer.IssueCRL(ci.profile, req)
+	if err != nil {
+		ci.metrics.noteSignError(err)
+		return fmt.Errorf("signing crl: %w", err)
+	}
+	ci.metrics.signatureCount.With(prometheus.Labels{"purpose": "crl", "issuer": issuer.Name()}).Inc()
+
+	hash := sha256.Sum256(crlBytes)
+	ci.log.AuditInfo("Signing CRL success", map[string]any{
+		"logID": logID,
+		"size":  len(crlBytes),
+		"hash":  fmt.Sprintf("%x", hash),
+	})
+
+	for i := 0; i < len(crlBytes); i += 1000 {
+		j := min(i+1000, len(crlBytes))
+		err = stream.Send(&capb.GenerateCRLResponse{
+			Chunk: crlBytes[i:j],
+		})
+		if err != nil {
+			return err
+		}
+		// Log the running total of bytes actually written so far; j is the end
+		// of the chunk that was just sent.
+		ci.log.Debugf("Wrote %d bytes to output stream", j)
+	}
+
+	return nil
+}
+
+func (ci *crlImpl) metadataToRequest(meta *capb.CRLMetadata) (*issuance.CRLRequest, error) {
+	if core.IsAnyNilOrZero(meta.IssuerNameID, meta.ThisUpdate, meta.ShardIdx) {
+		return nil, errors.New("got incomplete metadata message")
+	}
+	thisUpdate := meta.ThisUpdate.AsTime()
+	number := bcrl.Number(thisUpdate)
+
+	return &issuance.CRLRequest{
+		Number:     number,
+		Shard:      meta.ShardIdx,
+		ThisUpdate: thisUpdate,
+	}, nil
+}
+
+func (ci *crlImpl) entryToRevokedCertificate(entry *corepb.CRLEntry) (*x509.RevocationListEntry, error) {
+	serial, err := core.StringToSerial(entry.Serial)
+	if err != nil {
+		return nil, err
+	}
+
+	if core.IsAnyNilOrZero(entry.RevokedAt) {
+		return nil, errors.New("got empty or zero revocation timestamp")
+	}
+	revokedAt := entry.RevokedAt.AsTime()
+
+	return &x509.RevocationListEntry{
+		SerialNumber:   serial,
+		RevocationTime: revokedAt,
+		ReasonCode:     int(entry.Reason),
+	}, nil
+}
diff --git a/ca/crl_test.go b/ca/crl_test.go
new file mode 100644
index 00000000000..b6c78ebfe29
--- /dev/null
+++ b/ca/crl_test.go
@@ -0,0 +1,284 @@
+package ca
+
+import (
+	"crypto/x509"
+	"fmt"
+	"io"
+	"testing"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
+	"github.com/letsencrypt/boulder/config"
+	corepb "github.com/letsencrypt/boulder/core/proto"
+	"github.com/letsencrypt/boulder/issuance"
+	"github.com/letsencrypt/boulder/test"
+)
+
+type mockGenerateCRLBidiStream struct {
+	grpc.ServerStream
+	input  <-chan *capb.GenerateCRLRequest
+	output chan<- *capb.GenerateCRLResponse
+}
+
+func (s mockGenerateCRLBidiStream) Recv() (*capb.GenerateCRLRequest, error) {
+	next, ok := <-s.input
+	if !ok {
+		return nil, io.EOF
+	}
+	return next, nil
+}
+
+func (s mockGenerateCRLBidiStream) Send(entry *capb.GenerateCRLResponse) error {
+	
+	s.output <- entry
+	return nil
+}
+
+func TestGenerateCRL(t *testing.T) {
+	t.Parallel()
+	cargs := newCAArgs(t)
+	crli, err := NewCRLImpl(
+		cargs.issuers,
+		issuance.CRLProfileConfig{
+			ValidityInterval: config.Duration{Duration: 216 * time.Hour},
+			MaxBackdate:      config.Duration{Duration: time.Hour},
+		},
+		100,
+		cargs.logger,
+		cargs.metrics,
+	)
+	test.AssertNotError(t, err, "Failed to create crl impl")
+	errs := make(chan error, 1)
+
+	// Test that we get an error when no metadata is sent.
+	ins := make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with no metadata")
+	test.AssertContains(t, err.Error(), "no crl metadata received")
+
+	// Test that we get an error when incomplete metadata is sent.
+	ins = make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{},
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with incomplete metadata")
+	test.AssertContains(t, err.Error(), "got incomplete metadata message")
+
+	// Test that we get an error when unrecognized metadata is sent.
+	ins = make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+	now := cargs.clk.Now()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: 1,
+				ThisUpdate:   timestamppb.New(now),
+				ShardIdx:     1,
+			},
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with bad metadata")
+	test.AssertContains(t, err.Error(), "got unrecognized IssuerNameID")
+
+	// Test that we get an error when two metadata are sent.
+	ins = make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: int64(cargs.issuers[0].NameID()),
+				ThisUpdate:   timestamppb.New(now),
+				ShardIdx:     1,
+			},
+		},
+	}
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: int64(cargs.issuers[0].NameID()),
+				ThisUpdate:   timestamppb.New(now),
+				ShardIdx:     1,
+			},
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with duplicate metadata")
+	test.AssertContains(t, err.Error(), "got more than one metadata message")
+
+	// Test that we get an error when an entry has a bad serial.
+	ins = make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Entry{
+			Entry: &corepb.CRLEntry{
+				Serial:    "123",
+				Reason:    1,
+				RevokedAt: timestamppb.New(now),
+			},
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with bad serials")
+	test.AssertContains(t, err.Error(), "invalid serial number")
+
+	// Test that we get an error when an entry has a bad revocation time.
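+	// (The nil RevokedAt below is rejected by entryToRevokedCertificate.)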
+	ins = make(chan *capb.GenerateCRLRequest)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: nil})
+	}()
+
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Entry{
+			Entry: &corepb.CRLEntry{
+				Serial:    "deadbeefdeadbeefdeadbeefdeadbeefdead",
+				Reason:    1,
+				RevokedAt: nil,
+			},
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertError(t, err, "can't generate CRL with bad revocation time")
+	test.AssertContains(t, err.Error(), "got empty or zero revocation timestamp")
+
+	// Test that generating an empty CRL works.
+	ins = make(chan *capb.GenerateCRLRequest)
+	outs := make(chan *capb.GenerateCRLResponse)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: outs})
+		close(outs)
+	}()
+	crlBytes := make([]byte, 0)
+	done := make(chan struct{})
+	go func() {
+		for resp := range outs {
+			crlBytes = append(crlBytes, resp.Chunk...)
+		}
+		close(done)
+	}()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: int64(cargs.issuers[0].NameID()),
+				ThisUpdate:   timestamppb.New(now),
+				ShardIdx:     1,
+			},
+		},
+	}
+	close(ins)
+	err = <-errs
+	<-done
+	test.AssertNotError(t, err, "generating empty CRL should work")
+	test.Assert(t, len(crlBytes) > 0, "should have gotten some CRL bytes")
+	crl, err := x509.ParseRevocationList(crlBytes)
+	test.AssertNotError(t, err, "should be able to parse empty CRL")
+	test.AssertEquals(t, len(crl.RevokedCertificateEntries), 0)
+	err = crl.CheckSignatureFrom(cargs.issuers[0].Cert.Certificate)
+	test.AssertNotError(t, err, "CRL signature should validate")
+	test.AssertEquals(t, crl.ThisUpdate, now)
+	test.AssertEquals(t, crl.ThisUpdate, timestamppb.New(now).AsTime())
+
+	// Test that generating a CRL with some entries works.
+	ins = make(chan *capb.GenerateCRLRequest)
+	outs = make(chan *capb.GenerateCRLResponse)
+	go func() {
+		errs <- crli.GenerateCRL(mockGenerateCRLBidiStream{input: ins, output: outs})
+		close(outs)
+	}()
+	crlBytes = make([]byte, 0)
+	done = make(chan struct{})
+	go func() {
+		for resp := range outs {
+			crlBytes = append(crlBytes, resp.Chunk...)
+		}
+		close(done)
+	}()
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Metadata{
+			Metadata: &capb.CRLMetadata{
+				IssuerNameID: int64(cargs.issuers[0].NameID()),
+				ThisUpdate:   timestamppb.New(now),
+				ShardIdx:     1,
+			},
+		},
+	}
+	ins <- &capb.GenerateCRLRequest{
+		Payload: &capb.GenerateCRLRequest_Entry{
+			Entry: &corepb.CRLEntry{
+				Serial:    "000000000000000000000000000000000000",
+				RevokedAt: timestamppb.New(now),
+				// Reason 0, Unspecified, is omitted.
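+				// RFC 5280, Section 5.3.1, says the reasonCode extension
+				// SHOULD be absent rather than encoded as unspecified (0).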
+ }, + }, + } + ins <- &capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Entry{ + Entry: &corepb.CRLEntry{ + Serial: "111111111111111111111111111111111111", + Reason: 1, // keyCompromise + RevokedAt: timestamppb.New(now), + }, + }, + } + ins <- &capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Entry{ + Entry: &corepb.CRLEntry{ + Serial: "444444444444444444444444444444444444", + Reason: 4, // superseded + RevokedAt: timestamppb.New(now), + }, + }, + } + ins <- &capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Entry{ + Entry: &corepb.CRLEntry{ + Serial: "555555555555555555555555555555555555", + Reason: 5, // cessationOfOperation + RevokedAt: timestamppb.New(now), + }, + }, + } + ins <- &capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Entry{ + Entry: &corepb.CRLEntry{ + Serial: "999999999999999999999999999999999999", + Reason: 9, // privilegeWithdrawn + RevokedAt: timestamppb.New(now), + }, + }, + } + close(ins) + err = <-errs + <-done + test.AssertNotError(t, err, "generating empty CRL should work") + test.Assert(t, len(crlBytes) > 0, "should have gotten some CRL bytes") + crl, err = x509.ParseRevocationList(crlBytes) + test.AssertNotError(t, err, "should be able to parse empty CRL") + test.AssertEquals(t, len(crl.RevokedCertificateEntries), 5) + err = crl.CheckSignatureFrom(cargs.issuers[0].Cert.Certificate) + test.AssertNotError(t, err, "CRL signature should validate") +} diff --git a/ca/ecdsa_allow_list.go b/ca/ecdsa_allow_list.go deleted file mode 100644 index 53228fa2845..00000000000 --- a/ca/ecdsa_allow_list.go +++ /dev/null @@ -1,101 +0,0 @@ -package ca - -import ( - "sync" - - "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/reloader" - "github.com/prometheus/client_golang/prometheus" - "gopkg.in/yaml.v2" -) - -// ECDSAAllowList acts as a container for a map of Registration IDs, a -// mutex, and a file reloader. This allows the map of IDs to be updated -// safely if changes to the allow list are detected. -type ECDSAAllowList struct { - sync.RWMutex - regIDsMap map[int64]bool - reloader *reloader.Reloader - logger log.Logger - statusGauge *prometheus.GaugeVec -} - -// Update is an exported method (typically specified as a callback to a -// file reloader) that replaces the inner `regIDsMap` with the contents -// of a YAML list (as bytes). -func (e *ECDSAAllowList) Update(contents []byte) error { - var regIDs []int64 - err := yaml.Unmarshal(contents, ®IDs) - if err != nil { - return err - } - e.Lock() - defer e.Unlock() - e.regIDsMap = makeRegIDsMap(regIDs) - // nil check for testing purposes - if e.statusGauge != nil { - e.statusGauge.WithLabelValues("succeeded").Set(float64(len(e.regIDsMap))) - } - return nil -} - -// UpdateCallbackErr is an exported method (typically specified as a -// callback to a file reloader) that records failed allow list file -// reload attempts. -func (e *ECDSAAllowList) UpdateCallbackErr(err error) { - e.logger.Errf("error reloading ECDSA allowed list: %s", err) - e.RLock() - defer e.RUnlock() - // nil check for testing purposes - if e.statusGauge != nil { - e.statusGauge.WithLabelValues("failed").Set(float64(len(e.regIDsMap))) - } -} - -// permitted checks if ECDSA issuance is permitted for the specified -// Registration ID. -func (e *ECDSAAllowList) permitted(regID int64) bool { - e.RLock() - defer e.RUnlock() - return e.regIDsMap[regID] -} - -// length returns the number of entries currently in the allow list. 
-func (e *ECDSAAllowList) length() int { - e.RLock() - defer e.RUnlock() - return len(e.regIDsMap) -} - -// Stop stops an active allow list reloader. Typically called during -// boulder-ca shutdown. -func (e *ECDSAAllowList) Stop() { - e.Lock() - defer e.Unlock() - if e.reloader != nil { - e.reloader.Stop() - } -} - -func makeRegIDsMap(regIDs []int64) map[int64]bool { - regIDsMap := make(map[int64]bool) - for _, regID := range regIDs { - regIDsMap[regID] = true - } - return regIDsMap -} - -// NewECDSAAllowListFromFile is exported to allow `boulder-ca` to -// construct a new `ECDSAAllowList` object. An initial entry count is -// returned to `boulder-ca` for logging purposes. -func NewECDSAAllowListFromFile(filename string, logger log.Logger, metric *prometheus.GaugeVec) (*ECDSAAllowList, int, error) { - allowList := &ECDSAAllowList{logger: logger, statusGauge: metric} - // Create an allow list reloader. This also populates the inner - // allowList regIDsMap. - reloader, err := reloader.New(filename, allowList.Update, allowList.UpdateCallbackErr) - if err != nil { - return nil, 0, err - } - allowList.reloader = reloader - return allowList, allowList.length(), nil -} diff --git a/ca/ecdsa_allow_list_test.go b/ca/ecdsa_allow_list_test.go deleted file mode 100644 index b41c8c20957..00000000000 --- a/ca/ecdsa_allow_list_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package ca - -import ( - "testing" - - "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/reloader" - "github.com/prometheus/client_golang/prometheus" -) - -func TestNewECDSAAllowListFromFile(t *testing.T) { - type args struct { - filename string - reloader *reloader.Reloader - logger log.Logger - metric *prometheus.GaugeVec - } - tests := []struct { - name string - args args - want1337Permitted bool - wantEntries int - wantErrBool bool - }{ - { - name: "one entry", - args: args{"testdata/ecdsa_allow_list.yml", nil, nil, nil}, - want1337Permitted: true, - wantEntries: 1, - wantErrBool: false, - }, - { - name: "one entry but it's not 1337", - args: args{"testdata/ecdsa_allow_list2.yml", nil, nil, nil}, - want1337Permitted: false, - wantEntries: 1, - wantErrBool: false, - }, - { - name: "should error due to no file", - args: args{"testdata/ecdsa_allow_list_no_exist.yml", nil, nil, nil}, - want1337Permitted: false, - wantEntries: 0, - wantErrBool: true, - }, - { - name: "should error due to malformed YAML", - args: args{"testdata/ecdsa_allow_list_malformed.yml", nil, nil, nil}, - want1337Permitted: false, - wantEntries: 0, - wantErrBool: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := NewECDSAAllowListFromFile(tt.args.filename, tt.args.logger, tt.args.metric) - - if (err != nil) != tt.wantErrBool { - t.Errorf("NewECDSAAllowListFromFile() error = %v, wantErr %v", err, tt.wantErrBool) - t.Error(got, got1, err) - return - } - if got != nil && got.permitted(1337) != tt.want1337Permitted { - t.Errorf("NewECDSAAllowListFromFile() got = %v, want %v", got, tt.want1337Permitted) - } - if got1 != tt.wantEntries { - t.Errorf("NewECDSAAllowListFromFile() got1 = %v, want %v", got1, tt.wantEntries) - } - }) - } -} diff --git a/ca/ocsp.go b/ca/ocsp.go deleted file mode 100644 index 6522d68d7e5..00000000000 --- a/ca/ocsp.go +++ /dev/null @@ -1,269 +0,0 @@ -package ca - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/jmhodges/clock" - "github.com/miekg/pkcs11" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" - - capb 
"github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" -) - -// TODO(#5152): Simplify this when we've fully deprecated old-style IssuerIDs. -type ocspIssuerMaps struct { - byID map[issuance.IssuerID]*issuance.Issuer - byNameID map[issuance.IssuerNameID]*issuance.Issuer -} - -// ocspImpl provides a backing implementation for the OCSP gRPC service. -type ocspImpl struct { - capb.UnimplementedOCSPGeneratorServer - issuers ocspIssuerMaps - ocspLifetime time.Duration - ocspLogQueue *ocspLogQueue - log blog.Logger - signatureCount *prometheus.CounterVec - signErrorCount *prometheus.CounterVec - clk clock.Clock -} - -// makeOCSPIssuerMaps processes a list of issuers into a set of maps, mapping -// nearly-unique identifiers of those issuers to the issuers themselves. Note -// that, if two issuers have the same nearly-unique ID, the *latter* one in -// the input list "wins". -func makeOCSPIssuerMaps(issuers []*issuance.Issuer) (ocspIssuerMaps, error) { - issuersByID := make(map[issuance.IssuerID]*issuance.Issuer, len(issuers)) - issuersByNameID := make(map[issuance.IssuerNameID]*issuance.Issuer, len(issuers)) - for _, issuer := range issuers { - issuersByID[issuer.ID()] = issuer - issuersByNameID[issuer.Cert.NameID()] = issuer - } - return ocspIssuerMaps{issuersByID, issuersByNameID}, nil -} - -func NewOCSPImpl( - issuers []*issuance.Issuer, - ocspLifetime time.Duration, - ocspLogMaxLength int, - ocspLogPeriod time.Duration, - logger blog.Logger, - stats prometheus.Registerer, - signatureCount *prometheus.CounterVec, - signErrorCount *prometheus.CounterVec, - clk clock.Clock, -) (*ocspImpl, error) { - issuersByID := make(map[issuance.IssuerID]*issuance.Issuer, len(issuers)) - for _, issuer := range issuers { - issuersByID[issuer.ID()] = issuer - } - - var ocspLogQueue *ocspLogQueue - if ocspLogMaxLength > 0 { - ocspLogQueue = newOCSPLogQueue(ocspLogMaxLength, ocspLogPeriod, stats, logger) - } - - issuerMaps, err := makeOCSPIssuerMaps(issuers) - if err != nil { - return nil, err - } - - oi := &ocspImpl{ - issuers: issuerMaps, - ocspLifetime: ocspLifetime, - ocspLogQueue: ocspLogQueue, - log: logger, - signatureCount: signatureCount, - signErrorCount: signErrorCount, - clk: clk, - } - return oi, nil -} - -// LogOCSPLoop collects OCSP generation log events into bundles, and logs -// them periodically. -func (oi *ocspImpl) LogOCSPLoop() { - if oi.ocspLogQueue != nil { - oi.ocspLogQueue.loop() - } -} - -// Stop asks this ocspImpl to shut down. It must be called after the -// corresponding RPC service is shut down and there are no longer any inflight -// RPCs. It will attempt to drain any logging queues (which may block), and will -// return only when done. -func (oi *ocspImpl) Stop() { - if oi.ocspLogQueue != nil { - oi.ocspLogQueue.stop() - } -} - -// GenerateOCSP produces a new OCSP response and returns it -func (oi *ocspImpl) GenerateOCSP(ctx context.Context, req *capb.GenerateOCSPRequest) (*capb.OCSPResponse, error) { - // req.Status, req.Reason, and req.RevokedAt are often 0, for non-revoked certs. 
- if core.IsAnyNilOrZero(req, req.Serial, req.IssuerID) { - return nil, berrors.InternalServerError("Incomplete generate OCSP request") - } - - serialInt, err := core.StringToSerial(req.Serial) - if err != nil { - return nil, err - } - serial := serialInt - - issuer, ok := oi.issuers.byNameID[issuance.IssuerNameID(req.IssuerID)] - if !ok { - // TODO(#5152): Remove this fallback to old-style IssuerIDs. - issuer, ok = oi.issuers.byID[issuance.IssuerID(req.IssuerID)] - if !ok { - return nil, fmt.Errorf("This CA doesn't have an issuer cert with ID %d", req.IssuerID) - } - } - - now := oi.clk.Now().Truncate(time.Hour) - tbsResponse := ocsp.Response{ - Status: ocspStatusToCode[req.Status], - SerialNumber: serial, - ThisUpdate: now, - NextUpdate: now.Add(oi.ocspLifetime - time.Second), - } - if tbsResponse.Status == ocsp.Revoked { - tbsResponse.RevokedAt = time.Unix(0, req.RevokedAt) - tbsResponse.RevocationReason = int(req.Reason) - } - - if oi.ocspLogQueue != nil { - oi.ocspLogQueue.enqueue(serial.Bytes(), now, ocsp.ResponseStatus(tbsResponse.Status)) - } - - ocspResponse, err := ocsp.CreateResponse(issuer.Cert.Certificate, issuer.Cert.Certificate, tbsResponse, issuer.Signer) - if err == nil { - oi.signatureCount.With(prometheus.Labels{"purpose": "ocsp", "issuer": issuer.Name()}).Inc() - } else { - var pkcs11Error *pkcs11.Error - if errors.As(err, &pkcs11Error) { - oi.signErrorCount.WithLabelValues("HSM").Inc() - } - } - return &capb.OCSPResponse{Response: ocspResponse}, err -} - -// ocspLogQueue accumulates OCSP logging events and writes several of them -// in a single log line. This reduces the number of log lines and bytes, -// which would otherwise be quite high. As of Jan 2021 we do approximately -// 550 rps of OCSP generation events. We can turn that into about 5.5 rps -// of log lines if we accumulate 100 entries per line, which amounts to about -// 3900 bytes per log line. -// Summary of log line usage: -// serial in hex: 36 bytes, separator characters: 2 bytes, status: 1 byte -// If maxLogLen is less than the length of a single log item, generate -// one log line for every item. -type ocspLogQueue struct { - // Maximum length, in bytes, of a single log line. - maxLogLen int - // Maximum amount of time between OCSP logging events. - period time.Duration - queue chan ocspLog - // This allows the stop() function to block until we've drained the queue. - wg sync.WaitGroup - depth prometheus.Gauge - logger blog.Logger - clk clock.Clock -} - -type ocspLog struct { - serial []byte - time time.Time - status ocsp.ResponseStatus -} - -func newOCSPLogQueue( - maxLogLen int, - period time.Duration, - stats prometheus.Registerer, - logger blog.Logger, -) *ocspLogQueue { - depth := prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "ocsp_log_queue_depth", - Help: "Number of OCSP generation log entries waiting to be written", - }) - stats.MustRegister(depth) - olq := ocspLogQueue{ - maxLogLen: maxLogLen, - period: period, - queue: make(chan ocspLog), - wg: sync.WaitGroup{}, - depth: depth, - logger: logger, - clk: clock.New(), - } - olq.wg.Add(1) - return &olq -} - -func (olq *ocspLogQueue) enqueue(serial []byte, time time.Time, status ocsp.ResponseStatus) { - olq.queue <- ocspLog{ - serial: append([]byte{}, serial...), - time: time, - status: status, - } -} - -// To ensure we don't go over the max log line length, use a safety margin -// equal to the expected length of an entry. 
-const ocspSingleLogEntryLen = 39 - -// loop consumes events from the queue channel, batches them up, and -// logs them in batches of maxLogLen / 39, or every `period`, -// whichever comes first. -func (olq *ocspLogQueue) loop() { - defer olq.wg.Done() - done := false - for !done { - var builder strings.Builder - deadline := olq.clk.After(olq.period) - inner: - for { - olq.depth.Set(float64(len(olq.queue))) - select { - case ol, ok := <-olq.queue: - if !ok { - // Channel was closed, finish. - done = true - break inner - } - fmt.Fprintf(&builder, "%x:%d,", ol.serial, ol.status) - case <-deadline: - break inner - } - if builder.Len()+ocspSingleLogEntryLen >= olq.maxLogLen { - break - } - } - if builder.Len() > 0 { - olq.logger.AuditInfof("OCSP signed: %s", builder.String()) - } - } -} - -// stop the loop, and wait for it to finish. This must be called only after -// it's guaranteed that nothing will call enqueue again (for instance, after -// the OCSPGenerator and CertificateAuthority services are shut down with -// no RPCs in flight). Otherwise, enqueue will panic. -// If this is called without previously starting a goroutine running `.loop()`, -// it will block forever. -func (olq *ocspLogQueue) stop() { - close(olq.queue) - olq.wg.Wait() -} diff --git a/ca/ocsp_test.go b/ca/ocsp_test.go deleted file mode 100644 index 921c70a4e41..00000000000 --- a/ca/ocsp_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package ca - -import ( - "context" - "crypto/x509" - "encoding/hex" - "testing" - "time" - - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" - "golang.org/x/crypto/ocsp" -) - -func serial(t *testing.T) []byte { - serial, err := hex.DecodeString("aabbccddeeffaabbccddeeff000102030405") - if err != nil { - t.Fatal(err) - } - return serial - -} - -func TestOCSP(t *testing.T) { - testCtx := setup(t) - ca, err := NewCertificateAuthorityImpl( - &mockSA{}, - testCtx.pa, - testCtx.ocsp, - testCtx.boulderIssuers, - nil, - testCtx.certExpiry, - testCtx.certBackdate, - testCtx.serialPrefix, - testCtx.maxNames, - testCtx.keyPolicy, - nil, - testCtx.logger, - testCtx.stats, - testCtx.signatureCount, - testCtx.signErrorCount, - testCtx.fc) - test.AssertNotError(t, err, "Failed to create CA") - ocspi := testCtx.ocsp - - // Issue a certificate from the RSA issuer caCert, then check OCSP comes from the same issuer. - rsaIssuerID := ca.issuers.byAlg[x509.RSA].ID() - rsaCertPB, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: CNandSANCSR, RegistrationID: arbitraryRegID}) - test.AssertNotError(t, err, "Failed to issue certificate") - rsaCert, err := x509.ParseCertificate(rsaCertPB.DER) - test.AssertNotError(t, err, "Failed to parse rsaCert") - rsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{ - Serial: core.SerialToString(rsaCert.SerialNumber), - IssuerID: int64(rsaIssuerID), - Status: string(core.OCSPStatusGood), - }) - test.AssertNotError(t, err, "Failed to generate OCSP") - rsaOCSP, err := ocsp.ParseResponse(rsaOCSPPB.Response, caCert.Certificate) - test.AssertNotError(t, err, "Failed to parse / validate OCSP for rsaCert") - test.AssertEquals(t, rsaOCSP.Status, 0) - test.AssertEquals(t, rsaOCSP.RevocationReason, 0) - test.AssertEquals(t, rsaOCSP.SerialNumber.Cmp(rsaCert.SerialNumber), 0) - - // Issue a certificate from the ECDSA issuer caCert2, then check OCSP comes from the same issuer. 
- ecdsaIssuerID := ca.issuers.byAlg[x509.ECDSA].ID() - ecdsaCertPB, err := ca.IssuePrecertificate(ctx, &capb.IssueCertificateRequest{Csr: ECDSACSR, RegistrationID: arbitraryRegID}) - test.AssertNotError(t, err, "Failed to issue certificate") - ecdsaCert, err := x509.ParseCertificate(ecdsaCertPB.DER) - test.AssertNotError(t, err, "Failed to parse ecdsaCert") - ecdsaOCSPPB, err := ocspi.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{ - Serial: core.SerialToString(ecdsaCert.SerialNumber), - IssuerID: int64(ecdsaIssuerID), - Status: string(core.OCSPStatusGood), - }) - test.AssertNotError(t, err, "Failed to generate OCSP") - ecdsaOCSP, err := ocsp.ParseResponse(ecdsaOCSPPB.Response, caCert2.Certificate) - test.AssertNotError(t, err, "Failed to parse / validate OCSP for ecdsaCert") - test.AssertEquals(t, ecdsaOCSP.Status, 0) - test.AssertEquals(t, ecdsaOCSP.RevocationReason, 0) - test.AssertEquals(t, ecdsaOCSP.SerialNumber.Cmp(ecdsaCert.SerialNumber), 0) - - // GenerateOCSP with a bad IssuerID should fail. - _, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{ - Serial: core.SerialToString(rsaCert.SerialNumber), - IssuerID: int64(666), - Status: string(core.OCSPStatusGood), - }) - test.AssertError(t, err, "GenerateOCSP didn't fail with invalid IssuerID") - - // GenerateOCSP with a bad Serial should fail. - _, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{ - Serial: "BADDECAF", - IssuerID: int64(rsaIssuerID), - Status: string(core.OCSPStatusGood), - }) - test.AssertError(t, err, "GenerateOCSP didn't fail with invalid Serial") - - // GenerateOCSP with a valid-but-nonexistent Serial should *not* fail. - _, err = ocspi.GenerateOCSP(context.Background(), &capb.GenerateOCSPRequest{ - Serial: "03DEADBEEFBADDECAFFADEFACECAFE30", - IssuerID: int64(rsaIssuerID), - Status: string(core.OCSPStatusGood), - }) - test.AssertNotError(t, err, "GenerateOCSP failed with fake-but-valid Serial") -} - -// Set up an ocspLogQueue with a very long period and a large maxLen, -// to ensure any buffered entries get flushed on `.stop()`. -func TestOcspLogFlushOnExit(t *testing.T) { - t.Parallel() - log := blog.NewMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(4000, 10000*time.Millisecond, stats, log) - go queue.loop() - queue.enqueue(serial(t), time.Now(), ocsp.ResponseStatus(ocsp.Good)) - queue.stop() - - expected := []string{ - "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0,", - } - test.AssertDeepEquals(t, log.GetAll(), expected) -} - -// Ensure log lines are sent when they exceed maxLen. -func TestOcspFlushOnLength(t *testing.T) { - t.Parallel() - log := blog.NewMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(100, 100*time.Millisecond, stats, log) - go queue.loop() - for i := 0; i < 5; i++ { - queue.enqueue(serial(t), time.Now(), ocsp.ResponseStatus(ocsp.Good)) - } - queue.stop() - - expected := []string{ - "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0,aabbccddeeffaabbccddeeff000102030405:0,", - "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0,aabbccddeeffaabbccddeeff000102030405:0,", - "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0,", - } - test.AssertDeepEquals(t, log.GetAll(), expected) -} - -// Ensure log lines are sent after a timeout. 
-func TestOcspFlushOnTimeout(t *testing.T) { - t.Parallel() - log := blog.NewWaitingMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(90000, 10*time.Millisecond, stats, log) - - go queue.loop() - queue.enqueue(serial(t), time.Now(), ocsp.ResponseStatus(ocsp.Good)) - - expected := "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0," - logLines, err := log.WaitForMatch("OCSP signed", 50*time.Millisecond) - test.AssertNotError(t, err, "error in mock log") - test.AssertDeepEquals(t, logLines, expected) - queue.stop() -} - -// If the deadline passes and nothing has been logged, we should not log a blank line. -func TestOcspNoEmptyLines(t *testing.T) { - t.Parallel() - log := blog.NewMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(90000, 10*time.Millisecond, stats, log) - - go queue.loop() - time.Sleep(50 * time.Millisecond) - queue.stop() - - test.AssertDeepEquals(t, log.GetAll(), []string{}) -} - -// If the maxLogLen is shorter than one entry, log everything immediately. -func TestOcspLogWhenMaxLogLenIsShort(t *testing.T) { - t.Parallel() - log := blog.NewMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(3, 10000*time.Millisecond, stats, log) - go queue.loop() - queue.enqueue(serial(t), time.Now(), ocsp.ResponseStatus(ocsp.Good)) - queue.stop() - - expected := []string{ - "INFO: [AUDIT] OCSP signed: aabbccddeeffaabbccddeeff000102030405:0,", - } - test.AssertDeepEquals(t, log.GetAll(), expected) -} - -// Enqueueing entries after stop causes panic. -func TestOcspLogPanicsOnEnqueueAfterStop(t *testing.T) { - t.Parallel() - - log := blog.NewMock() - stats := metrics.NoopRegisterer - queue := newOCSPLogQueue(4000, 10000*time.Millisecond, stats, log) - go queue.loop() - queue.stop() - - defer func() { - if r := recover(); r == nil { - t.Errorf("The code did not panic") - } - }() - - queue.enqueue(serial(t), time.Now(), ocsp.ResponseStatus(ocsp.Good)) -} diff --git a/ca/proto/ca.pb.go b/ca/proto/ca.pb.go index c4dd7edba15..c48af27a3c4 100644 --- a/ca/proto/ca.pb.go +++ b/ca/proto/ca.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: ca.proto package proto @@ -10,8 +10,10 @@ import ( proto "github.com/letsencrypt/boulder/core/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,23 +24,25 @@ const ( ) type IssueCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 6 Csr []byte `protobuf:"bytes,1,opt,name=csr,proto3" json:"csr,omitempty"` RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"` - IssuerNameID int64 `protobuf:"varint,4,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + // certProfileName is a human readable name provided by the RA and used to + // determine if the CA can issue for that profile. A default name will be + // assigned inside the CA during *Profile construction if no name is provided. + // The value of this field should not be relied upon inside the RA. 
+ CertProfileName string `protobuf:"bytes,5,opt,name=certProfileName,proto3" json:"certProfileName,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *IssueCertificateRequest) Reset() { *x = IssueCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ca_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_ca_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IssueCertificateRequest) String() string { @@ -49,7 +53,7 @@ func (*IssueCertificateRequest) ProtoMessage() {} func (x *IssueCertificateRequest) ProtoReflect() protoreflect.Message { mi := &file_ca_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -85,39 +89,36 @@ func (x *IssueCertificateRequest) GetOrderID() int64 { return 0 } -func (x *IssueCertificateRequest) GetIssuerNameID() int64 { +func (x *IssueCertificateRequest) GetCertProfileName() string { if x != nil { - return x.IssuerNameID + return x.CertProfileName } - return 0 + return "" } -type IssuePrecertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type IssueCertificateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"` unknownFields protoimpl.UnknownFields - - DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *IssuePrecertificateResponse) Reset() { - *x = IssuePrecertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ca_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *IssueCertificateResponse) Reset() { + *x = IssueCertificateResponse{} + mi := &file_ca_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *IssuePrecertificateResponse) String() string { +func (x *IssueCertificateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*IssuePrecertificateResponse) ProtoMessage() {} +func (*IssueCertificateResponse) ProtoMessage() {} -func (x *IssuePrecertificateResponse) ProtoReflect() protoreflect.Message { +func (x *IssueCertificateResponse) ProtoReflect() protoreflect.Message { mi := &file_ca_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -127,47 +128,45 @@ func (x *IssuePrecertificateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use IssuePrecertificateResponse.ProtoReflect.Descriptor instead. -func (*IssuePrecertificateResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use IssueCertificateResponse.ProtoReflect.Descriptor instead. 
+func (*IssueCertificateResponse) Descriptor() ([]byte, []int) { return file_ca_proto_rawDescGZIP(), []int{1} } -func (x *IssuePrecertificateResponse) GetDER() []byte { +func (x *IssueCertificateResponse) GetDER() []byte { if x != nil { return x.DER } return nil } -type IssueCertificateForPrecertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GenerateCRLRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Payload: + // + // *GenerateCRLRequest_Metadata + // *GenerateCRLRequest_Entry + Payload isGenerateCRLRequest_Payload `protobuf_oneof:"payload"` unknownFields protoimpl.UnknownFields - - DER []byte `protobuf:"bytes,1,opt,name=DER,proto3" json:"DER,omitempty"` - SCTs [][]byte `protobuf:"bytes,2,rep,name=SCTs,proto3" json:"SCTs,omitempty"` - RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - OrderID int64 `protobuf:"varint,4,opt,name=orderID,proto3" json:"orderID,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *IssueCertificateForPrecertificateRequest) Reset() { - *x = IssueCertificateForPrecertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ca_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GenerateCRLRequest) Reset() { + *x = GenerateCRLRequest{} + mi := &file_ca_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *IssueCertificateForPrecertificateRequest) String() string { +func (x *GenerateCRLRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*IssueCertificateForPrecertificateRequest) ProtoMessage() {} +func (*GenerateCRLRequest) ProtoMessage() {} -func (x *IssueCertificateForPrecertificateRequest) ProtoReflect() protoreflect.Message { +func (x *GenerateCRLRequest) ProtoReflect() protoreflect.Message { mi := &file_ca_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -177,70 +176,78 @@ func (x *IssueCertificateForPrecertificateRequest) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use IssueCertificateForPrecertificateRequest.ProtoReflect.Descriptor instead. -func (*IssueCertificateForPrecertificateRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GenerateCRLRequest.ProtoReflect.Descriptor instead. 
+func (*GenerateCRLRequest) Descriptor() ([]byte, []int) { return file_ca_proto_rawDescGZIP(), []int{2} } -func (x *IssueCertificateForPrecertificateRequest) GetDER() []byte { +func (x *GenerateCRLRequest) GetPayload() isGenerateCRLRequest_Payload { if x != nil { - return x.DER + return x.Payload } return nil } -func (x *IssueCertificateForPrecertificateRequest) GetSCTs() [][]byte { +func (x *GenerateCRLRequest) GetMetadata() *CRLMetadata { if x != nil { - return x.SCTs + if x, ok := x.Payload.(*GenerateCRLRequest_Metadata); ok { + return x.Metadata + } } return nil } -func (x *IssueCertificateForPrecertificateRequest) GetRegistrationID() int64 { +func (x *GenerateCRLRequest) GetEntry() *proto.CRLEntry { if x != nil { - return x.RegistrationID + if x, ok := x.Payload.(*GenerateCRLRequest_Entry); ok { + return x.Entry + } } - return 0 + return nil } -func (x *IssueCertificateForPrecertificateRequest) GetOrderID() int64 { - if x != nil { - return x.OrderID - } - return 0 +type isGenerateCRLRequest_Payload interface { + isGenerateCRLRequest_Payload() } -// Exactly one of certDER or [serial and issuerID] must be set. -type GenerateOCSPRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GenerateCRLRequest_Metadata struct { + Metadata *CRLMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"` +} - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Reason int32 `protobuf:"varint,3,opt,name=reason,proto3" json:"reason,omitempty"` - RevokedAt int64 `protobuf:"varint,4,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` - Serial string `protobuf:"bytes,5,opt,name=serial,proto3" json:"serial,omitempty"` - IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"` +type GenerateCRLRequest_Entry struct { + Entry *proto.CRLEntry `protobuf:"bytes,2,opt,name=entry,proto3,oneof"` } -func (x *GenerateOCSPRequest) Reset() { - *x = GenerateOCSPRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ca_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (*GenerateCRLRequest_Metadata) isGenerateCRLRequest_Payload() {} + +func (*GenerateCRLRequest_Entry) isGenerateCRLRequest_Payload() {} + +type CRLMetadata struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 5 + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + ThisUpdate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=thisUpdate,proto3" json:"thisUpdate,omitempty"` + ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CRLMetadata) Reset() { + *x = CRLMetadata{} + mi := &file_ca_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GenerateOCSPRequest) String() string { +func (x *CRLMetadata) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GenerateOCSPRequest) ProtoMessage() {} +func (*CRLMetadata) ProtoMessage() {} -func (x *GenerateOCSPRequest) ProtoReflect() protoreflect.Message { +func (x *CRLMetadata) ProtoReflect() protoreflect.Message { mi := &file_ca_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -250,72 +257,55 @@ func (x 
*GenerateOCSPRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GenerateOCSPRequest.ProtoReflect.Descriptor instead. -func (*GenerateOCSPRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CRLMetadata.ProtoReflect.Descriptor instead. +func (*CRLMetadata) Descriptor() ([]byte, []int) { return file_ca_proto_rawDescGZIP(), []int{3} } -func (x *GenerateOCSPRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *GenerateOCSPRequest) GetReason() int32 { - if x != nil { - return x.Reason - } - return 0 -} - -func (x *GenerateOCSPRequest) GetRevokedAt() int64 { +func (x *CRLMetadata) GetIssuerNameID() int64 { if x != nil { - return x.RevokedAt + return x.IssuerNameID } return 0 } -func (x *GenerateOCSPRequest) GetSerial() string { +func (x *CRLMetadata) GetThisUpdate() *timestamppb.Timestamp { if x != nil { - return x.Serial + return x.ThisUpdate } - return "" + return nil } -func (x *GenerateOCSPRequest) GetIssuerID() int64 { +func (x *CRLMetadata) GetShardIdx() int64 { if x != nil { - return x.IssuerID + return x.ShardIdx } return 0 } -type OCSPResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GenerateCRLResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` unknownFields protoimpl.UnknownFields - - Response []byte `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *OCSPResponse) Reset() { - *x = OCSPResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_ca_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GenerateCRLResponse) Reset() { + *x = GenerateCRLResponse{} + mi := &file_ca_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *OCSPResponse) String() string { +func (x *GenerateCRLResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*OCSPResponse) ProtoMessage() {} +func (*GenerateCRLResponse) ProtoMessage() {} -func (x *OCSPResponse) ProtoReflect() protoreflect.Message { +func (x *GenerateCRLResponse) ProtoReflect() protoreflect.Message { mi := &file_ca_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -325,120 +315,110 @@ func (x *OCSPResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use OCSPResponse.ProtoReflect.Descriptor instead. -func (*OCSPResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GenerateCRLResponse.ProtoReflect.Descriptor instead. 
+func (*GenerateCRLResponse) Descriptor() ([]byte, []int) { return file_ca_proto_rawDescGZIP(), []int{4} } -func (x *OCSPResponse) GetResponse() []byte { +func (x *GenerateCRLResponse) GetChunk() []byte { if x != nil { - return x.Response + return x.Chunk } return nil } var File_ca_proto protoreflect.FileDescriptor -var file_ca_proto_rawDesc = []byte{ +var file_ca_proto_rawDesc = string([]byte{ 0x0a, 0x08, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x63, 0x61, 0x1a, 0x15, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91, 0x01, 0x0a, 0x17, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, - 0x63, 0x73, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, - 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, - 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, - 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x1b, 0x49, 0x73, 0x73, - 0x75, 0x65, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x44, 0x45, 0x52, 0x22, 0x92, 0x01, 0x0a, 0x28, 0x49, - 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, - 0x6f, 0x72, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x44, 0x45, 0x52, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x43, 0x54, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, 0x53, 0x43, 0x54, 0x73, 0x12, 0x26, 0x0a, - 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x22, - 0x97, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, - 0x65, 0x64, 0x41, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, - 0x6b, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x1a, 0x0a, - 0x08, 0x69, 0x73, 
0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x2a, 0x0a, 0x0c, 0x4f, 0x43, 0x53, - 0x50, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x92, 0x02, 0x0a, 0x14, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x55, - 0x0a, 0x13, 0x49, 0x73, 0x73, 0x75, 0x65, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x17, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x50, 0x72, 0x65, - 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x21, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x50, 0x72, 0x65, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x63, 0x61, 0x2e, - 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x46, 0x6f, 0x72, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, - 0x0c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x12, 0x17, 0x2e, - 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x63, 0x61, 0x2e, 0x4f, 0x43, 0x53, 0x50, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x4c, 0x0a, 0x0d, 0x4f, 0x43, - 0x53, 0x50, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x0c, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x12, 0x17, 0x2e, 0x63, 0x61, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x63, 0x61, 0x2e, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x61, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x03, 0x63, 0x73, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 
0x12, 0x18, 0x0a, 0x07, + 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6f, + 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x2c, 0x0a, 0x18, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x44, 0x45, 0x52, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x03, 0x44, 0x45, 0x52, 0x22, 0x76, 0x0a, 0x12, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, + 0x61, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x8f, 0x01, 0x0a, + 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, + 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x2b, + 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x32, 0x67, 0x0a, 0x14, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, + 0x75, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x61, 0x2e, 0x49, 0x73, 0x73, 0x75, 0x65, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x32, 0x54, 0x0a, 0x0c, 0x43, 0x52, 0x4c, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0b, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x52, 0x4c, 0x12, 0x16, 0x2e, 0x63, 0x61, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 
0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x61, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x61, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_ca_proto_rawDescOnce sync.Once - file_ca_proto_rawDescData = file_ca_proto_rawDesc + file_ca_proto_rawDescData []byte ) func file_ca_proto_rawDescGZIP() []byte { file_ca_proto_rawDescOnce.Do(func() { - file_ca_proto_rawDescData = protoimpl.X.CompressGZIP(file_ca_proto_rawDescData) + file_ca_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc))) }) return file_ca_proto_rawDescData } var file_ca_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_ca_proto_goTypes = []interface{}{ - (*IssueCertificateRequest)(nil), // 0: ca.IssueCertificateRequest - (*IssuePrecertificateResponse)(nil), // 1: ca.IssuePrecertificateResponse - (*IssueCertificateForPrecertificateRequest)(nil), // 2: ca.IssueCertificateForPrecertificateRequest - (*GenerateOCSPRequest)(nil), // 3: ca.GenerateOCSPRequest - (*OCSPResponse)(nil), // 4: ca.OCSPResponse - (*proto.Certificate)(nil), // 5: core.Certificate +var file_ca_proto_goTypes = []any{ + (*IssueCertificateRequest)(nil), // 0: ca.IssueCertificateRequest + (*IssueCertificateResponse)(nil), // 1: ca.IssueCertificateResponse + (*GenerateCRLRequest)(nil), // 2: ca.GenerateCRLRequest + (*CRLMetadata)(nil), // 3: ca.CRLMetadata + (*GenerateCRLResponse)(nil), // 4: ca.GenerateCRLResponse + (*proto.CRLEntry)(nil), // 5: core.CRLEntry + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp } var file_ca_proto_depIdxs = []int32{ - 0, // 0: ca.CertificateAuthority.IssuePrecertificate:input_type -> ca.IssueCertificateRequest - 2, // 1: ca.CertificateAuthority.IssueCertificateForPrecertificate:input_type -> ca.IssueCertificateForPrecertificateRequest - 3, // 2: ca.CertificateAuthority.GenerateOCSP:input_type -> ca.GenerateOCSPRequest - 3, // 3: ca.OCSPGenerator.GenerateOCSP:input_type -> ca.GenerateOCSPRequest - 1, // 4: ca.CertificateAuthority.IssuePrecertificate:output_type -> ca.IssuePrecertificateResponse - 5, // 5: ca.CertificateAuthority.IssueCertificateForPrecertificate:output_type -> core.Certificate - 4, // 6: ca.CertificateAuthority.GenerateOCSP:output_type -> ca.OCSPResponse - 4, // 7: ca.OCSPGenerator.GenerateOCSP:output_type -> ca.OCSPResponse - 4, // [4:8] is the sub-list for method output_type - 0, // [0:4] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 3, // 0: ca.GenerateCRLRequest.metadata:type_name -> ca.CRLMetadata + 5, // 1: ca.GenerateCRLRequest.entry:type_name -> core.CRLEntry + 6, // 2: ca.CRLMetadata.thisUpdate:type_name -> google.protobuf.Timestamp + 0, // 3: ca.CertificateAuthority.IssueCertificate:input_type -> ca.IssueCertificateRequest + 2, // 4: ca.CRLGenerator.GenerateCRL:input_type -> ca.GenerateCRLRequest + 1, // 5: ca.CertificateAuthority.IssueCertificate:output_type -> ca.IssueCertificateResponse + 4, // 6: 
ca.CRLGenerator.GenerateCRL:output_type -> ca.GenerateCRLResponse + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_ca_proto_init() } @@ -446,73 +426,15 @@ func file_ca_proto_init() { if File_ca_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_ca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IssueCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ca_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IssuePrecertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ca_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IssueCertificateForPrecertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ca_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateOCSPRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ca_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OCSPResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } + file_ca_proto_msgTypes[2].OneofWrappers = []any{ + (*GenerateCRLRequest_Metadata)(nil), + (*GenerateCRLRequest_Entry)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ca_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_ca_proto_rawDesc), len(file_ca_proto_rawDesc)), NumEnums: 0, NumMessages: 5, NumExtensions: 0, @@ -523,7 +445,6 @@ func file_ca_proto_init() { MessageInfos: file_ca_proto_msgTypes, }.Build() File_ca_proto = out.File - file_ca_proto_rawDesc = nil file_ca_proto_goTypes = nil file_ca_proto_depIdxs = nil } diff --git a/ca/proto/ca.proto b/ca/proto/ca.proto index 3eac116d740..d5c75d38390 100644 --- a/ca/proto/ca.proto +++ b/ca/proto/ca.proto @@ -4,49 +4,52 @@ package ca; option go_package = "github.com/letsencrypt/boulder/ca/proto"; import "core/proto/core.proto"; +import "google/protobuf/timestamp.proto"; // CertificateAuthority issues certificates. service CertificateAuthority { - rpc IssuePrecertificate(IssueCertificateRequest) returns (IssuePrecertificateResponse) {} - rpc IssueCertificateForPrecertificate(IssueCertificateForPrecertificateRequest) returns (core.Certificate) {} - rpc GenerateOCSP(GenerateOCSPRequest) returns (OCSPResponse) {} -} - -// OCSPGenerator generates OCSP. We separate this out from -// CertificateAuthority so that we can restrict access to a different subset of -// hosts, so the hosts that need to request OCSP generation don't need to be -// able to request certificate issuance. -service OCSPGenerator { - rpc GenerateOCSP(GenerateOCSPRequest) returns (OCSPResponse) {} + // IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that. 
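+  // The response contains only the final certificate DER; the precertificate
+  // is not returned separately.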
+ rpc IssueCertificate(IssueCertificateRequest) returns (IssueCertificateResponse) {} } message IssueCertificateRequest { + // Next unused field number: 6 bytes csr = 1; int64 registrationID = 2; int64 orderID = 3; - int64 issuerNameID = 4; + reserved 4; // Previously issuerNameID + + // certProfileName is a human readable name provided by the RA and used to + // determine if the CA can issue for that profile. A default name will be + // assigned inside the CA during *Profile construction if no name is provided. + // The value of this field should not be relied upon inside the RA. + string certProfileName = 5; } -message IssuePrecertificateResponse { +message IssueCertificateResponse { bytes DER = 1; } -message IssueCertificateForPrecertificateRequest { - bytes DER = 1; - repeated bytes SCTs = 2; - int64 registrationID = 3; - int64 orderID = 4; +// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator. +service CRLGenerator { + rpc GenerateCRL(stream GenerateCRLRequest) returns (stream GenerateCRLResponse) {} +} + +message GenerateCRLRequest { + oneof payload { + CRLMetadata metadata = 1; + core.CRLEntry entry = 2; + } } -// Exactly one of certDER or [serial and issuerID] must be set. -message GenerateOCSPRequest { - string status = 2; - int32 reason = 3; - int64 revokedAt = 4; - string serial = 5; - int64 issuerID = 6; +message CRLMetadata { + // Next unused field number: 5 + int64 issuerNameID = 1; + reserved 2; // Previously thisUpdateNS + google.protobuf.Timestamp thisUpdate = 4; + int64 shardIdx = 3; } -message OCSPResponse { - bytes response = 1; +message GenerateCRLResponse { + bytes chunk = 1; } diff --git a/ca/proto/ca_grpc.pb.go b/ca/proto/ca_grpc.pb.go index 7cc6062f43f..a1cfef27cc6 100644 --- a/ca/proto/ca_grpc.pb.go +++ b/ca/proto/ca_grpc.pb.go @@ -1,10 +1,13 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: ca.proto package proto import ( context "context" - proto "github.com/letsencrypt/boulder/core/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -12,16 +15,21 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + CertificateAuthority_IssueCertificate_FullMethodName = "/ca.CertificateAuthority/IssueCertificate" +) // CertificateAuthorityClient is the client API for CertificateAuthority service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// CertificateAuthority issues certificates. type CertificateAuthorityClient interface { - IssuePrecertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssuePrecertificateResponse, error) - IssueCertificateForPrecertificate(ctx context.Context, in *IssueCertificateForPrecertificateRequest, opts ...grpc.CallOption) (*proto.Certificate, error) - GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error) + // IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that. 
+ IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error) } type certificateAuthorityClient struct { @@ -32,27 +40,10 @@ func NewCertificateAuthorityClient(cc grpc.ClientConnInterface) CertificateAutho return &certificateAuthorityClient{cc} } -func (c *certificateAuthorityClient) IssuePrecertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssuePrecertificateResponse, error) { - out := new(IssuePrecertificateResponse) - err := c.cc.Invoke(ctx, "/ca.CertificateAuthority/IssuePrecertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *certificateAuthorityClient) IssueCertificateForPrecertificate(ctx context.Context, in *IssueCertificateForPrecertificateRequest, opts ...grpc.CallOption) (*proto.Certificate, error) { - out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/ca.CertificateAuthority/IssueCertificateForPrecertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *certificateAuthorityClient) GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error) { - out := new(OCSPResponse) - err := c.cc.Invoke(ctx, "/ca.CertificateAuthority/GenerateOCSP", in, out, opts...) +func (c *certificateAuthorityClient) IssueCertificate(ctx context.Context, in *IssueCertificateRequest, opts ...grpc.CallOption) (*IssueCertificateResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(IssueCertificateResponse) + err := c.cc.Invoke(ctx, CertificateAuthority_IssueCertificate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -61,28 +52,27 @@ func (c *certificateAuthorityClient) GenerateOCSP(ctx context.Context, in *Gener // CertificateAuthorityServer is the server API for CertificateAuthority service. // All implementations must embed UnimplementedCertificateAuthorityServer -// for forward compatibility +// for forward compatibility. +// +// CertificateAuthority issues certificates. type CertificateAuthorityServer interface { - IssuePrecertificate(context.Context, *IssueCertificateRequest) (*IssuePrecertificateResponse, error) - IssueCertificateForPrecertificate(context.Context, *IssueCertificateForPrecertificateRequest) (*proto.Certificate, error) - GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) + // IssueCertificate issues a precertificate, gets SCTs, issues a certificate, and returns that. + IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error) mustEmbedUnimplementedCertificateAuthorityServer() } -// UnimplementedCertificateAuthorityServer must be embedded to have forward compatible implementations. -type UnimplementedCertificateAuthorityServer struct { -} +// UnimplementedCertificateAuthorityServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
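The NOTE above matters in practice: RegisterCertificateAuthorityServer (further down in this file) probes the server value for the generated testEmbeddedByValue method, so an implementation that embeds the Unimplemented struct as a nil pointer fails at registration time rather than at its first RPC. A minimal sketch of a conforming implementation follows; the caImpl and issueFromCSR names are illustrative assumptions, not Boulder code.

```go
package main

import (
	"context"
	"errors"

	capb "github.com/letsencrypt/boulder/ca/proto"
)

// caImpl embeds the Unimplemented struct by value, per the NOTE above, so
// the testEmbeddedByValue probe in RegisterCertificateAuthorityServer can
// never dereference a nil pointer.
type caImpl struct {
	capb.UnimplementedCertificateAuthorityServer
}

func (c *caImpl) IssueCertificate(ctx context.Context, req *capb.IssueCertificateRequest) (*capb.IssueCertificateResponse, error) {
	if len(req.Csr) == 0 {
		return nil, errors.New("empty CSR")
	}
	der, err := issueFromCSR(req.Csr, req.CertProfileName)
	if err != nil {
		return nil, err
	}
	return &capb.IssueCertificateResponse{DER: der}, nil
}

// issueFromCSR stands in for the real precertificate/SCT/final-certificate
// pipeline; it is an assumption of this sketch, not part of the generated API.
func issueFromCSR(csr []byte, profile string) ([]byte, error) {
	return nil, errors.New("not implemented in this sketch")
}
```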
+type UnimplementedCertificateAuthorityServer struct{} -func (UnimplementedCertificateAuthorityServer) IssuePrecertificate(context.Context, *IssueCertificateRequest) (*IssuePrecertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IssuePrecertificate not implemented") -} -func (UnimplementedCertificateAuthorityServer) IssueCertificateForPrecertificate(context.Context, *IssueCertificateForPrecertificateRequest) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method IssueCertificateForPrecertificate not implemented") -} -func (UnimplementedCertificateAuthorityServer) GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GenerateOCSP not implemented") +func (UnimplementedCertificateAuthorityServer) IssueCertificate(context.Context, *IssueCertificateRequest) (*IssueCertificateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IssueCertificate not implemented") } func (UnimplementedCertificateAuthorityServer) mustEmbedUnimplementedCertificateAuthorityServer() {} +func (UnimplementedCertificateAuthorityServer) testEmbeddedByValue() {} // UnsafeCertificateAuthorityServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to CertificateAuthorityServer will @@ -92,59 +82,30 @@ type UnsafeCertificateAuthorityServer interface { } func RegisterCertificateAuthorityServer(s grpc.ServiceRegistrar, srv CertificateAuthorityServer) { + // If the following call panics, it indicates UnimplementedCertificateAuthorityServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&CertificateAuthority_ServiceDesc, srv) } -func _CertificateAuthority_IssuePrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _CertificateAuthority_IssueCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(IssueCertificateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(CertificateAuthorityServer).IssuePrecertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ca.CertificateAuthority/IssuePrecertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CertificateAuthorityServer).IssuePrecertificate(ctx, req.(*IssueCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _CertificateAuthority_IssueCertificateForPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IssueCertificateForPrecertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CertificateAuthorityServer).IssueCertificateForPrecertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ca.CertificateAuthority/IssueCertificateForPrecertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CertificateAuthorityServer).IssueCertificateForPrecertificate(ctx, req.(*IssueCertificateForPrecertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _CertificateAuthority_GenerateOCSP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GenerateOCSPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CertificateAuthorityServer).GenerateOCSP(ctx, in) + return srv.(CertificateAuthorityServer).IssueCertificate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ca.CertificateAuthority/GenerateOCSP", + FullMethod: CertificateAuthority_IssueCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CertificateAuthorityServer).GenerateOCSP(ctx, req.(*GenerateOCSPRequest)) + return srv.(CertificateAuthorityServer).IssueCertificate(ctx, req.(*IssueCertificateRequest)) } return interceptor(ctx, in, info, handler) } @@ -157,104 +118,110 @@ var CertificateAuthority_ServiceDesc = grpc.ServiceDesc{ HandlerType: (*CertificateAuthorityServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "IssuePrecertificate", - Handler: _CertificateAuthority_IssuePrecertificate_Handler, - }, - { - MethodName: "IssueCertificateForPrecertificate", - Handler: _CertificateAuthority_IssueCertificateForPrecertificate_Handler, - }, - { - MethodName: "GenerateOCSP", - Handler: _CertificateAuthority_GenerateOCSP_Handler, + MethodName: "IssueCertificate", + Handler: _CertificateAuthority_IssueCertificate_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "ca.proto", } -// OCSPGeneratorClient is the client API for OCSPGenerator service. 
+const ( + CRLGenerator_GenerateCRL_FullMethodName = "/ca.CRLGenerator/GenerateCRL" +) + +// CRLGeneratorClient is the client API for CRLGenerator service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type OCSPGeneratorClient interface { - GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error) +// +// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator. +type CRLGeneratorClient interface { + GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse], error) } -type oCSPGeneratorClient struct { +type cRLGeneratorClient struct { cc grpc.ClientConnInterface } -func NewOCSPGeneratorClient(cc grpc.ClientConnInterface) OCSPGeneratorClient { - return &oCSPGeneratorClient{cc} +func NewCRLGeneratorClient(cc grpc.ClientConnInterface) CRLGeneratorClient { + return &cRLGeneratorClient{cc} } -func (c *oCSPGeneratorClient) GenerateOCSP(ctx context.Context, in *GenerateOCSPRequest, opts ...grpc.CallOption) (*OCSPResponse, error) { - out := new(OCSPResponse) - err := c.cc.Invoke(ctx, "/ca.OCSPGenerator/GenerateOCSP", in, out, opts...) +func (c *cRLGeneratorClient) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &CRLGenerator_ServiceDesc.Streams[0], CRLGenerator_GenerateCRL_FullMethodName, cOpts...) if err != nil { return nil, err } - return out, nil + x := &grpc.GenericClientStream[GenerateCRLRequest, GenerateCRLResponse]{ClientStream: stream} + return x, nil } -// OCSPGeneratorServer is the server API for OCSPGenerator service. -// All implementations must embed UnimplementedOCSPGeneratorServer -// for forward compatibility -type OCSPGeneratorServer interface { - GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) - mustEmbedUnimplementedOCSPGeneratorServer() -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CRLGenerator_GenerateCRLClient = grpc.BidiStreamingClient[GenerateCRLRequest, GenerateCRLResponse] -// UnimplementedOCSPGeneratorServer must be embedded to have forward compatible implementations. -type UnimplementedOCSPGeneratorServer struct { +// CRLGeneratorServer is the server API for CRLGenerator service. +// All implementations must embed UnimplementedCRLGeneratorServer +// for forward compatibility. +// +// CRLGenerator signs CRLs. It is separated for the same reason as OCSPGenerator. +type CRLGeneratorServer interface { + GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error + mustEmbedUnimplementedCRLGeneratorServer() } -func (UnimplementedOCSPGeneratorServer) GenerateOCSP(context.Context, *GenerateOCSPRequest) (*OCSPResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GenerateOCSP not implemented") +// UnimplementedCRLGeneratorServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
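Before the remaining generated scaffolding, it is worth spelling out how the stream above is meant to be driven. A hedged client-side sketch, assuming an already-dialed CRLGeneratorClient and placeholder metadata values: send exactly one CRLMetadata message, then one message per revoked certificate, half-close, and concatenate the returned chunks.

```go
package main

import (
	"context"
	"io"

	capb "github.com/letsencrypt/boulder/ca/proto"
	corepb "github.com/letsencrypt/boulder/core/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// generateCRL drives the bidirectional stream: one metadata message, then
// one message per CRL entry, then half-close and reassemble the signed CRL
// from the response chunks. The issuer ID and shard index are placeholders.
func generateCRL(ctx context.Context, client capb.CRLGeneratorClient, entries []*corepb.CRLEntry) ([]byte, error) {
	stream, err := client.GenerateCRL(ctx)
	if err != nil {
		return nil, err
	}
	err = stream.Send(&capb.GenerateCRLRequest{
		Payload: &capb.GenerateCRLRequest_Metadata{
			Metadata: &capb.CRLMetadata{
				IssuerNameID: 1,                 // placeholder
				ThisUpdate:   timestamppb.Now(), // see CRLMetadata.thisUpdate above
				ShardIdx:     0,                 // placeholder
			},
		},
	})
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		err = stream.Send(&capb.GenerateCRLRequest{
			Payload: &capb.GenerateCRLRequest_Entry{Entry: entry},
		})
		if err != nil {
			return nil, err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	var crl []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return crl, nil
		}
		if err != nil {
			return nil, err
		}
		crl = append(crl, resp.Chunk...)
	}
}
```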
+type UnimplementedCRLGeneratorServer struct{} + +func (UnimplementedCRLGeneratorServer) GenerateCRL(grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse]) error { + return status.Errorf(codes.Unimplemented, "method GenerateCRL not implemented") } -func (UnimplementedOCSPGeneratorServer) mustEmbedUnimplementedOCSPGeneratorServer() {} +func (UnimplementedCRLGeneratorServer) mustEmbedUnimplementedCRLGeneratorServer() {} +func (UnimplementedCRLGeneratorServer) testEmbeddedByValue() {} -// UnsafeOCSPGeneratorServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to OCSPGeneratorServer will +// UnsafeCRLGeneratorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CRLGeneratorServer will // result in compilation errors. -type UnsafeOCSPGeneratorServer interface { - mustEmbedUnimplementedOCSPGeneratorServer() +type UnsafeCRLGeneratorServer interface { + mustEmbedUnimplementedCRLGeneratorServer() } -func RegisterOCSPGeneratorServer(s grpc.ServiceRegistrar, srv OCSPGeneratorServer) { - s.RegisterService(&OCSPGenerator_ServiceDesc, srv) +func RegisterCRLGeneratorServer(s grpc.ServiceRegistrar, srv CRLGeneratorServer) { + // If the following call panics, it indicates UnimplementedCRLGeneratorServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CRLGenerator_ServiceDesc, srv) } -func _OCSPGenerator_GenerateOCSP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GenerateOCSPRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OCSPGeneratorServer).GenerateOCSP(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ca.OCSPGenerator/GenerateOCSP", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OCSPGeneratorServer).GenerateOCSP(ctx, req.(*GenerateOCSPRequest)) - } - return interceptor(ctx, in, info, handler) +func _CRLGenerator_GenerateCRL_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(CRLGeneratorServer).GenerateCRL(&grpc.GenericServerStream[GenerateCRLRequest, GenerateCRLResponse]{ServerStream: stream}) } -// OCSPGenerator_ServiceDesc is the grpc.ServiceDesc for OCSPGenerator service. +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CRLGenerator_GenerateCRLServer = grpc.BidiStreamingServer[GenerateCRLRequest, GenerateCRLResponse] + +// CRLGenerator_ServiceDesc is the grpc.ServiceDesc for CRLGenerator service.
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) -var OCSPGenerator_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "ca.OCSPGenerator", - HandlerType: (*OCSPGeneratorServer)(nil), - Methods: []grpc.MethodDesc{ +var CRLGenerator_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "ca.CRLGenerator", + HandlerType: (*CRLGeneratorServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ { - MethodName: "GenerateOCSP", - Handler: _OCSPGenerator_GenerateOCSP_Handler, + StreamName: "GenerateCRL", + Handler: _CRLGenerator_GenerateCRL_Handler, + ServerStreams: true, + ClientStreams: true, }, }, - Streams: []grpc.StreamDesc{}, Metadata: "ca.proto", } diff --git a/ca/testdata/ca_cert.pem b/ca/testdata/ca_cert.pem deleted file mode 100644 index 4737897abc9..00000000000 --- a/ca/testdata/ca_cert.pem +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFxDCCA6ygAwIBAgIJALe2d/gZHJqAMA0GCSqGSIb3DQEBCwUAMDExCzAJBgNV -BAYTAlVTMRAwDgYDVQQKDAdUZXN0IENBMRAwDgYDVQQDDAdUZXN0IENBMB4XDTE1 -MDIxMzAwMzI0NFoXDTI1MDIxMDAwMzI0NFowMTELMAkGA1UEBhMCVVMxEDAOBgNV -BAoMB1Rlc3QgQ0ExEDAOBgNVBAMMB1Rlc3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQCqYzR0R/8n0wKTYi3N68vR0onziVVS1/+9DsBcWLj3a8Vd -zds+snPbJu2M7TyhWSFGsUYaAu58vYl44GfmlRlCunpOrIIuhDh//Kua720J4bwK -0ODGLph70uO+VyEQeFQqEAdzy4v5puUfNbEdN66Ge5OGuwsVRwlBZvXRTbsuJend -cJadRC5kzxiPbnAqj9V44RK1Cn615dK/JTFVho2iHFER1k+MGMrso+8mn6asLZOj -RSx5wt+JEPbrE24X9fb+cF5J/e5AWL3OrcgdAf4953OJn5N/v+6F5FyaE+t0JKzn -THtLL1HCKMQmocpU2rTfYA1MWfLdY/KQZAdychoD6sQ6uuxCKRf6Zan/UH+4RcTW -ciPk8QAXRztkJGyJQozzLXfLnZFFHKtrS80h55SyvAA5UhwpVGjlKwKbwFHmNDj4 -5XE3anmiZFNdrAgAwDf+Pbukmolh2ffz++vZhHJuvorFhGziG9+O9IoBdTkKvJwY -qAkk+PP6Pe8GKgZsojvPr6vVewDEVGoBNth9/OAAVmIDXtoHEqWpk2rlCQsYcMjt -w+bVUxNpjs5kFXGwOpe6XfOxiMQxWaadqq3VUB06XXyS4JADtYm6EjrFPtEUG6Yu -9bGefjN/jyMls/8MwQR/HKNidueeKpuLfJYKvbudNf9XLVaZW9zf52WT0bqEdwID -AQABo4HeMIHbMB0GA1UdDgQWBBSaJqZ383/ySesJvVCWHAHhZcKpqzBhBgNVHSME -WjBYgBSaJqZ383/ySesJvVCWHAHhZcKpq6E1pDMwMTELMAkGA1UEBhMCVVMxEDAO -BgNVBAoMB1Rlc3QgQ0ExEDAOBgNVBAMMB1Rlc3QgQ0GCCQC3tnf4GRyagDAPBgNV -HRMECDAGAQH/AgEBMAsGA1UdDwQEAwIBBjA5BggrBgEFBQcBAQQtMCswKQYIKwYB -BQUHMAGGHWh0dHA6Ly9vY3NwLmV4YW1wbGUuY29tOjgwODAvMA0GCSqGSIb3DQEB -CwUAA4ICAQCWJo5AaOIW9n17sZIMRO4m3S2gF2Bs03X4i29/NyMCtOGlGk+VFmu/ -1rP3XYE4KJpSq+9/LV1xXFd2FTvuSz18MAvlCz2b5V7aBl88qup1htM/0VXXTy9e -p9tapIDuclcVez1kkdxPSwXh9sejcfNoZrgkPr/skvWp4WPy+rMvskHGB1BcRIG3 -xgR0IYIS0/3N6k6mcDaDGjGHMPoKY3sgg8Q/FToTxiMux1p2eGjbTmjKzOirXOj4 -Alv82qEjIRCMdnvOkZI35cd7tiO8Z3m209fhpkmvye2IERZxSBPRC84vrFfh0aWK -U/PisgsVD5/suRfWMqtdMHf0Mm+ycpgcTjijqMZF1gc05zfDqfzNH/MCcCdH9R2F -13ig5W8zJU8M1tV04ftElPi0/a6pCDs9UWk+ADIsAScee7P5kW+4WWo3t7sIuj8i -wAGiF+tljMOkzvGnxcuy+okR3EhhQdwOl+XKBgBXrK/hfvLobSQeHKk6+oUJzg4b -wL7gg7ommDqj181eBc1tiTzXv15Jd4cy9s/hvZA0+EfZc6+21urlwEGmEmm0EsAG -ldK1FVOTRlXJrjw0K57bI+7MxhdD06I4ikFCXRTAIxVSRlXegrDyAwUZv7CqH0mr -8jcQV9i1MJFGXV7k3En0lQv2z5AD9aFtkc6UjHpAzB8xEWMO0ZAtBg== ------END CERTIFICATE----- \ No newline at end of file diff --git a/ca/testdata/ca_key.pem b/ca/testdata/ca_key.pem deleted file mode 100644 index e7dcfd5b88f..00000000000 --- a/ca/testdata/ca_key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAqmM0dEf/J9MCk2ItzevL0dKJ84lVUtf/vQ7AXFi492vFXc3b -PrJz2ybtjO08oVkhRrFGGgLufL2JeOBn5pUZQrp6TqyCLoQ4f/yrmu9tCeG8CtDg -xi6Ye9LjvlchEHhUKhAHc8uL+ablHzWxHTeuhnuThrsLFUcJQWb10U27LiXp3XCW -nUQuZM8Yj25wKo/VeOEStQp+teXSvyUxVYaNohxREdZPjBjK7KPvJp+mrC2To0Us 
-ecLfiRD26xNuF/X2/nBeSf3uQFi9zq3IHQH+PedziZ+Tf7/uheRcmhPrdCSs50x7 -Sy9RwijEJqHKVNq032ANTFny3WPykGQHcnIaA+rEOrrsQikX+mWp/1B/uEXE1nIj -5PEAF0c7ZCRsiUKM8y13y52RRRyra0vNIeeUsrwAOVIcKVRo5SsCm8BR5jQ4+OVx -N2p5omRTXawIAMA3/j27pJqJYdn38/vr2YRybr6KxYRs4hvfjvSKAXU5CrycGKgJ -JPjz+j3vBioGbKI7z6+r1XsAxFRqATbYffzgAFZiA17aBxKlqZNq5QkLGHDI7cPm -1VMTaY7OZBVxsDqXul3zsYjEMVmmnaqt1VAdOl18kuCQA7WJuhI6xT7RFBumLvWx -nn4zf48jJbP/DMEEfxyjYnbnniqbi3yWCr27nTX/Vy1WmVvc3+dlk9G6hHcCAwEA -AQKCAgEAirFJ50Ubmu0V8aY/JplDRT4dcJFfVJnh36B8UC8gELY2545DYpub1s2v -G8GYUrXcclCmgVHVktAtcKkpqfW/pCNqn1Ooe/jAjN29SdaOaTbH+/3emTMgh9o3 -6528mk14JOz7Q/Rxsft6EZeA3gmPFITOpyLleKJkFEqc2YxuSrgtz0RwNP9kzEYO -9eGth9egqk57DcbHMYUrsM+zgqyN6WEnVF+gTKd5tnoSltvprclDnekWtN49WrLm -ap9cREDAlogdGBmMr/AMQIoQlBwlOXqG/4VXaOtwWqhyADEqvVWFMJl+2spfwK2y -TMfxjHSiOhlTeczV9gP/VC04Kp5aMXXoCg2Gwlcr4DBic1k6eI/lmUQv6kg/4Nbf -yU+BCUtBW5nfKgf4DOcqX51n92ELnKbPKe41rcZxbTMvjsEQsGB51QLOMHa5tKe8 -F2R3fuP9y5k9lrMcz2vWL+9Qt4No5e++Ej+Jy1NKhrcfwQ6fGpMcZNesl0KHGjhN -dfZZRMHNZNBbJKHrXxAHDxtvoSqWOk8XOwP12C2MbckHkSaXGTLIuGfwcW6rvdF2 -EXrSCINIT1eCmMrnXWzWCm6UWxxshLsqzU7xY5Ov8qId211gXnC2IonAezWwFDE9 -JYjwGJJzNTiEjX6WdeCzT64FMtJk4hpoa3GzroRG2LAmhhnWVaECggEBANblf0L5 -2IywbeqwGF3VsSOyT8EeiAhOD9NUj4cYfU8ueqfY0T9/0pN39kFF8StVk5kOXEmn -dFk74gUC4+PBjrBAMoKvpQ2UpUvX9hgFQYoNmJZxSqF8KzdjS4ABcWIWi8thOAGc -NLssTw3eBsWT7ahX097flpWFVqVaFx5OmB6DOIHVTA+ppf6RYCETgDJomaRbzn8p -FMTpRZBYRLj/w2WxFy1J8gWGSq2sATFCMc3KNFwVQnDVS03g8W/1APqMVU0mIeau -TltSACvdwigLgWUhYxN+1F5awBlGqMdP+TixisVrHZWZw7uFMb8L/MXW1YA4FN8h -k2/Bp8wJTD+G/dkCggEBAMr6Tobi/VlYG+05cLmHoXGH98XaGBokYXdVrHiADGQI -lhYtnqpXQc1vRqp+zFacjpBjcun+nd6HzIFzsoWykevxYKgONol+iTSyHaTtYDm0 -MYrgH8nBo26GSCdz3IGHJ/ux1LL8ZAbY2AbP81x63ke+g9yXQPBkZQp6vYW/SEIG -IKhy+ZK6tZa0/z7zJNfM8PuN+bK4xJorUwbRqIv4owj0Bf92v+Q/wETYeEBpkDGU -uJ3wDc3FVsK5+gaJECS8DNkOmZ+o5aIlMQHbwxXe8NUm4uZDT+znx0uf+Hw1wP1P -zGL/TnjrZcmKRR47apkPXOGZWpPaNV0wkch/Xh1KEs8CggEBAJaRoJRt+LPC3pEE -p13/3yjSxBzc5pVjFKWO5y3SE+LJ/zjhquNiDUo0UH+1oOArCsrADBuzT8tCMQAv -4TrwoKiPopR8uxoD37l/bLex3xT6p8IpSRBSrvkVAo6C9E203Gg5CwPdzfijeBSQ -T5BaMLe2KgZMBPdowKgEspQSn3UpngsiRzPmOx9d/svOHRG0xooppUrlnt7FT29u -2WACHIeBCGs8F26VhHehQAiih8DX/83RO4dRe3zqsmAue2wRrabro+88jDxh/Sq/ -K03hmd0hAoljYStnTJepMZLNTyLRCxl+DvGGFmWqUou4u3hnKZq4MK+Sl/pC5u4I -SbttOykCggEAEk0RSX4r46NbGT+Fl2TQPKFKyM8KP0kqdI0H+PFqrJZNmgBQ/wDR -EQnIcFTwbZq+C+y7jreDWm4aFU3uObnJCGICGgT2C92Z12N74sP4WhuSH/hnRVSt -PKjk1pHOvusFwt7c06qIBkoE6FBVm/AEHKnjz77ffw0+QvygG/AMPs+4oBeFwyIM -f2MgZHedyctTqwq5CdE5AMGJQeMjdENdx8/gvpDhal4JIuv1o7Eg7CeBodPkGrqB -QRttnKs9BmLiMavsVAXxdnYt/gHnjBBG3KEd8i79hNm9EWeCCwj5tp08S2zDkYl/ -6vUJmFk5GkXVVQ3zqcMR7q4TZuV9Ad0M5wKCAQAY89F3qpokGhDtlVrB78gY8Ol3 -w9eq7HwEYfu8ZTN0+TEQMTEbvLbCcNYQqfRSqAAtb8hejaBQYbxFwNx9VA6sV4Tj -6EUMnp9ijzBf4KH0+r1wgkxobDjFH+XCewDLfTvhFDXjFcpRsaLfYRWz82JqSag6 -v+lJi6B2hbZUt750aQhomS6Bu0GE9/cE+e17xpZaMgXcWDDnse6W0JfpGHe8p6qD -EcaaKadeO/gSnv8wM08nHL0d80JDOE/C5I0psKryMpmicJK0bI92ooGrkJsF+Sg1 -huu1W6p9RdxJHgphzmGAvTrOmrDAZeKtubsMS69VZVFjQFa1ZD/VMzWK1X2o ------END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/ca/testdata/ct_poison_extension_empty.der.csr b/ca/testdata/ct_poison_extension_empty.der.csr index afb20370916..2b7df0bfbc3 100644 Binary files a/ca/testdata/ct_poison_extension_empty.der.csr and b/ca/testdata/ct_poison_extension_empty.der.csr differ diff --git a/ca/testdata/dupe_name.der.csr b/ca/testdata/dupe_name.der.csr deleted file mode 100644 index 6884aa08e86..00000000000 Binary files a/ca/testdata/dupe_name.der.csr and /dev/null differ diff --git a/ca/testdata/duplicate_must_staple.der.csr 
b/ca/testdata/duplicate_must_staple.der.csr deleted file mode 100644 index 8efda2761a0..00000000000 Binary files a/ca/testdata/duplicate_must_staple.der.csr and /dev/null differ diff --git a/ca/testdata/ecdsa_allow_list.yml b/ca/testdata/ecdsa_allow_list.yml deleted file mode 100644 index a648abda31b..00000000000 --- a/ca/testdata/ecdsa_allow_list.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- 1337 diff --git a/ca/testdata/ecdsa_allow_list2.yml b/ca/testdata/ecdsa_allow_list2.yml deleted file mode 100644 index 3365f2b9c2b..00000000000 --- a/ca/testdata/ecdsa_allow_list2.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -- 1338 diff --git a/ca/testdata/ecdsa_allow_list_malformed.yml b/ca/testdata/ecdsa_allow_list_malformed.yml deleted file mode 100644 index 286888a0ab5..00000000000 --- a/ca/testdata/ecdsa_allow_list_malformed.yml +++ /dev/null @@ -1 +0,0 @@ -not yaml diff --git a/ca/testdata/must_staple.der.csr b/ca/testdata/must_staple.der.csr index a8c8462598f..c256d35c9fa 100644 Binary files a/ca/testdata/must_staple.der.csr and b/ca/testdata/must_staple.der.csr differ diff --git a/ca/testdata/no_cn.der.csr b/ca/testdata/no_cn.der.csr deleted file mode 100644 index d1f70368a75..00000000000 Binary files a/ca/testdata/no_cn.der.csr and /dev/null differ diff --git a/ca/testdata/no_san.der.csr b/ca/testdata/no_san.der.csr deleted file mode 100644 index db855823636..00000000000 Binary files a/ca/testdata/no_san.der.csr and /dev/null differ diff --git a/ca/testdata/short_key.der.csr b/ca/testdata/short_key.der.csr index fcb05edcf10..7864f44f85c 100644 Binary files a/ca/testdata/short_key.der.csr and b/ca/testdata/short_key.der.csr differ diff --git a/ca/testdata/testcsr.go b/ca/testdata/testcsr.go index e1a1b07dfa0..cd22487cde0 100644 --- a/ca/testdata/testcsr.go +++ b/ca/testdata/testcsr.go @@ -3,53 +3,17 @@ package main import ( + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" "crypto/x509" "crypto/x509/pkix" - "encoding/pem" "log" "os" ) -// A 2048-bit RSA private key -var rsaPrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA5cpXqfCaUDD+hf93j5jxbrhK4jrJAzfAEjeZj/Lx5Rv/7eEO -uhS2DdCU2is82vR6yJ7EidUYVz/nUAjSTP7JIEsbyvfsfACABbqRyGltHlJnULVH -y/EMjt9xKZf17T8tOLHVUEAJTxsvjKn4TMIQJTNrAqm/lNrUXmCIR41Go+3RBGC6 -YdAKEwcZMCzrjQGF06mC6/6xMmYMSMd6+VQRFIPpuPK/6BBp1Tgju2LleRC5uatj -QcFOoilGkfh1RnZp3GJ7q58KaqHiPmjl31rkY5vS3LP7yfU5TRBcxCSG8l8LKuRt -MArkbTEtj3PkDjbipL/SkLrZ28e5w9Egl4g1MwIDAQABAoIBABZqY5zPPK5f6SQ3 -JHmciMitL5jb9SncMV9VjyRMpa4cyh1xW9dpF81HMI4Ls7cELEoPuspbQDGaqTzU -b3dVT1dYHFDzWF1MSzDD3162cg+IKE3mMSfCzt/NCiPtj+7hv86NAmr+pCnUVBIb -rn4GXD7UwjaTSn4Bzr+aGREpxd9Nr0JdNQwxVHZ75A92vTihCfaXyMCjhW3JEpF9 -N89XehgidoGgtUxxeeb+WsO3nvVBpLv/HDxMTx/IDzvSA5nLlYMcqVzb7IJoeAQu -og0WJKlniYzvIdoQ6/hGydAW5sKd0qWh0JPYs7uLKAWrdAWvrFAp7//fYKVamalU -8pUu/WkCgYEA+tcTQ3qTnVh41O9YeM/7NULpIkuCAlR+PBRky294zho9nGQIPdaW -VNvyqqjLaHaXJVokYHbU4hDk6RbrhoWVd4Po/5g9cUkT1f6nrdZGRkg4XOCzHWvV -Yrqh3eYYX4bdiH5EhB78m0rrbjHfd7SF3cdYNzOUS2kJvCInYC6zPx8CgYEA6oRr -UhZFuoqRsEb28ELM8sHvdIMA/C3aWCu+nUGQ4gHSEb4uvuOD/7tQNuCaBioiXVPM -/4hjk9jHJcjYf5l33ANqIP7JiYAt4rzTWXF3iS6kQOhQhjksSlSnWqw0Uu1DtlpG -rzeG1ZkBuwH7Bx0yj4sGSz5sAvyF44aRsE6AC20CgYEArafWO0ISDb1hMbFdo44B -ELd45Pg3UluiZP+NZFWQ4cbC3pFWL1FvE+KNll5zK6fmLcLBKlM6QCOIBmKKvb+f -YXVeCg0ghFweMmkxNqUAU8nN02bwOa8ctFQWmaOhPgkFN2iLEJjPMsdkRA6c8ad1 -gbtvNBAuWyKlzawrbGgISesCgYBkGEjGLINubx5noqJbQee/5U6S6CdPezKqV2Fw -NT/ldul2cTn6d5krWYOPKKYU437vXokst8XooKm/Us41CAfEfCCcHKNgcLklAXsj -ve5LOwEYQw+7ekORJjiX1tAuZN51wmpQ9t4x5LB8ZQgDrU6bPbdd/jKTw7xRtGoS 
-Wi8EsQKBgG8iGy3+kVBIjKHxrN5jVs3vj/l/fQL0WRMLCMmVuDBfsKyy3f9n8R1B -/KdwoyQFwsLOyr5vAjiDgpFurXQbVyH4GDFiJGS1gb6MNcinwSTpsbOLLV7zgibX -A2NgiQ+UeWMia16dZVd6gGDlY3lQpeyLdsdDd+YppNfy9vedjbvT ------END RSA PRIVATE KEY-----` - -// NISTP256 ECDSA private key -var ecdsaPrivateKey = `-----BEGIN EC PRIVATE KEY----- -MHcCAQEEIKwK8ik0Zgw26bWaGuNYa/QAtCDRwpOPS5FIhbwuFqWuoAoGCCqGSM49 -AwEHoUQDQgAEfkxXCNEy4/zfwQ4arciDYQql7/+ftYvf51JTLCJAFu8kWKvNBENT -X8ays994FANu2VsJTF5Ud5JPYWHT87hjAA== ------END EC PRIVATE KEY-----` - func main() { - block, _ := pem.Decode([]byte(rsaPrivateKey)) - rsaPriv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { log.Fatalf("Failed to parse private key: %s", err) } @@ -65,7 +29,7 @@ func main() { "Capitalizedletters.COM", }, } - csr, err := x509.CreateCertificateRequest(rand.Reader, req, rsaPriv) + csr, err := x509.CreateCertificateRequest(rand.Reader, req, priv) if err != nil { log.Fatalf("unable to create CSR: %s", err) } diff --git a/canceled/canceled.go b/canceled/canceled.go deleted file mode 100644 index 405cacd3e44..00000000000 --- a/canceled/canceled.go +++ /dev/null @@ -1,16 +0,0 @@ -package canceled - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Is returns true if err is non-nil and is either context.Canceled, or has a -// grpc code of Canceled. This is useful because cancellations propagate through -// gRPC boundaries, and if we choose to treat in-process cancellations a certain -// way, we usually want to treat cross-process cancellations the same way. -func Is(err error) bool { - return err == context.Canceled || status.Code(err) == codes.Canceled -} diff --git a/canceled/canceled_test.go b/canceled/canceled_test.go deleted file mode 100644 index 251072d8ee8..00000000000 --- a/canceled/canceled_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package canceled - -import ( - "context" - "errors" - "testing" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestCanceled(t *testing.T) { - if !Is(context.Canceled) { - t.Errorf("Expected context.Canceled to be canceled, but wasn't.") - } - if !Is(status.Errorf(codes.Canceled, "hi")) { - t.Errorf("Expected gRPC cancellation to be cancelled, but wasn't.") - } - if Is(errors.New("hi")) { - t.Errorf("Expected random error to not be cancelled, but was.") - } -} diff --git a/cmd/admin-revoker/main.go b/cmd/admin-revoker/main.go deleted file mode 100644 index 458eb243197..00000000000 --- a/cmd/admin-revoker/main.go +++ /dev/null @@ -1,563 +0,0 @@ -package notmain - -import ( - "bufio" - "context" - "crypto" - "crypto/sha256" - "crypto/x509" - "errors" - "flag" - "fmt" - "os" - "os/user" - "sort" - "strconv" - "sync" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/db" - berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/features" - bgrpc "github.com/letsencrypt/boulder/grpc" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/privatekey" - rapb "github.com/letsencrypt/boulder/ra/proto" - "github.com/letsencrypt/boulder/revocation" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" -) - -const usageString = ` -usage: - list-reasons -config - serial-revoke -config - batched-serial-revoke -config - reg-revoke -config - private-key-block -config -dry-run= - 
private-key-revoke -config -dry-run= - - -descriptions: - list-reasons List all revocation reason codes - serial-revoke Revoke a single certificate by the hex serial number - batched-serial-revoke Revokes all certificates contained in a file of hex serial numbers - reg-revoke Revoke all certificates associated with a registration ID - private-key-block Adds the SPKI hash, derived from the provided private key, to the - blocked keys table. is expected to be the path - to a PEM formatted file containing an RSA or ECDSA private key - - private-key-revoke Revokes all certificates matching the SPKI hash derived from the - provided private key. Then adds the hash to the blocked keys - table. is expected to be the path to a PEM - formatted file containing an RSA or ECDSA private key - -flags: - all: - -config File path to the configuration file for this service (required) - - private-key-block | private-key-revoke: - -dry-run true (default): only queries for affected certificates. false: will - perform the requested block or revoke action. Only implemented for - private-key-block and private-key-revoke. -` - -type Config struct { - Revoker struct { - DB cmd.DBConfig - // Similarly, the Revoker needs a TLSConfig to set up its GRPC client - // certs, but doesn't get the TLS field from ServiceConfig, so declares - // its own. - TLS cmd.TLSConfig - - RAService *cmd.GRPCClientConfig - SAService *cmd.GRPCClientConfig - - Features map[string]bool - } - - Syslog cmd.SyslogConfig -} - -type revoker struct { - rac rapb.RegistrationAuthorityClient - sac sapb.StorageAuthorityClient - dbMap *db.WrappedMap - clk clock.Clock - log blog.Logger -} - -func newRevoker(c Config) *revoker { - logger := cmd.NewLogger(c.Syslog) - - tlsConfig, err := c.Revoker.TLS.Load() - cmd.FailOnError(err, "TLS config") - - clk := cmd.Clock() - - clientMetrics := bgrpc.NewClientMetrics(metrics.NoopRegisterer) - raConn, err := bgrpc.ClientSetup(c.Revoker.RAService, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA") - rac := rapb.NewRegistrationAuthorityClient(raConn) - - dbMap, err := sa.InitWrappedDb(c.Revoker.DB, nil, logger) - cmd.FailOnError(err, "While initializing dbMap") - - saConn, err := bgrpc.ClientSetup(c.Revoker.SAService, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") - sac := sapb.NewStorageAuthorityClient(saConn) - - return &revoker{ - rac: rac, - sac: sac, - dbMap: dbMap, - clk: clk, - log: logger, - } -} - -func (r *revoker) revokeCertificate(ctx context.Context, certObj core.Certificate, reasonCode revocation.Reason, skipBlockKey bool) error { - if reasonCode < 0 || reasonCode == 7 || reasonCode > 10 { - panic(fmt.Sprintf("Invalid reason code: %d", reasonCode)) - } - u, err := user.Current() - if err != nil { - return err - } - - var req *rapb.AdministrativelyRevokeCertificateRequest - if certObj.DER != nil { - cert, err := x509.ParseCertificate(certObj.DER) - if err != nil { - return err - } - req = &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: cert.Raw, - Code: int64(reasonCode), - AdminName: u.Username, - SkipBlockKey: skipBlockKey, - } - } else { - req = &rapb.AdministrativelyRevokeCertificateRequest{ - Serial: certObj.Serial, - Code: int64(reasonCode), - AdminName: u.Username, - SkipBlockKey: skipBlockKey, - } - } - _, err = r.rac.AdministrativelyRevokeCertificate(ctx, req) - if err != nil { - return err - } - r.log.Infof("Revoked certificate %s with reason '%s'", 
certObj.Serial, revocation.ReasonToString[reasonCode]) - return nil -} - -func (r *revoker) revokeBySerial(ctx context.Context, serial string, reasonCode revocation.Reason, skipBlockKey bool) error { - certObj, err := sa.SelectPrecertificate(r.dbMap, serial) - if err != nil { - if db.IsNoRows(err) { - return berrors.NotFoundError("precertificate with serial %q not found", serial) - } - return err - } - return r.revokeCertificate(ctx, certObj, reasonCode, skipBlockKey) -} - -func (r *revoker) revokeBySerialBatch(ctx context.Context, serialPath string, reasonCode revocation.Reason, parallelism int) error { - file, err := os.Open(serialPath) - if err != nil { - return err - } - - scanner := bufio.NewScanner(file) - if err != nil { - return err - } - - wg := new(sync.WaitGroup) - work := make(chan string, parallelism) - for i := 0; i < parallelism; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for serial := range work { - // handle newlines gracefully - if serial == "" { - continue - } - err := r.revokeBySerial(ctx, serial, reasonCode, false) - if err != nil { - r.log.Errf("failed to revoke %q: %s", serial, err) - } - } - }() - } - - for scanner.Scan() { - serial := scanner.Text() - if serial == "" { - continue - } - work <- serial - } - close(work) - wg.Wait() - - return nil -} - -func (r *revoker) revokeByReg(ctx context.Context, regID int64, reasonCode revocation.Reason) error { - _, err := r.sac.GetRegistration(ctx, &sapb.RegistrationID{Id: regID}) - if err != nil { - return fmt.Errorf("couldn't fetch registration: %w", err) - } - - certObjs, err := sa.SelectPrecertificates(r.dbMap, "WHERE registrationID = :regID", map[string]interface{}{"regID": regID}) - if err != nil { - return err - } - for _, certObj := range certObjs { - err = r.revokeCertificate(ctx, certObj.Certificate, reasonCode, false) - if err != nil { - return err - } - } - return nil -} - -func (r *revoker) revokeMalformedBySerial(ctx context.Context, serial string, reasonCode revocation.Reason) error { - return r.revokeCertificate(ctx, core.Certificate{Serial: serial}, reasonCode, false) -} - -// blockByPrivateKey blocks future issuance for certificates with a a public key -// matching the SubjectPublicKeyInfo hash generated from the PublicKey embedded -// in privateKey. The embedded PublicKey will be verified as an actual match for -// the provided private key before any blocking takes place. This method does -// not revoke any certificates directly. However, 'bad-key-revoker', which -// references the 'blockedKeys' table, will eventually revoke certificates with -// a matching SPKI hash. -func (r *revoker) blockByPrivateKey(ctx context.Context, privateKey string) error { - _, publicKey, err := privatekey.Load(privateKey) - if err != nil { - return err - } - - spkiHash, err := getPublicKeySPKIHash(publicKey) - if err != nil { - return err - } - - u, err := user.Current() - if err != nil { - return err - } - - req := &sapb.AddBlockedKeyRequest{ - KeyHash: spkiHash, - Added: r.clk.Now().UnixNano(), - Source: "admin-revoker", - Comment: fmt.Sprintf("blocked by %s", u), - RevokedBy: 0, - } - - _, err = r.sac.AddBlockedKey(ctx, req) - if err != nil { - return err - } - return nil -} - -// revokeByPrivateKey revokes all certificates with a public key matching the -// SubjectPublicKeyInfo hash generated from the PublicKey embedded in -// privateKey. The embedded PublicKey will be verified as an actual match for the -// provided private key before any revocation takes place. 
The provided key will -// not be added to the 'blockedKeys' table. This is done to avoid a race between -// 'admin-revoker' and 'bad-key-revoker'. You MUST call blockByPrivateKey after -// calling this function, on pain of violating the BRs. -func (r *revoker) revokeByPrivateKey(ctx context.Context, privateKey string) error { - _, publicKey, err := privatekey.Load(privateKey) - if err != nil { - return err - } - - spkiHash, err := getPublicKeySPKIHash(publicKey) - if err != nil { - return err - } - - matches, err := r.getCertsMatchingSPKIHash(spkiHash) - if err != nil { - return err - } - - for i, match := range matches { - resp, err := r.sac.GetCertificateStatus(ctx, &sapb.Serial{Serial: match}) - if err != nil { - return fmt.Errorf( - "failed to get status for serial %q. Entry %d of %d affected certificates: %w", - match, - (i + 1), - len(matches), - err, - ) - } - - if resp.Status != string(core.OCSPStatusGood) { - r.log.AuditInfof("serial %q is already revoked, skipping", match) - continue - } - - err = r.revokeBySerial(ctx, match, revocation.Reason(1), true) - if err != nil { - return fmt.Errorf( - "failed to revoke serial %q. Entry %d of %d affected certificates: %w", - match, - (i + 1), - len(matches), - err, - ) - } - } - return nil -} - -func (r *revoker) spkiHashInBlockedKeys(spkiHash []byte) (bool, error) { - var count int - err := r.dbMap.SelectOne(&count, "SELECT COUNT(*) as count FROM blockedKeys WHERE keyHash = ?", spkiHash) - if err != nil { - return false, err - } - - if count > 0 { - return true, nil - } - return false, nil -} - -func (r *revoker) countCertsMatchingSPKIHash(spkiHash []byte) (int, error) { - var count int - err := r.dbMap.SelectOne(&count, "SELECT COUNT(*) as count FROM keyHashToSerial WHERE keyHash = ?", spkiHash) - if err != nil { - return 0, err - } - return count, nil -} - -// TODO(#5899) Use an non-wrapped sql.Db client to iterate over results and -// return them on a channel. 
-func (r *revoker) getCertsMatchingSPKIHash(spkiHash []byte) ([]string, error) { - var h []string - _, err := r.dbMap.Select(&h, "SELECT certSerial FROM keyHashToSerial WHERE keyHash = ?", spkiHash) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("no certificates with a matching SPKI hash were found") - } - return nil, err - } - return h, nil -} - -// This abstraction is needed so that we can use sort.Sort below -type revocationCodes []revocation.Reason - -func (rc revocationCodes) Len() int { return len(rc) } -func (rc revocationCodes) Less(i, j int) bool { return rc[i] < rc[j] } -func (rc revocationCodes) Swap(i, j int) { rc[i], rc[j] = rc[j], rc[i] } - -func privateKeyBlock(r *revoker, dryRun bool, count int, spkiHash []byte, keyPath string) error { - keyExists, err := r.spkiHashInBlockedKeys(spkiHash) - if err != nil { - return fmt.Errorf("while checking if the provided key already exists in the 'blockedKeys' table: %s", err) - } - if keyExists { - return errors.New("the provided key already exists in the 'blockedKeys' table") - } - - if dryRun { - r.log.AuditInfof( - "To block issuance for this key and revoke %d certificates via bad-key-revoker, run with -dry-run=false", - count, - ) - r.log.AuditInfo("No keys were blocked or certificates revoked, exiting...") - return nil - } - - r.log.AuditInfo("Attempting to block issuance for the provided key") - err = r.blockByPrivateKey(context.Background(), keyPath) - if err != nil { - return fmt.Errorf("while attempting to block issuance for the provided key: %s", err) - } - r.log.AuditInfo("Issuance for the provided key has been successfully blocked, exiting...") - return nil -} - -func privateKeyRevoke(r *revoker, dryRun bool, count int, keyPath string) error { - if dryRun { - r.log.AuditInfof( - "To immediately revoke %d certificates and block issuance for this key, run with -dry-run=false", - count, - ) - r.log.AuditInfo("No keys were blocked or certificates revoked, exiting...") - return nil - } - - if count <= 0 { - // Do not revoke. - return nil - } - - // Revoke certificates. - r.log.AuditInfof("Attempting to revoke %d certificates", count) - err := r.revokeByPrivateKey(context.Background(), keyPath) - if err != nil { - return fmt.Errorf("while attempting to revoke certificates for the provided key: %s", err) - } - r.log.AuditInfo("All certificates matching using the provided key have been successfully") - - // Block future issuance. - r.log.AuditInfo("Attempting to block issuance for the provided key") - err = r.blockByPrivateKey(context.Background(), keyPath) - if err != nil { - return fmt.Errorf("while attempting to block issuance for the provided key: %s", err) - } - r.log.AuditInfo("All certificates have been successfully revoked and issuance blocked, exiting...") - return nil -} - -// getPublicKeySPKIHash returns a hash of the SubjectPublicKeyInfo for the -// provided public key. 
-func getPublicKeySPKIHash(pubKey crypto.PublicKey) ([]byte, error) { - rawSubjectPublicKeyInfo, err := x509.MarshalPKIXPublicKey(pubKey) - if err != nil { - return nil, err - } - spkiHash := sha256.Sum256(rawSubjectPublicKeyInfo) - return spkiHash[:], nil -} - -func main() { - usage := func() { - fmt.Fprint(os.Stderr, usageString) - os.Exit(1) - } - if len(os.Args) <= 2 { - usage() - } - - command := os.Args[1] - flagSet := flag.NewFlagSet(command, flag.ContinueOnError) - configFile := flagSet.String("config", "", "File path to the configuration file for this service") - dryRun := flagSet.Bool( - "dry-run", - true, - "true (default): only queries for affected certificates. false: will perform the requested block or revoke action", - ) - err := flagSet.Parse(os.Args[2:]) - cmd.FailOnError(err, "Error parsing flagset") - - if *configFile == "" { - usage() - } - - var c Config - err = cmd.ReadConfigFile(*configFile, &c) - cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.Revoker.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - ctx := context.Background() - r := newRevoker(c) - defer r.log.AuditPanic() - - args := flagSet.Args() - switch { - case command == "serial-revoke" && len(args) == 2: - // 1: serial, 2: reasonCode - serial := args[0] - reasonCode, err := strconv.Atoi(args[1]) - cmd.FailOnError(err, "Reason code argument must be an integer") - - err = r.revokeBySerial(ctx, serial, revocation.Reason(reasonCode), false) - cmd.FailOnError(err, "Couldn't revoke certificate by serial") - - case command == "batched-serial-revoke" && len(args) == 3: - // 1: serial file path, 2: reasonCode, 3: parallelism - serialPath := args[0] - reasonCode, err := strconv.Atoi(args[1]) - cmd.FailOnError(err, "Reason code argument must be an integer") - parallelism, err := strconv.Atoi(args[2]) - cmd.FailOnError(err, "parallelism argument must be an integer") - if parallelism < 1 { - cmd.Fail("parallelism argument must be >= 1") - } - - err = r.revokeBySerialBatch(ctx, serialPath, revocation.Reason(reasonCode), parallelism) - cmd.FailOnError(err, "Batch revocation failed") - - case command == "reg-revoke" && len(args) == 2: - // 1: registration ID, 2: reasonCode - regID, err := strconv.ParseInt(args[0], 10, 64) - cmd.FailOnError(err, "Registration ID argument must be an integer") - reasonCode, err := strconv.Atoi(args[1]) - cmd.FailOnError(err, "Reason code argument must be an integer") - - err = r.revokeByReg(ctx, regID, revocation.Reason(reasonCode)) - cmd.FailOnError(err, "Couldn't revoke certificate by registration") - - case command == "malformed-revoke" && len(args) == 3: - // 1: serial, 2: reasonCode - serial := args[0] - reasonCode, err := strconv.Atoi(args[1]) - cmd.FailOnError(err, "Reason code argument must be an integer") - - err = r.revokeMalformedBySerial(ctx, serial, revocation.Reason(reasonCode)) - cmd.FailOnError(err, "Couldn't revoke certificate by serial") - - case command == "list-reasons": - var codes revocationCodes - for k := range revocation.ReasonToString { - codes = append(codes, k) - } - sort.Sort(codes) - fmt.Printf("Revocation reason codes\n-----------------------\n\n") - for _, k := range codes { - fmt.Printf("%d: %s\n", k, revocation.ReasonToString[k]) - } - - case (command == "private-key-block" || command == "private-key-revoke") && len(args) == 1: - // 1: keyPath - keyPath := args[0] - - _, publicKey, err := privatekey.Load(keyPath) - cmd.FailOnError(err, "Failed to load the provided private key") - 
r.log.AuditInfo("The provided private key has been successfully verified") - - spkiHash, err := getPublicKeySPKIHash(publicKey) - cmd.FailOnError(err, "While obtaining the SPKI hash for the provided key") - - count, err := r.countCertsMatchingSPKIHash(spkiHash) - cmd.FailOnError(err, "While retrieving a count of certificates matching the provided key") - r.log.AuditInfof("Found %d certificates matching the provided key", count) - - if command == "private-key-block" { - err := privateKeyBlock(r, *dryRun, count, spkiHash, keyPath) - cmd.FailOnError(err, "") - } - - if command == "private-key-revoke" { - err := privateKeyRevoke(r, *dryRun, count, keyPath) - cmd.FailOnError(err, "") - } - - default: - usage() - } -} - -func init() { - cmd.RegisterCommand("admin-revoker", main) -} diff --git a/cmd/admin-revoker/main_test.go b/cmd/admin-revoker/main_test.go deleted file mode 100644 index e0d77dbb463..00000000000 --- a/cmd/admin-revoker/main_test.go +++ /dev/null @@ -1,556 +0,0 @@ -package notmain - -import ( - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "os" - "testing" - "time" - - "github.com/jmhodges/clock" - akamaipb "github.com/letsencrypt/boulder/akamai/proto" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/goodkey" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/mocks" - "github.com/letsencrypt/boulder/ra" - "github.com/letsencrypt/boulder/rocsp" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/sa/satest" - "github.com/letsencrypt/boulder/test" - ira "github.com/letsencrypt/boulder/test/inmem/ra" - isa "github.com/letsencrypt/boulder/test/inmem/sa" - "github.com/letsencrypt/boulder/test/vars" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" -) - -type mockCA struct { - mocks.MockCA -} - -func (ca *mockCA) GenerateOCSP(context.Context, *capb.GenerateOCSPRequest, ...grpc.CallOption) (*capb.OCSPResponse, error) { - return &capb.OCSPResponse{Response: []byte("fakeocspbytes")}, nil -} - -type mockPurger struct{} - -func (mp *mockPurger) Purge(context.Context, *akamaipb.PurgeRequest, ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -func TestRevokeBatch(t *testing.T) { - log := blog.UseMock() - fc := clock.NewFake() - // Set to some non-zero time. 
- fc.Set(time.Date(2015, 3, 4, 5, 0, 0, 0, time.UTC)) - dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) - if err != nil { - t.Fatalf("Failed to create dbMap: %s", err) - } - rocspIssuers, err := rocsp_config.LoadIssuers(map[string]int{ - "../../test/hierarchy/int-r3.cert.pem": 102, - }) - test.AssertNotError(t, err, "error loading issuers") - ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, rocsp.NewMockWriteSucceedClient(), rocspIssuers, fc, log, metrics.NoopRegisterer, 1) - if err != nil { - t.Fatalf("Failed to create SA: %s", err) - } - defer test.ResetSATestDatabase(t) - reg := satest.CreateWorkingRegistration(t, isa.SA{Impl: ssa}) - - issuer, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") - test.AssertNotError(t, err, "Failed to load test issuer") - signer, err := test.LoadSigner("../../test/hierarchy/int-r3.key.pem") - test.AssertNotError(t, err, "failed to load test signer") - - ra := ra.NewRegistrationAuthorityImpl(fc, - log, - metrics.NoopRegisterer, - 1, - goodkey.KeyPolicy{}, - 100, - true, - 300*24*time.Hour, - 7*24*time.Hour, - nil, - nil, - 0, - nil, - &mockPurger{}, - []*issuance.Certificate{issuer}, - ) - ra.SA = isa.SA{Impl: ssa} - ra.CA = &mockCA{} - rac := ira.RA{Impl: ra} - - r := revoker{ - rac: rac, - sac: isa.SA{Impl: ssa}, - dbMap: dbMap, - clk: fc, - log: log, - } - - serialFile, err := ioutil.TempFile("", "serials") - test.AssertNotError(t, err, "failed to open temp file") - defer os.Remove(serialFile.Name()) - - serials := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)} - for _, serial := range serials { - template := &x509.Certificate{ - SerialNumber: serial, - DNSNames: []string{"asd"}, - } - der, err := x509.CreateCertificate(rand.Reader, template, issuer.Certificate, signer.Public(), signer) - test.AssertNotError(t, err, "failed to generate test cert") - _, err = ssa.AddPrecertificate(context.Background(), &sapb.AddCertificateRequest{ - Der: der, - RegID: reg.Id, - Issued: time.Now().UnixNano(), - IssuerID: 1, - }) - test.AssertNotError(t, err, "failed to add test cert") - _, err = ssa.AddCertificate(context.Background(), &sapb.AddCertificateRequest{ - Der: der, - RegID: reg.Id, - Issued: time.Now().UnixNano(), - }) - test.AssertNotError(t, err, "failed to add test cert") - _, err = serialFile.WriteString(fmt.Sprintf("%s\n", core.SerialToString(serial))) - test.AssertNotError(t, err, "failed to write serial to temp file") - } - - err = r.revokeBySerialBatch(context.Background(), serialFile.Name(), 0, 2) - test.AssertNotError(t, err, "revokeBatch failed") - - for _, serial := range serials { - status, err := ssa.GetCertificateStatus(context.Background(), &sapb.Serial{Serial: core.SerialToString(serial)}) - test.AssertNotError(t, err, "failed to retrieve certificate status") - test.AssertEquals(t, core.OCSPStatus(status.Status), core.OCSPStatusRevoked) - } -} - -func TestBlockAndRevokeByPrivateKey(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Unique keys for each of our test certificates. - testKey1, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 1") - testKey2, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 2") - testKey3, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 3") - - // Write the contents of testKey1 to a temp file. 
- testKey1File, err := ioutil.TempFile("", "key") - test.AssertNotError(t, err, "failed to create temp file") - der, err := x509.MarshalPKCS8PrivateKey(testKey1) - test.AssertNotError(t, err, "failed to marshal testKey1 to DER") - err = pem.Encode(testKey1File, - &pem.Block{ - Type: "PRIVATE KEY", - Bytes: der, - }, - ) - test.AssertNotError(t, err, "failed to PEM encode test key 1") - test.AssertNotError(t, err, "failed to write to temp file") - defer os.Remove(testKey1File.Name()) - - // Unique JWKs so we can register each of our entries. - testJWK1 := `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}` - testJWK2 := `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}` - testJWK3 := `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}` - testJWK4 := `{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}` - - type entry struct { - jwk string - serial *big.Int - names []string - testKey *rsa.PrivateKey - spkiHash []byte - } - - entries := []*entry{ - {jwk: testJWK1, serial: big.NewInt(1), names: []string{"example-1337.com"}, testKey: testKey1}, - {jwk: testJWK2, serial: big.NewInt(2), names: []string{"example-1338.com"}, testKey: testKey2}, - {jwk: testJWK3, serial: big.NewInt(3), names: []string{"example-1339.com"}, testKey: testKey3}, - } - - // Register and insert our first 3 certificates. - for _, entry := range entries { - regId := testCtx.addRegistation(t, entry.names, entry.jwk) - cert := testCtx.addCertificate(t, entry.serial, entry.names, entry.testKey.PublicKey, regId) - - entry.spkiHash, err = getPublicKeySPKIHash(cert.PublicKey) - test.AssertNotError(t, err, "Failed to get SPKI hash for test cert") - - count, err := testCtx.revoker.countCertsMatchingSPKIHash(entry.spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for entry failed") - test.AssertEquals(t, count, 1) - } - - // Register and insert a certificate which re-uses the same public key as - // our first test certificate. - regId := testCtx.addRegistation(t, []string{"example-1336.com"}, testJWK4) - testCtx.addCertificate(t, big.NewInt(4), []string{"example-1336.com"}, testKey1.PublicKey, regId) - - // Get the SPKI hash for the provided keypair. 
- spkiHash, err := getPublicKeySPKIHash(&testKey1.PublicKey) - test.AssertNotError(t, err, "Failed to get SPKI hash for dupe.") - - // Ensure that the SPKI hash hasn't already been added to the blockedKeys - // table. - keyExists, err := testCtx.revoker.spkiHashInBlockedKeys(spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.Assert(t, !keyExists, "SPKI hash should not be in blockedKeys") - - // For some additional validation let's ensure that counts for all test - // entries, except our known duplicate, are 1. - for _, entry := range entries { - switch entry.names[0] { - case "example-1337.com": - count, err := testCtx.revoker.countCertsMatchingSPKIHash(entry.spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for entry failed") - test.AssertEquals(t, count, 2) - - case "example-1338.com": - count, err := testCtx.revoker.countCertsMatchingSPKIHash(entry.spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for entry failed") - test.AssertEquals(t, count, 1) - - case "example-1339.com": - count, err := testCtx.revoker.countCertsMatchingSPKIHash(entry.spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for entry failed") - test.AssertEquals(t, count, 1) - } - } - - // Revoke one of our two testKey1 certificates by serial. This is to test - // that revokeByPrivateKey will continue if one of the two matching - // certificates has already been revoked. - err = testCtx.revoker.revokeBySerial(context.Background(), core.SerialToString(big.NewInt(1)), 1, true) - test.AssertNotError(t, err, "While attempting to revoke 1 of our matching certificates ahead of time") - - // Revoke the certificates, but do not block issuance. - err = testCtx.revoker.revokeByPrivateKey(context.Background(), testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to revoke certificates for the provided key") - - // Ensure that the key is not blocked, yet. - keyExists, err = testCtx.revoker.spkiHashInBlockedKeys(spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.Assert(t, !keyExists, "SPKI hash should not be in blockedKeys") - - // Block issuance for the key. - err = testCtx.revoker.blockByPrivateKey(context.Background(), testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") - - // Ensure that the key is now blocked. - keyExists, err = testCtx.revoker.spkiHashInBlockedKeys(spkiHash) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.Assert(t, keyExists, "SPKI hash should not be in blockedKeys") - - // Ensure that blocking issuance is idempotent. - err = testCtx.revoker.blockByPrivateKey(context.Background(), testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") -} - -func TestPrivateKeyBlock(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Unique keys for each of our test certificates. - testKey1, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 1") - testKey2, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 2") - testKey3, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 3") - - // Write the contents of testKey1 to a temp file. 
- testKey1File, err := ioutil.TempFile("", "key") - test.AssertNotError(t, err, "failed to create temp file") - der, err := x509.MarshalPKCS8PrivateKey(testKey1) - test.AssertNotError(t, err, "failed to marshal testKey1 to DER") - err = pem.Encode(testKey1File, - &pem.Block{ - Type: "PRIVATE KEY", - Bytes: der, - }, - ) - test.AssertNotError(t, err, "failed to PEM encode test key 1") - test.AssertNotError(t, err, "failed to write to temp file") - defer os.Remove(testKey1File.Name()) - - // Unique JWKs so we can register each of our entries. - testJWK1 := `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}` - testJWK2 := `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}` - testJWK3 := `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}` - testJWK4 := `{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}` - - type entry struct { - jwk string - serial *big.Int - names []string - testKey *rsa.PrivateKey - } - - entries := []*entry{ - {jwk: testJWK1, serial: big.NewInt(1), names: []string{"example-1337.com"}, testKey: testKey1}, - {jwk: testJWK2, serial: big.NewInt(2), names: []string{"example-1338.com"}, testKey: testKey2}, - {jwk: testJWK3, serial: big.NewInt(3), names: []string{"example-1339.com"}, testKey: testKey3}, - } - - // Register and insert our first 3 certificates. - for _, entry := range entries { - regId := testCtx.addRegistation(t, entry.names, entry.jwk) - testCtx.addCertificate(t, entry.serial, entry.names, entry.testKey.PublicKey, regId) - } - - // Register and insert a certificate which re-uses the same public key as - // our first test certificate. - regId := testCtx.addRegistation(t, []string{"example-1336.com"}, testJWK4) - testCtx.addCertificate(t, big.NewInt(4), []string{"example-1336.com"}, testKey1.PublicKey, regId) - - // Get the SPKI hash for the provided keypair. - spkiHash1, err := getPublicKeySPKIHash(&testKey1.PublicKey) - test.AssertNotError(t, err, "Failed to get SPKI hash for dupe.") - - // Query the 'keyHashToSerial' table for certificates with a matching SPKI - // hash. We expect that since this key was re-used we'll find 2 matches. 
- count, err := testCtx.revoker.countCertsMatchingSPKIHash(spkiHash1) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.AssertEquals(t, count, 2) - - // With dryRun=true this should not block the key. - err = privateKeyBlock(&testCtx.revoker, true, count, spkiHash1, testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") - - // Ensure that the key is not blocked, yet. - keyExists, err := testCtx.revoker.spkiHashInBlockedKeys(spkiHash1) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.Assert(t, !keyExists, "SPKI hash should not be in blockedKeys") - - // With dryRun=false this should block the key. - err = privateKeyBlock(&testCtx.revoker, false, count, spkiHash1, testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") - - // With dryRun=false this should result in an error as the key is already blocked. - err = privateKeyBlock(&testCtx.revoker, false, count, spkiHash1, testKey1File.Name()) - test.AssertError(t, err, "Attempting to block a key which is already blocked should have failed.") - - // Ensure that the key is now blocked. - keyExists, err = testCtx.revoker.spkiHashInBlockedKeys(spkiHash1) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.Assert(t, keyExists, "SPKI hash should not be in blockedKeys") -} - -func TestPrivateKeyRevoke(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Unique keys for each of our test certificates. - testKey1, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 1") - testKey2, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 2") - testKey3, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "Failed to generate test key 3") - - // Write the contents of testKey1 to a temp file. - testKey1File, err := ioutil.TempFile("", "key") - test.AssertNotError(t, err, "failed to create temp file") - der, err := x509.MarshalPKCS8PrivateKey(testKey1) - test.AssertNotError(t, err, "failed to marshal testKey1 to DER") - err = pem.Encode(testKey1File, - &pem.Block{ - Type: "PRIVATE KEY", - Bytes: der, - }, - ) - test.AssertNotError(t, err, "failed to PEM encode test key 1") - test.AssertNotError(t, err, "failed to write to temp file") - defer os.Remove(testKey1File.Name()) - - // Unique JWKs so we can register each of our entries. 
- testJWK1 := `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}` - testJWK2 := `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}` - testJWK3 := `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}` - testJWK4 := `{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}` - - type entry struct { - jwk string - serial *big.Int - names []string - testKey *rsa.PrivateKey - } - - entries := []*entry{ - {jwk: testJWK1, serial: big.NewInt(1), names: []string{"example-1337.com"}, testKey: testKey1}, - {jwk: testJWK2, serial: big.NewInt(2), names: []string{"example-1338.com"}, testKey: testKey2}, - {jwk: testJWK3, serial: big.NewInt(3), names: []string{"example-1339.com"}, testKey: testKey3}, - } - - // Register and insert our first 3 certificates. - for _, entry := range entries { - regId := testCtx.addRegistation(t, entry.names, entry.jwk) - testCtx.addCertificate(t, entry.serial, entry.names, entry.testKey.PublicKey, regId) - } - - // Register and insert a certificate which re-uses the same public key as - // our first test certificate. - regId := testCtx.addRegistation(t, []string{"example-1336.com"}, testJWK4) - testCtx.addCertificate(t, big.NewInt(4), []string{"example-1336.com"}, testKey1.PublicKey, regId) - - // Get the SPKI hash for the provided keypair. - spkiHash1, err := getPublicKeySPKIHash(&testKey1.PublicKey) - test.AssertNotError(t, err, "Failed to get SPKI hash for dupe.") - - // Query the 'keyHashToSerial' table for certificates with a matching SPKI - // hash. We expect that since this key was re-used we'll find 2 matches. - count, err := testCtx.revoker.countCertsMatchingSPKIHash(spkiHash1) - test.AssertNotError(t, err, "countCertsMatchingSPKIHash for dupe failed") - test.AssertEquals(t, count, 2) - - // With dryRun=true this should not revoke certificates or block issuance. - err = privateKeyRevoke(&testCtx.revoker, true, count, testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") - - // Ensure that the key is not blocked, yet. 
- keyExists, err := testCtx.revoker.spkiHashInBlockedKeys(spkiHash1) - test.AssertNotError(t, err, "spkiHashInBlockedKeys failed for key that shouldn't be blocked yet") - test.Assert(t, !keyExists, "SPKI hash should not be in blockedKeys") - - // With dryRun=false this should revoke matching certificates and block the key. - err = privateKeyRevoke(&testCtx.revoker, false, count, testKey1File.Name()) - test.AssertNotError(t, err, "While attempting to block issuance for the provided key") - - // Ensure that the key is now blocked. - keyExists, err = testCtx.revoker.spkiHashInBlockedKeys(spkiHash1) - test.AssertNotError(t, err, "spkiHashInBlockedKeys failed for key that should now be blocked") - test.Assert(t, keyExists, "SPKI hash should not be in blockedKeys") -} - -type testCtx struct { - revoker revoker - ssa sapb.StorageAuthorityClient - cleanUp func() - issuer *issuance.Certificate - signer crypto.Signer -} - -func (c testCtx) addRegistation(t *testing.T, names []string, jwk string) int64 { - initialIP, err := net.ParseIP("127.0.0.1").MarshalText() - test.AssertNotError(t, err, "Failed to create initialIP") - - reg := &corepb.Registration{ - Id: 1, - Contact: []string{fmt.Sprintf("hello@%s", names[0])}, - Key: []byte(jwk), - InitialIP: initialIP, - } - - reg, err = c.ssa.NewRegistration(context.Background(), reg) - test.AssertNotError(t, err, "Failed to store test registration") - return reg.Id -} - -func (c testCtx) addCertificate(t *testing.T, serial *big.Int, names []string, pubKey rsa.PublicKey, regId int64) *x509.Certificate { - template := &x509.Certificate{ - SerialNumber: serial, - Subject: pkix.Name{Organization: []string{"tests"}}, - NotBefore: time.Now(), - NotAfter: time.Now().AddDate(0, 0, 1), - DNSNames: names, - } - - rawCert, err := x509.CreateCertificate(rand.Reader, template, c.issuer.Certificate, &pubKey, c.signer) - test.AssertNotError(t, err, "Failed to generate test cert") - - _, err = c.ssa.AddPrecertificate( - context.Background(), &sapb.AddCertificateRequest{ - Der: rawCert, - RegID: regId, - Issued: time.Now().UnixNano(), - IssuerID: 1, - }, - ) - test.AssertNotError(t, err, "Failed to add test precert") - - cert, err := x509.ParseCertificate(rawCert) - test.AssertNotError(t, err, "Failed to parse test cert") - return cert -} - -func setup(t *testing.T) testCtx { - log := blog.UseMock() - fc := clock.NewFake() - - // Set some non-zero time for GRPC requests to be non-nil. 
- fc.Set(time.Now()) - - dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) - if err != nil { - t.Fatalf("Failed to create dbMap: %s", err) - } - rocspIssuers, err := rocsp_config.LoadIssuers(map[string]int{ - "../../test/hierarchy/int-r3.cert.pem": 102, - }) - test.AssertNotError(t, err, "error loading issuers") - ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, rocsp.NewMockWriteSucceedClient(), rocspIssuers, fc, log, metrics.NoopRegisterer, 1) - if err != nil { - t.Fatalf("Failed to create SA: %s", err) - } - cleanUp := test.ResetSATestDatabase(t) - - issuer, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") - test.AssertNotError(t, err, "Failed to load test issuer") - - signer, err := test.LoadSigner("../../test/hierarchy/int-r3.key.pem") - test.AssertNotError(t, err, "Failed to load test signer") - - ra := ra.NewRegistrationAuthorityImpl( - fc, - log, - metrics.NoopRegisterer, - 1, - goodkey.KeyPolicy{}, - 100, - true, - 300*24*time.Hour, - 7*24*time.Hour, - nil, - nil, - 0, - nil, - &mockPurger{}, - []*issuance.Certificate{issuer}, - ) - ra.SA = isa.SA{Impl: ssa} - ra.CA = &mockCA{} - rac := ira.RA{Impl: ra} - - return testCtx{ - revoker: revoker{rac, isa.SA{Impl: ssa}, dbMap, fc, log}, - ssa: isa.SA{Impl: ssa}, - cleanUp: cleanUp, - issuer: issuer, - signer: signer, - } -} diff --git a/cmd/admin/admin.go b/cmd/admin/admin.go new file mode 100644 index 00000000000..8816546537d --- /dev/null +++ b/cmd/admin/admin.go @@ -0,0 +1,117 @@ +package main + +import ( + "context" + "errors" + "fmt" + + "github.com/jmhodges/clock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/features" + bgrpc "github.com/letsencrypt/boulder/grpc" + blog "github.com/letsencrypt/boulder/log" + rapb "github.com/letsencrypt/boulder/ra/proto" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// admin holds all of the external connections necessary to perform admin +// actions on a boulder deployment. +type admin struct { + rac adminRAClient + sac adminSAClient + saroc sapb.StorageAuthorityReadOnlyClient + + clk clock.Clock + log blog.Logger +} + +// adminRAClient defines the subset of RA methods that the admin tool relies on. +type adminRAClient interface { + AdministrativelyRevokeCertificate(context.Context, *rapb.AdministrativelyRevokeCertificateRequest, ...grpc.CallOption) (*emptypb.Empty, error) +} + +// adminSAClient defines the subset of SA methods that the admin tool relies on. +type adminSAClient interface { + AddBlockedKey(context.Context, *sapb.AddBlockedKeyRequest, ...grpc.CallOption) (*emptypb.Empty, error) + AddRateLimitOverride(context.Context, *sapb.AddRateLimitOverrideRequest, ...grpc.CallOption) (*sapb.AddRateLimitOverrideResponse, error) + DisableRateLimitOverride(context.Context, *sapb.DisableRateLimitOverrideRequest, ...grpc.CallOption) (*emptypb.Empty, error) + EnableRateLimitOverride(context.Context, *sapb.EnableRateLimitOverrideRequest, ...grpc.CallOption) (*emptypb.Empty, error) + PauseIdentifiers(context.Context, *sapb.PauseRequest, ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) + UnpauseAccount(context.Context, *sapb.RegistrationID, ...grpc.CallOption) (*sapb.Count, error) +} + +// newAdmin constructs a new admin object on the heap and returns a pointer to +// it. 
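+// When dryRun is true, the RA client and the mutating SA client are swapped
+// for logging stubs (dryRunRAC and dryRunSAC), so no writes reach the real
+// services; the read-only SA client is always a real gRPC client.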
+func newAdmin(configFile string, dryRun bool) (*admin, error) { + // Unlike most boulder service constructors, this does all of its own config + // parsing and dependency setup. If this is broken out into its own package + // (outside the //cmd/ directory) those pieces of setup should stay behind + // in //cmd/admin/main.go, to match other boulder services. + var c Config + err := cmd.ReadConfigFile(configFile, &c) + if err != nil { + return nil, fmt.Errorf("parsing config file: %w", err) + } + + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, "") + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + + clk := clock.New() + features.Set(c.Admin.Features) + + tlsConfig, err := c.Admin.TLS.Load(scope) + if err != nil { + return nil, fmt.Errorf("loading TLS config: %w", err) + } + + var rac adminRAClient = dryRunRAC{log: logger} + if !dryRun { + raConn, err := bgrpc.ClientSetup(c.Admin.RAService, tlsConfig, scope, clk) + if err != nil { + return nil, fmt.Errorf("creating RA gRPC client: %w", err) + } + rac = rapb.NewRegistrationAuthorityClient(raConn) + } + + saConn, err := bgrpc.ClientSetup(c.Admin.SAService, tlsConfig, scope, clk) + if err != nil { + return nil, fmt.Errorf("creating SA gRPC client: %w", err) + } + saroc := sapb.NewStorageAuthorityReadOnlyClient(saConn) + + var sac adminSAClient = dryRunSAC{log: logger} + if !dryRun { + sac = sapb.NewStorageAuthorityClient(saConn) + } + + return &admin{ + rac: rac, + sac: sac, + saroc: saroc, + clk: clk, + log: logger, + }, nil +} + +// findActiveInputMethodFlag returns a single key from setInputs with a value of `true`, +// if exactly one exists. Otherwise it returns an error. +func findActiveInputMethodFlag(setInputs map[string]bool) (string, error) { + var activeFlags []string + for flag, isSet := range setInputs { + if isSet { + activeFlags = append(activeFlags, flag) + } + } + + if len(activeFlags) == 0 { + return "", errors.New("at least one input method flag must be specified") + } else if len(activeFlags) > 1 { + return "", fmt.Errorf("more than one input method flag specified: %v", activeFlags) + } + + return activeFlags[0], nil +} diff --git a/cmd/admin/admin_test.go b/cmd/admin/admin_test.go new file mode 100644 index 00000000000..1e0ba3d2e3c --- /dev/null +++ b/cmd/admin/admin_test.go @@ -0,0 +1,59 @@ +package main + +import ( + "testing" + + "github.com/letsencrypt/boulder/test" +) + +func Test_findActiveInputMethodFlag(t *testing.T) { + tests := []struct { + name string + setInputs map[string]bool + expected string + wantErr bool + }{ + { + name: "No active flags", + setInputs: map[string]bool{ + "-private-key": false, + "-spki-file": false, + "-cert-file": false, + }, + expected: "", + wantErr: true, + }, + { + name: "Multiple active flags", + setInputs: map[string]bool{ + "-private-key": true, + "-spki-file": true, + "-cert-file": false, + }, + expected: "", + wantErr: true, + }, + { + name: "Single active flag", + setInputs: map[string]bool{ + "-private-key": true, + "-spki-file": false, + "-cert-file": false, + }, + expected: "-private-key", + wantErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := findActiveInputMethodFlag(tc.setInputs) + if tc.wantErr { + test.AssertError(t, err, "findActiveInputMethodFlag() should have errored") + } else { + test.AssertNotError(t, err, "findActiveInputMethodFlag() should not have errored") + test.AssertEquals(t, result, tc.expected) + } + }) + } +} diff --git a/cmd/admin/cert.go 
b/cmd/admin/cert.go new file mode 100644 index 00000000000..607a5c5e438 --- /dev/null +++ b/cmd/admin/cert.go @@ -0,0 +1,355 @@ +package main + +import ( + "bufio" + "context" + "errors" + "flag" + "fmt" + "io" + "os" + "os/user" + "strings" + "sync" + "sync/atomic" + "unicode" + + core "github.com/letsencrypt/boulder/core" + berrors "github.com/letsencrypt/boulder/errors" + rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/revocation" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// subcommandRevokeCert encapsulates the "admin revoke-cert" command. It accepts +// many flags specifying different ways a to-be-revoked certificate can be +// identified. It then gathers the serial numbers of all identified certs, spins +// up a worker pool, and revokes all of those serials individually. +// +// Note that some batch methods (such as -incident-table and -serials-file) can +// result in high memory usage, as this subcommand will gather every serial in +// memory before beginning to revoke any of them. This trades local memory usage +// for shorter database and gRPC query times, so that we don't need massive +// timeouts when collecting serials to revoke. +type subcommandRevokeCert struct { + parallelism uint + reasonStr string + skipBlock bool + malformed bool + serial string + incidentTable string + serialsFile string + privKey string + regID int64 + certFile string + crlShard int64 +} + +var _ subcommand = (*subcommandRevokeCert)(nil) + +func (s *subcommandRevokeCert) Desc() string { + return "Revoke one or more certificates" +} + +func (s *subcommandRevokeCert) Flags(flag *flag.FlagSet) { + // General flags relevant to all certificate input methods. + flag.UintVar(&s.parallelism, "parallelism", 10, "Number of concurrent workers to use while revoking certs") + flag.StringVar(&s.reasonStr, "reason", "unspecified", "Revocation reason (unspecified, keyCompromise, superseded, cessationOfOperation, or privilegeWithdrawn)") + flag.BoolVar(&s.skipBlock, "skip-block-key", false, "Skip blocking the key, if revoked for keyCompromise - use with extreme caution") + flag.BoolVar(&s.malformed, "malformed", false, "Indicates that the cert cannot be parsed - use with caution") + flag.Int64Var(&s.crlShard, "crl-shard", 0, "For malformed certs, the CRL shard the certificate belongs to") + + // Flags specifying the input method for the certificates to be revoked. + flag.StringVar(&s.serial, "serial", "", "Revoke the certificate with this hex serial") + flag.StringVar(&s.incidentTable, "incident-table", "", "Revoke all certificates whose serials are in this table") + flag.StringVar(&s.serialsFile, "serials-file", "", "Revoke all certificates whose hex serials are in this file") + flag.StringVar(&s.privKey, "private-key", "", "Revoke all certificates whose pubkey matches this private key") + flag.Int64Var(&s.regID, "reg-id", 0, "Revoke all certificates issued to this account") + flag.StringVar(&s.certFile, "cert-file", "", "Revoke the single PEM-formatted certificate in this file") +} + +func (s *subcommandRevokeCert) Run(ctx context.Context, a *admin) error { + if s.parallelism == 0 { + // Why did they override it to 0, instead of just leaving it the default? 
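+		// With parallelism 0, revokeSerials would start no workers and its work
+		// channel would never be drained, so reject the value up front.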
+		return fmt.Errorf("got unacceptable parallelism %d", s.parallelism)
+	}
+
+	reasonCode, err := revocation.StringToReason(s.reasonStr)
+	if err != nil {
+		return fmt.Errorf("looking up revocation reason: %w", err)
+	}
+
+	if s.skipBlock && reasonCode == revocation.KeyCompromise {
+		// We would only add the SPKI hash of the pubkey to the blockedKeys table if
+		// the revocation reason is keyCompromise.
+		return errors.New("-skip-block-key only makes sense with -reason=keyCompromise")
+	}
+
+	if s.malformed && reasonCode == revocation.KeyCompromise {
+		// This is because we can't extract and block the pubkey if we can't
+		// parse the certificate.
+		return errors.New("cannot revoke malformed certs for reason keyCompromise")
+	}
+
+	// This is a map of all input-selection flags to whether or not they were set
+	// to a non-default value. We use this to ensure that exactly one input
+	// selection flag was given on the command line.
+	setInputs := map[string]bool{
+		"-serial":         s.serial != "",
+		"-incident-table": s.incidentTable != "",
+		"-serials-file":   s.serialsFile != "",
+		"-private-key":    s.privKey != "",
+		"-reg-id":         s.regID != 0,
+		"-cert-file":      s.certFile != "",
+	}
+	activeFlag, err := findActiveInputMethodFlag(setInputs)
+	if err != nil {
+		return err
+	}
+
+	var serials []string
+	switch activeFlag {
+	case "-serial":
+		serials, err = []string{s.serial}, nil
+	case "-incident-table":
+		serials, err = a.serialsFromIncidentTable(ctx, s.incidentTable)
+	case "-serials-file":
+		serials, err = a.serialsFromFile(ctx, s.serialsFile)
+	case "-private-key":
+		serials, err = a.serialsFromPrivateKeys(ctx, s.privKey)
+	case "-reg-id":
+		serials, err = a.serialsFromRegID(ctx, s.regID)
+	case "-cert-file":
+		serials, err = a.serialsFromCertPEM(ctx, s.certFile)
+	default:
+		return errors.New("no recognized input method flag set (this shouldn't happen)")
+	}
+	if err != nil {
+		return fmt.Errorf("collecting serials to revoke: %w", err)
+	}
+
+	serials, err = cleanSerials(serials)
+	if err != nil {
+		return err
+	}
+
+	if len(serials) == 0 {
+		return errors.New("no serials to revoke found")
+	}
+
+	a.log.Infof("Found %d certificates to revoke", len(serials))
+
+	if s.malformed {
+		return s.revokeMalformed(ctx, a, serials, reasonCode)
+	}
+
+	err = a.revokeSerials(ctx, serials, reasonCode, s.skipBlock, s.parallelism)
+	if err != nil {
+		return fmt.Errorf("revoking serials: %w", err)
+	}
+
+	return nil
+}
+
+func (s *subcommandRevokeCert) revokeMalformed(ctx context.Context, a *admin, serials []string, reasonCode revocation.Reason) error {
+	u, err := user.Current()
+	if err != nil {
+		return fmt.Errorf("getting admin username: %w", err)
+	}
+	if s.crlShard == 0 {
+		return errors.New("when revoking malformed certificates, a nonzero CRL shard must be specified")
+	}
+	if len(serials) > 1 {
+		return errors.New("when revoking malformed certificates, only one cert at a time is allowed")
+	}
+	_, err = a.rac.AdministrativelyRevokeCertificate(
+		ctx,
+		&rapb.AdministrativelyRevokeCertificateRequest{
+			Serial:       serials[0],
+			Code:         int64(reasonCode),
+			AdminName:    u.Username,
+			SkipBlockKey: s.skipBlock,
+			Malformed:    true,
+			CrlShard:     s.crlShard,
+		},
+	)
+	return err
+}
+
+func (a *admin) serialsFromIncidentTable(ctx context.Context, tableName string) ([]string, error) {
+	stream, err := a.saroc.SerialsForIncident(ctx, &sapb.SerialsForIncidentRequest{IncidentTable: tableName})
+	if err != nil {
+		return nil, fmt.Errorf("setting up stream of serials from incident table %q: %s", tableName, err)
+	}
+
+	var serials []string
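+	// Receive from the stream until io.EOF, collecting one serial per message.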
+ for { + is, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("streaming serials from incident table %q: %s", tableName, err) + } + serials = append(serials, is.Serial) + } + + return serials, nil +} + +func (a *admin) serialsFromFile(_ context.Context, filePath string) ([]string, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("opening serials file: %w", err) + } + + var serials []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + serial := scanner.Text() + if serial == "" { + continue + } + serials = append(serials, serial) + } + + return serials, nil +} + +func (a *admin) serialsFromPrivateKeys(ctx context.Context, privkeyFile string) ([]string, error) { + spkiHashes, err := a.spkiHashesFromPrivateKeys(privkeyFile) + if err != nil { + return nil, err + } + + var serials []string + + for _, spkiHash := range spkiHashes { + stream, err := a.saroc.GetSerialsByKey(ctx, &sapb.SPKIHash{KeyHash: spkiHash}) + if err != nil { + return nil, fmt.Errorf("setting up stream of serials from SA: %s", err) + } + + for { + serial, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("streaming serials from SA: %s", err) + } + serials = append(serials, serial.Serial) + } + } + + return serials, nil +} + +func (a *admin) serialsFromRegID(ctx context.Context, regID int64) ([]string, error) { + _, err := a.saroc.GetRegistration(ctx, &sapb.RegistrationID{Id: regID}) + if err != nil { + return nil, fmt.Errorf("couldn't confirm regID exists: %w", err) + } + + stream, err := a.saroc.GetSerialsByAccount(ctx, &sapb.RegistrationID{Id: regID}) + if err != nil { + return nil, fmt.Errorf("setting up stream of serials from SA: %s", err) + } + + var serials []string + for { + serial, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("streaming serials from SA: %s", err) + } + serials = append(serials, serial.Serial) + } + + return serials, nil +} + +func (a *admin) serialsFromCertPEM(_ context.Context, filename string) ([]string, error) { + cert, err := core.LoadCert(filename) + if err != nil { + return nil, fmt.Errorf("loading certificate pem: %w", err) + } + + return []string{core.SerialToString(cert.SerialNumber)}, nil +} + +// cleanSerials removes non-alphanumeric characters from the serials and checks +// that all resulting serials are valid (hex encoded, and the correct length). 
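+// For example, a colon-separated serial like
+// "2a:18:59:2b:7f:4b:f5:96:fb:1a:1d:f1:35:56:7a:cd:82:5a" is reduced to
+// "2a18592b7f4bf596fb1a1df135567acd825a" before being validated.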
+func cleanSerials(serials []string) ([]string, error) { + serialStrip := func(r rune) rune { + switch { + case unicode.IsLetter(r): + return r + case unicode.IsDigit(r): + return r + } + return rune(-1) + } + + var ret []string + for _, s := range serials { + cleaned := strings.Map(serialStrip, s) + if !core.ValidSerial(cleaned) { + return nil, fmt.Errorf("cleaned serial %q is not valid", cleaned) + } + ret = append(ret, cleaned) + } + return ret, nil +} + +func (a *admin) revokeSerials(ctx context.Context, serials []string, reason revocation.Reason, skipBlockKey bool, parallelism uint) error { + u, err := user.Current() + if err != nil { + return fmt.Errorf("getting admin username: %w", err) + } + + var errCount atomic.Uint64 + wg := new(sync.WaitGroup) + work := make(chan string, parallelism) + for range parallelism { + wg.Go(func() { + for serial := range work { + _, err := a.rac.AdministrativelyRevokeCertificate( + ctx, + &rapb.AdministrativelyRevokeCertificateRequest{ + Serial: serial, + Code: int64(reason), + AdminName: u.Username, + SkipBlockKey: skipBlockKey, + // This is a well-formed certificate so send CrlShard 0 + // to let the RA figure out the right shard from the cert. + Malformed: false, + CrlShard: 0, + }, + ) + if err != nil { + errCount.Add(1) + if errors.Is(err, berrors.AlreadyRevoked) { + a.log.Warningf("not revoking %q: already revoked", serial) + } else { + a.log.Errf("failed to revoke %q: %s", serial, err) + } + } + } + }) + } + + for _, serial := range serials { + work <- serial + } + close(work) + wg.Wait() + + if errCount.Load() > 0 { + return fmt.Errorf("encountered %d errors while revoking certs; see logs above for details", errCount.Load()) + } + + return nil +} diff --git a/cmd/admin/cert_test.go b/cmd/admin/cert_test.go new file mode 100644 index 00000000000..7a42898703d --- /dev/null +++ b/cmd/admin/cert_test.go @@ -0,0 +1,329 @@ +package main + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "errors" + "os" + "path" + "reflect" + "slices" + "strings" + "sync" + "testing" + "time" + + "github.com/jmhodges/clock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/core" + corepb "github.com/letsencrypt/boulder/core/proto" + berrors "github.com/letsencrypt/boulder/errors" + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/mocks" + rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/revocation" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" +) + +// mockSAWithIncident is a mock which only implements the SerialsForIncident +// gRPC method. It can be initialized with a set of serials for that method +// to return. +type mockSAWithIncident struct { + sapb.StorageAuthorityReadOnlyClient + incidentSerials []string +} + +// SerialsForIncident returns a fake gRPC stream client object which itself +// will return the mockSAWithIncident's serials in order. 
+func (msa *mockSAWithIncident) SerialsForIncident(_ context.Context, _ *sapb.SerialsForIncidentRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.IncidentSerial], error) { + fakeResults := make([]*sapb.IncidentSerial, len(msa.incidentSerials)) + for i, serial := range msa.incidentSerials { + fakeResults[i] = &sapb.IncidentSerial{Serial: serial} + } + return &mocks.ServerStreamClient[sapb.IncidentSerial]{Results: fakeResults}, nil +} + +func TestSerialsFromIncidentTable(t *testing.T) { + t.Parallel() + serials := []string{"foo", "bar", "baz"} + + a := admin{ + saroc: &mockSAWithIncident{incidentSerials: serials}, + } + + res, err := a.serialsFromIncidentTable(context.Background(), "tablename") + test.AssertNotError(t, err, "getting serials from mock SA") + test.AssertDeepEquals(t, res, serials) +} + +func TestSerialsFromFile(t *testing.T) { + t.Parallel() + serials := []string{"foo", "bar", "baz"} + + serialsFile := path.Join(t.TempDir(), "serials.txt") + err := os.WriteFile(serialsFile, []byte(strings.Join(serials, "\n")), os.ModeAppend) + test.AssertNotError(t, err, "writing temp serials file") + + a := admin{} + + res, err := a.serialsFromFile(context.Background(), serialsFile) + test.AssertNotError(t, err, "getting serials from file") + test.AssertDeepEquals(t, res, serials) +} + +// mockSAWithKey is a mock which only implements the GetSerialsByKey +// gRPC method. It can be initialized with a set of serials for that method +// to return. +type mockSAWithKey struct { + sapb.StorageAuthorityReadOnlyClient + keyHash []byte + serials []string +} + +// GetSerialsByKey returns a fake gRPC stream client object which itself +// will return the mockSAWithKey's serials in order. +func (msa *mockSAWithKey) GetSerialsByKey(_ context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) { + if !slices.Equal(req.KeyHash, msa.keyHash) { + return &mocks.ServerStreamClient[sapb.Serial]{}, nil + } + fakeResults := make([]*sapb.Serial, len(msa.serials)) + for i, serial := range msa.serials { + fakeResults[i] = &sapb.Serial{Serial: serial} + } + return &mocks.ServerStreamClient[sapb.Serial]{Results: fakeResults}, nil +} + +func TestSerialsFromPrivateKey(t *testing.T) { + serials := []string{"foo", "bar", "baz"} + fc := clock.NewFake() + fc.Set(time.Now()) + + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "creating test private key") + keyBytes, err := x509.MarshalPKCS8PrivateKey(privKey) + test.AssertNotError(t, err, "marshalling test private key bytes") + + keyFile := path.Join(t.TempDir(), "key.pem") + keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: keyBytes}) + err = os.WriteFile(keyFile, keyPEM, os.ModeAppend) + test.AssertNotError(t, err, "writing test private key file") + + keyHash, err := core.KeyDigest(privKey.Public()) + test.AssertNotError(t, err, "computing test SPKI hash") + + a := admin{saroc: &mockSAWithKey{keyHash: keyHash[:], serials: serials}} + + res, err := a.serialsFromPrivateKeys(context.Background(), keyFile) + test.AssertNotError(t, err, "getting serials from keyHashToSerial table") + test.AssertDeepEquals(t, res, serials) +} + +// mockSAWithAccount is a mock which only implements the GetSerialsByAccount +// gRPC method. It can be initialized with a set of serials for that method +// to return. 
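+// (It also stubs GetRegistration, which succeeds only for the configured
+// regID, so that serialsFromRegID's up-front account existence check passes.)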
+type mockSAWithAccount struct { + sapb.StorageAuthorityReadOnlyClient + regID int64 + serials []string +} + +func (msa *mockSAWithAccount) GetRegistration(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) { + if req.Id != msa.regID { + return nil, errors.New("no such reg") + } + return &corepb.Registration{}, nil +} + +// GetSerialsByAccount returns a fake gRPC stream client object which itself +// will return the mockSAWithAccount's serials in order. +func (msa *mockSAWithAccount) GetSerialsByAccount(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) { + if req.Id != msa.regID { + return &mocks.ServerStreamClient[sapb.Serial]{}, nil + } + fakeResults := make([]*sapb.Serial, len(msa.serials)) + for i, serial := range msa.serials { + fakeResults[i] = &sapb.Serial{Serial: serial} + } + return &mocks.ServerStreamClient[sapb.Serial]{Results: fakeResults}, nil +} + +func TestSerialsFromRegID(t *testing.T) { + serials := []string{"foo", "bar", "baz"} + a := admin{saroc: &mockSAWithAccount{regID: 123, serials: serials}} + + res, err := a.serialsFromRegID(context.Background(), 123) + test.AssertNotError(t, err, "getting serials from serials table") + test.AssertDeepEquals(t, res, serials) +} + +// mockRARecordingRevocations is a mock which only implements the +// AdministrativelyRevokeCertificate gRPC method. It can be initialized with +// serials to recognize as already revoked, or to fail. +type mockRARecordingRevocations struct { + rapb.RegistrationAuthorityClient + doomedToFail []string + alreadyRevoked []string + revocationRequests []*rapb.AdministrativelyRevokeCertificateRequest + sync.Mutex +} + +// AdministrativelyRevokeCertificate records the request it received on the mock +// RA struct, and succeeds if it doesn't recognize the serial as one it should +// fail for. +func (mra *mockRARecordingRevocations) AdministrativelyRevokeCertificate(_ context.Context, req *rapb.AdministrativelyRevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + mra.Lock() + defer mra.Unlock() + mra.revocationRequests = append(mra.revocationRequests, req) + if slices.Contains(mra.doomedToFail, req.Serial) { + return nil, errors.New("oops") + } + if slices.Contains(mra.alreadyRevoked, req.Serial) { + return nil, berrors.AlreadyRevokedError("too slow") + } + return &emptypb.Empty{}, nil +} + +func (mra *mockRARecordingRevocations) reset() { + mra.doomedToFail = nil + mra.alreadyRevoked = nil + mra.revocationRequests = nil +} + +func TestRevokeSerials(t *testing.T) { + t.Parallel() + serials := []string{ + "2a18592b7f4bf596fb1a1df135567acd825a", + "038c3f6388afb7695dd4d6bbe3d264f1e4e2", + "048c3f6388afb7695dd4d6bbe3d264f1e5e5", + } + mra := mockRARecordingRevocations{} + log := blog.NewMock() + a := admin{rac: &mra, log: log} + + assertRequestsContain := func(reqs []*rapb.AdministrativelyRevokeCertificateRequest, code revocation.Reason, skipBlockKey bool) { + t.Helper() + for _, req := range reqs { + test.AssertEquals(t, len(req.Cert), 0) + test.AssertEquals(t, req.Code, int64(code)) + test.AssertEquals(t, req.SkipBlockKey, skipBlockKey) + } + } + + // Revoking should result in 3 gRPC requests and quiet execution. 
+ mra.reset() + log.Clear() + err := a.revokeSerials(context.Background(), serials, 0, false, 1) + test.AssertEquals(t, len(log.GetAllMatching("invalid serial format")), 0) + test.AssertNotError(t, err, "") + test.AssertEquals(t, len(log.GetAll()), 0) + test.AssertEquals(t, len(mra.revocationRequests), 3) + assertRequestsContain(mra.revocationRequests, 0, false) + + // Revoking an already-revoked serial should result in one log line. + mra.reset() + log.Clear() + mra.alreadyRevoked = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"} + err = a.revokeSerials(context.Background(), serials, 0, false, 1) + t.Logf("error: %s", err) + t.Logf("logs: %s", strings.Join(log.GetAll(), "")) + test.AssertError(t, err, "already-revoked should result in error") + test.AssertEquals(t, len(log.GetAllMatching("not revoking")), 1) + test.AssertEquals(t, len(mra.revocationRequests), 3) + assertRequestsContain(mra.revocationRequests, 0, false) + + // Revoking a doomed-to-fail serial should also result in one log line. + mra.reset() + log.Clear() + mra.doomedToFail = []string{"048c3f6388afb7695dd4d6bbe3d264f1e5e5"} + err = a.revokeSerials(context.Background(), serials, 0, false, 1) + test.AssertError(t, err, "gRPC error should result in error") + test.AssertEquals(t, len(log.GetAllMatching("failed to revoke")), 1) + test.AssertEquals(t, len(mra.revocationRequests), 3) + assertRequestsContain(mra.revocationRequests, 0, false) + + // Revoking with other parameters should get carried through. + mra.reset() + log.Clear() + err = a.revokeSerials(context.Background(), serials, 1, true, 3) + test.AssertNotError(t, err, "") + test.AssertEquals(t, len(mra.revocationRequests), 3) + assertRequestsContain(mra.revocationRequests, 1, true) + + // Revoking in dry-run mode should result in no gRPC requests and three logs. 
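+	// (dryRunRAC logs one "dry-run:" line per revocation request rather than
+	// sending it to the RA.)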
+	mra.reset()
+	log.Clear()
+	a.rac = dryRunRAC{log: log}
+	err = a.revokeSerials(context.Background(), serials, 0, false, 1)
+	test.AssertNotError(t, err, "")
+	test.AssertEquals(t, len(log.GetAllMatching("dry-run:")), 3)
+	test.AssertEquals(t, len(mra.revocationRequests), 0)
+	assertRequestsContain(mra.revocationRequests, 0, false)
+}
+
+func TestRevokeMalformed(t *testing.T) {
+	t.Parallel()
+	mra := mockRARecordingRevocations{}
+	log := blog.NewMock()
+	a := &admin{
+		rac: &mra,
+		log: log,
+	}
+
+	s := subcommandRevokeCert{
+		crlShard: 623,
+	}
+	serial := "0379c3dfdd518be45948f2dbfa6ea3e9b209"
+	err := s.revokeMalformed(context.Background(), a, []string{serial}, 1)
+	if err != nil {
+		t.Errorf("revokeMalformed with crlShard 623: want success, got %s", err)
+	}
+	if len(mra.revocationRequests) != 1 {
+		t.Errorf("revokeMalformed: want 1 revocation request to RA, got %v", mra.revocationRequests)
+	}
+	if mra.revocationRequests[0].Serial != serial {
+		t.Errorf("revokeMalformed: want %s to be revoked, got %s", serial, mra.revocationRequests[0])
+	}
+
+	s = subcommandRevokeCert{
+		crlShard: 0,
+	}
+	err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2"}, 1)
+	if err == nil {
+		t.Errorf("revokeMalformed with crlShard 0: want error, got none")
+	}
+
+	s = subcommandRevokeCert{
+		crlShard: 623,
+	}
+	err = s.revokeMalformed(context.Background(), a, []string{"038c3f6388afb7695dd4d6bbe3d264f1e4e2", "28a94f966eae14e525777188512ddf5a0a3b"}, 1)
+	if err == nil {
+		t.Errorf("revokeMalformed with multiple serials: want error, got none")
+	}
+}
+
+func TestCleanSerials(t *testing.T) {
+	input := []string{
+		"2a:18:59:2b:7f:4b:f5:96:fb:1a:1d:f1:35:56:7a:cd:82:5a",
+		"03:8c:3f:63:88:af:b7:69:5d:d4:d6:bb:e3:d2:64:f1:e4:e2",
+		"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
+	}
+	expected := []string{
+		"2a18592b7f4bf596fb1a1df135567acd825a",
+		"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
+		"038c3f6388afb7695dd4d6bbe3d264f1e4e2",
+	}
+	output, err := cleanSerials(input)
+	if err != nil {
+		t.Errorf("cleanSerials(%s): %s, want %s", input, err, expected)
+	}
+	if !reflect.DeepEqual(output, expected) {
+		t.Errorf("cleanSerials(%s)=%s, want %s", input, output, expected)
+	}
+}
diff --git a/cmd/admin/dryrun.go b/cmd/admin/dryrun.go
new file mode 100644
index 00000000000..13c15f062e4
--- /dev/null
+++ b/cmd/admin/dryrun.go
@@ -0,0 +1,64 @@
+package main
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	blog "github.com/letsencrypt/boulder/log"
+	rapb "github.com/letsencrypt/boulder/ra/proto"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+)
+
+type dryRunRAC struct {
+	log blog.Logger
+}
+
+var _ adminRAClient = (*dryRunRAC)(nil)
+
+func (d dryRunRAC) AdministrativelyRevokeCertificate(_ context.Context, req *rapb.AdministrativelyRevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
+	b, err := prototext.Marshal(req)
+	if err != nil {
+		return nil, err
+	}
+	d.log.Infof("dry-run: %#v", string(b))
+	return &emptypb.Empty{}, nil
+}
+
+type dryRunSAC struct {
+	log blog.Logger
+}
+
+var _ adminSAClient = (*dryRunSAC)(nil)
+
+func (d dryRunSAC) AddBlockedKey(_ context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) {
+	d.log.Infof("dry-run: Block SPKI hash %x by %s %s", req.KeyHash, req.Comment, req.Source)
+	return &emptypb.Empty{}, nil
+}
+
+func (d dryRunSAC) AddRateLimitOverride(_ context.Context, req
*sapb.AddRateLimitOverrideRequest, _ ...grpc.CallOption) (*sapb.AddRateLimitOverrideResponse, error) { + d.log.Infof("dry-run: Add override for %q (%s)", req.Override.BucketKey, req.Override.Comment) + return &sapb.AddRateLimitOverrideResponse{Inserted: true, Enabled: true}, nil +} + +func (d dryRunSAC) DisableRateLimitOverride(_ context.Context, req *sapb.DisableRateLimitOverrideRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + d.log.Infof("dry-run: Disable override for %q", req.BucketKey) + return &emptypb.Empty{}, nil +} + +func (d dryRunSAC) EnableRateLimitOverride(_ context.Context, req *sapb.EnableRateLimitOverrideRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + d.log.Infof("dry-run: Enable override for %q", req.BucketKey) + return &emptypb.Empty{}, nil +} + +func (d dryRunSAC) PauseIdentifiers(_ context.Context, req *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) { + d.log.Infof("dry-run: Pause identifiers %#v for account %d", req.Identifiers, req.RegistrationID) + return &sapb.PauseIdentifiersResponse{Paused: int64(len(req.Identifiers))}, nil +} + +func (d dryRunSAC) UnpauseAccount(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { + d.log.Infof("dry-run: Unpause account %d", req.Id) + return &sapb.Count{Count: 1}, nil +} diff --git a/cmd/admin/key.go b/cmd/admin/key.go new file mode 100644 index 00000000000..bf66236817e --- /dev/null +++ b/cmd/admin/key.go @@ -0,0 +1,287 @@ +package main + +import ( + "bufio" + "context" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "errors" + "flag" + "fmt" + "io" + "os" + "os/user" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/core" + berrors "github.com/letsencrypt/boulder/errors" + "github.com/letsencrypt/boulder/privatekey" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// subcommandBlockKey encapsulates the "admin block-key" command. +type subcommandBlockKey struct { + parallelism uint + comment string + + privKey string + spkiFile string + certFile string + csrFile string + csrFileExpectedCN string + + checkSignature bool +} + +var _ subcommand = (*subcommandBlockKey)(nil) + +func (s *subcommandBlockKey) Desc() string { + return "Block a keypair from any future issuance" +} + +func (s *subcommandBlockKey) Flags(flag *flag.FlagSet) { + // General flags relevant to all key input methods. + flag.UintVar(&s.parallelism, "parallelism", 10, "Number of concurrent workers to use while blocking keys") + flag.StringVar(&s.comment, "comment", "", "Additional context to add to database comment column") + + // Flags specifying the input method for the keys to be blocked. + flag.StringVar(&s.privKey, "private-key", "", "Block issuance for the pubkey corresponding to this private key") + flag.StringVar(&s.spkiFile, "spki-file", "", "Block issuance for all keys listed in this file as SHA256 hashes of SPKI, hex encoded, one per line") + flag.StringVar(&s.certFile, "cert-file", "", "Block issuance for the public key of the single PEM-formatted certificate in this file") + flag.StringVar(&s.csrFile, "csr-file", "", "Block issuance for the public key of the single PEM-formatted CSR in this file") + flag.StringVar(&s.csrFileExpectedCN, "csr-file-expected-cn", "The key that signed this CSR has been publicly disclosed. 
It should not be used for any purpose.", "The Subject CN of a CSR will be verified to match this before blocking") + + flag.BoolVar(&s.checkSignature, "check-signature", true, "Check self-signature of CSR before revoking") +} + +func (s *subcommandBlockKey) Run(ctx context.Context, a *admin) error { + // This is a map of all input-selection flags to whether or not they were set + // to a non-default value. We use this to ensure that exactly one input + // selection flag was given on the command line. + setInputs := map[string]bool{ + "-private-key": s.privKey != "", + "-spki-file": s.spkiFile != "", + "-cert-file": s.certFile != "", + "-csr-file": s.csrFile != "", + } + activeFlag, err := findActiveInputMethodFlag(setInputs) + if err != nil { + return err + } + + var spkiHashes [][]byte + switch activeFlag { + case "-private-key": + spkiHashes, err = a.spkiHashesFromPrivateKeys(s.privKey) + case "-spki-file": + spkiHashes, err = a.spkiHashesFromFile(s.spkiFile) + case "-cert-file": + spkiHashes, err = a.spkiHashesFromCertPEM(s.certFile) + case "-csr-file": + spkiHashes, err = a.spkiHashFromCSRPEM(s.csrFile, s.checkSignature, s.csrFileExpectedCN) + default: + return errors.New("no recognized input method flag set (this shouldn't happen)") + } + if err != nil { + return fmt.Errorf("collecting spki hashes to block: %w", err) + } + + err = a.blockSPKIHashes(ctx, spkiHashes, s.comment, s.parallelism) + if err != nil { + return err + } + + return nil +} + +func (a *admin) spkiHashesFromPrivateKeys(keyFile string) ([][]byte, error) { + var spkiHashes [][]byte + + keyPEMs, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("reading private key file %q: %w", keyFile, err) + } + + for { + var keyDER *pem.Block + keyDER, keyPEMs = pem.Decode(keyPEMs) + if keyDER == nil { + return spkiHashes, nil + } + + _, publicKey, err := privatekey.LoadDER(keyDER) + if err != nil { + return nil, fmt.Errorf("loading private key file %q key %d: %w", keyFile, len(spkiHashes), err) + } + + spkiHash, err := core.KeyDigest(publicKey) + if err != nil { + return nil, fmt.Errorf("computing SPKI hash %d: %w", len(spkiHashes), err) + } + + spkiHashes = append(spkiHashes, spkiHash[:]) + } +} + +func (a *admin) spkiHashesFromFile(filePath string) ([][]byte, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("opening spki hashes file: %w", err) + } + + var spkiHashes [][]byte + scanner := bufio.NewScanner(file) + for scanner.Scan() { + spkiHex := scanner.Text() + if spkiHex == "" { + continue + } + spkiHash, err := hex.DecodeString(spkiHex) + if err != nil { + return nil, fmt.Errorf("decoding hex spki hash %q: %w", spkiHex, err) + } + + if len(spkiHash) != 32 { + return nil, fmt.Errorf("got spki hash of unexpected length: %q (%d)", spkiHex, len(spkiHash)) + } + + spkiHashes = append(spkiHashes, spkiHash) + } + + return spkiHashes, nil +} + +func (a *admin) spkiHashesFromCertPEM(filename string) ([][]byte, error) { + cert, err := core.LoadCert(filename) + if err != nil { + return nil, fmt.Errorf("loading certificate pem: %w", err) + } + + spkiHash, err := core.KeyDigest(cert.PublicKey) + if err != nil { + return nil, fmt.Errorf("computing SPKI hash: %w", err) + } + + return [][]byte{spkiHash[:]}, nil +} + +func (a *admin) spkiHashFromCSRPEM(filename string, checkSignature bool, expectedCN string) ([][]byte, error) { + csrFile, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("reading CSR file %q: %w", filename, err) + } + + data, _ := 
pem.Decode(csrFile)
+	if data == nil {
+		return nil, fmt.Errorf("no PEM data found in %q", filename)
+	}
+
+	a.log.Debugf("Parsing key to block from CSR PEM: %x", data)
+
+	csr, err := x509.ParseCertificateRequest(data.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("parsing CSR %q: %w", filename, err)
+	}
+
+	if checkSignature {
+		err = csr.CheckSignature()
+		if err != nil {
+			return nil, fmt.Errorf("checking CSR signature: %w", err)
+		}
+	}
+
+	if csr.Subject.CommonName != expectedCN {
+		return nil, fmt.Errorf("got CSR CommonName %q, expected %q", csr.Subject.CommonName, expectedCN)
+	}
+
+	spkiHash, err := core.KeyDigest(csr.PublicKey)
+	if err != nil {
+		return nil, fmt.Errorf("computing SPKI hash: %w", err)
+	}
+
+	return [][]byte{spkiHash[:]}, nil
+}
+
+func (a *admin) blockSPKIHashes(ctx context.Context, spkiHashes [][]byte, comment string, parallelism uint) error {
+	u, err := user.Current()
+	if err != nil {
+		return fmt.Errorf("getting admin username: %w", err)
+	}
+
+	var errCount atomic.Uint64
+	wg := new(sync.WaitGroup)
+	work := make(chan []byte, parallelism)
+	for range parallelism {
+		wg.Go(func() {
+			for spkiHash := range work {
+				err = a.blockSPKIHash(ctx, spkiHash, u, comment)
+				if err != nil {
+					errCount.Add(1)
+					if errors.Is(err, berrors.AlreadyRevoked) {
+						a.log.Warningf("not blocking %x: already blocked", spkiHash)
+					} else {
+						a.log.Errf("failed to block %x: %s", spkiHash, err)
+					}
+				}
+			}
+		})
+	}
+
+	for _, spkiHash := range spkiHashes {
+		work <- spkiHash
+	}
+	close(work)
+	wg.Wait()
+
+	if errCount.Load() > 0 {
+		return fmt.Errorf("encountered %d errors while blocking keys; see logs above for details", errCount.Load())
+	}
+
+	return nil
+}
+
+func (a *admin) blockSPKIHash(ctx context.Context, spkiHash []byte, u *user.User, comment string) error {
+	exists, err := a.saroc.KeyBlocked(ctx, &sapb.SPKIHash{KeyHash: spkiHash})
+	if err != nil {
+		return fmt.Errorf("checking if key is already blocked: %w", err)
+	}
+	if exists.Exists {
+		return berrors.AlreadyRevokedError("the provided key already exists in the 'blockedKeys' table")
+	}
+
+	stream, err := a.saroc.GetSerialsByKey(ctx, &sapb.SPKIHash{KeyHash: spkiHash})
+	if err != nil {
+		return fmt.Errorf("setting up stream of serials from SA: %s", err)
+	}
+
+	var count int
+	for {
+		_, err := stream.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return fmt.Errorf("streaming serials from SA: %s", err)
+		}
+		count++
+	}
+
+	a.log.Infof("Found %d unexpired certificates matching the provided key", count)
+
+	_, err = a.sac.AddBlockedKey(ctx, &sapb.AddBlockedKeyRequest{
+		KeyHash:   spkiHash[:],
+		Added:     timestamppb.New(a.clk.Now()),
+		Source:    "admin-revoker",
+		Comment:   fmt.Sprintf("%s: %s", u.Username, comment),
+		RevokedBy: 0,
+	})
+	if err != nil {
+		return fmt.Errorf("blocking key: %w", err)
+	}
+
+	return nil
+}
diff --git a/cmd/admin/key_test.go b/cmd/admin/key_test.go
new file mode 100644
index 00000000000..6a41b687c02
--- /dev/null
+++ b/cmd/admin/key_test.go
@@ -0,0 +1,196 @@
+package main
+
+import (
+	"context"
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/pem"
+	"os"
+	"os/user"
+	"path"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/jmhodges/clock"
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	"github.com/letsencrypt/boulder/core"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/mocks"
+	sapb
"github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" +) + +func TestSPKIHashesFromPrivateKeys(t *testing.T) { + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "Generating ECDSA key") + pkcs8ecdsa, err := x509.MarshalPKCS8PrivateKey(ecdsaKey) + test.AssertNotError(t, err, "Marshalling PKCS8 private key") + + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "Generating RSA key") + pkcs8rsa, err := x509.MarshalPKCS8PrivateKey(rsaKey) + test.AssertNotError(t, err, "Marshalling PKCS8 private key") + pkcs1rsa := x509.MarshalPKCS1PrivateKey(rsaKey) + + keyFile := path.Join(t.TempDir(), "key.pem") + file, err := os.Create(keyFile) + test.AssertNotError(t, err, "Creating key file") + test.AssertNotError(t, pem.Encode(file, &pem.Block{Type: "PRIVATE KEY", Bytes: pkcs8ecdsa}), "encoding PEM") + test.AssertNotError(t, pem.Encode(file, &pem.Block{Type: "PRIVATE KEY", Bytes: pkcs8rsa}), "encoding PEM") + test.AssertNotError(t, pem.Encode(file, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: pkcs1rsa}), "encoding PEM") + test.AssertNotError(t, file.Close(), "writing test private key file") + + a := admin{} + + res, err := a.spkiHashesFromPrivateKeys(keyFile) + test.AssertNotError(t, err, "getting hashes from private key") + + for i, pubkey := range []crypto.PublicKey{&ecdsaKey.PublicKey, &rsaKey.PublicKey, &rsaKey.PublicKey} { + expectedHash, err := core.KeyDigest(pubkey) + test.AssertNotError(t, err, "hashing public key") + test.AssertByteEquals(t, res[i], expectedHash[:]) + } +} + +func TestSPKIHashesFromFile(t *testing.T) { + var spkiHexes []string + for i := range 10 { + h := sha256.Sum256([]byte(strconv.Itoa(i))) + spkiHexes = append(spkiHexes, hex.EncodeToString(h[:])) + } + + spkiFile := path.Join(t.TempDir(), "spkis.txt") + err := os.WriteFile(spkiFile, []byte(strings.Join(spkiHexes, "\n")), os.ModeAppend) + test.AssertNotError(t, err, "writing test spki file") + + a := admin{} + + res, err := a.spkiHashesFromFile(spkiFile) + test.AssertNotError(t, err, "") + for i, spkiHash := range res { + test.AssertEquals(t, hex.EncodeToString(spkiHash), spkiHexes[i]) + } +} + +// The key is the p256 test key from RFC9500 +const goodCSR = ` +-----BEGIN CERTIFICATE REQUEST----- +MIG6MGICAQAwADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEIlSPiPt4L/teyj +dERSxyoeVY+9b3O+XkjpMjLMRcWxbEzRDEy41bihcTnpSILImSVymTQl9BQZq36Q +pCpJQnKgADAKBggqhkjOPQQDAgNIADBFAiBadw3gvL9IjUfASUTa7MvmkbC4ZCvl +21m1KMwkIx/+CQIhAKvuyfCcdZ0cWJYOXCOb1OavolWHIUzgEpNGUWul6O0s +-----END CERTIFICATE REQUEST----- +` + +// TestCSR checks that we get the correct SPKI from a CSR, even if its signature is invalid +func TestCSR(t *testing.T) { + expectedSPKIHash := "b2b04340cfaee616ec9c2c62d261b208e54bb197498df52e8cadede23ac0ba5e" + + goodCSRFile := path.Join(t.TempDir(), "good.csr") + err := os.WriteFile(goodCSRFile, []byte(goodCSR), 0600) + test.AssertNotError(t, err, "writing good csr") + + a := admin{log: blog.NewMock()} + + goodHash, err := a.spkiHashFromCSRPEM(goodCSRFile, true, "") + test.AssertNotError(t, err, "expected to read CSR") + + if len(goodHash) != 1 { + t.Fatalf("expected to read 1 SPKI from CSR, read %d", len(goodHash)) + } + test.AssertEquals(t, hex.EncodeToString(goodHash[0]), expectedSPKIHash) + + // Flip a bit, in the signature, to make a bad CSR: + badCSR := strings.Replace(goodCSR, "Wul6", "Wul7", 1) + + csrFile := path.Join(t.TempDir(), "bad.csr") + err = os.WriteFile(csrFile, []byte(badCSR), 0600) + test.AssertNotError(t, err, 
"writing bad csr") + + _, err = a.spkiHashFromCSRPEM(csrFile, true, "") + test.AssertError(t, err, "expected invalid signature") + + badHash, err := a.spkiHashFromCSRPEM(csrFile, false, "") + test.AssertNotError(t, err, "expected to read CSR with bad signature") + + if len(badHash) != 1 { + t.Fatalf("expected to read 1 SPKI from CSR, read %d", len(badHash)) + } + test.AssertEquals(t, hex.EncodeToString(badHash[0]), expectedSPKIHash) +} + +// mockSARecordingBlocks is a mock which only implements the AddBlockedKey gRPC +// method. +type mockSARecordingBlocks struct { + sapb.StorageAuthorityClient + blockRequests []*sapb.AddBlockedKeyRequest +} + +// AddBlockedKey is a mock which always succeeds and records the request it +// received. +func (msa *mockSARecordingBlocks) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + msa.blockRequests = append(msa.blockRequests, req) + return &emptypb.Empty{}, nil +} + +func (msa *mockSARecordingBlocks) reset() { + msa.blockRequests = nil +} + +type mockSARO struct { + sapb.StorageAuthorityReadOnlyClient +} + +func (sa *mockSARO) GetSerialsByKey(ctx context.Context, _ *sapb.SPKIHash, _ ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.Serial], error) { + return &mocks.ServerStreamClient[sapb.Serial]{}, nil +} + +func (sa *mockSARO) KeyBlocked(ctx context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (*sapb.Exists, error) { + return &sapb.Exists{Exists: false}, nil +} + +func TestBlockSPKIHash(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + log := blog.NewMock() + msa := mockSARecordingBlocks{} + + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "creating test private key") + keyHash, err := core.KeyDigest(privKey.Public()) + test.AssertNotError(t, err, "computing test SPKI hash") + + a := admin{saroc: &mockSARO{}, sac: &msa, clk: fc, log: log} + u := &user.User{} + + // A full run should result in one request with the right fields. + msa.reset() + log.Clear() + err = a.blockSPKIHash(context.Background(), keyHash[:], u, "hello world") + test.AssertNotError(t, err, "") + test.AssertEquals(t, len(log.GetAllMatching("Found 0 unexpired certificates")), 1) + test.AssertEquals(t, len(msa.blockRequests), 1) + test.AssertByteEquals(t, msa.blockRequests[0].KeyHash, keyHash[:]) + test.AssertContains(t, msa.blockRequests[0].Comment, "hello world") + + // A dry-run should result in zero requests and two log lines. + msa.reset() + log.Clear() + a.sac = dryRunSAC{log: log} + err = a.blockSPKIHash(context.Background(), keyHash[:], u, "") + test.AssertNotError(t, err, "") + test.AssertEquals(t, len(log.GetAllMatching("Found 0 unexpired certificates")), 1) + test.AssertEquals(t, len(log.GetAllMatching("dry-run: Block SPKI hash "+hex.EncodeToString(keyHash[:]))), 1) + test.AssertEquals(t, len(msa.blockRequests), 0) +} diff --git a/cmd/admin/main.go b/cmd/admin/main.go new file mode 100644 index 00000000000..4b37d00c709 --- /dev/null +++ b/cmd/admin/main.go @@ -0,0 +1,148 @@ +// Package main provides the "admin" tool, which can perform various +// administrative actions (such as revoking certificates) against a Boulder +// deployment. +// +// Run "admin -h" for a list of flags and subcommands. +// +// Note that the admin tool runs in "dry-run" mode *by default*. 
All commands
+// which mutate the database (either directly or via gRPC requests) will refuse
+// to do so, and instead print log lines representing the work they would do,
+// unless the "-dry-run=false" flag is passed.
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/letsencrypt/boulder/cmd"
+	"github.com/letsencrypt/boulder/features"
+)
+
+type Config struct {
+	Admin struct {
+		// TLS controls the TLS client the admin tool uses for gRPC connections.
+		TLS cmd.TLSConfig
+
+		RAService *cmd.GRPCClientConfig
+		SAService *cmd.GRPCClientConfig
+
+		Features features.Config
+	}
+
+	Syslog        cmd.SyslogConfig
+	OpenTelemetry cmd.OpenTelemetryConfig
+}
+
+// subcommand specifies the set of methods that a struct must implement to be
+// usable as an admin subcommand.
+type subcommand interface {
+	// Desc should return a short (one-sentence) description of the subcommand for
+	// use in help/usage strings.
+	Desc() string
+	// Flags should register command line flags on the provided flagset. These
+	// should use the "TypeVar" methods on the provided flagset, targeting fields
+	// on the subcommand struct, so that the results of command line parsing can
+	// be used by other methods on the struct.
+	Flags(*flag.FlagSet)
+	// Run should do all of the subcommand's heavy lifting, with behavior gated on
+	// the subcommand struct's member fields which have been populated from the
+	// command line. The provided admin object can be used for access to external
+	// services like the RA, SA, and configured logger.
+	Run(context.Context, *admin) error
+}
+
+// main is the entry-point for the admin tool. We do not include admin in the
+// suite of tools which are subcommands of the "boulder" binary, since it
+// should be small and portable and standalone.
+func main() {
+	// Do setup as similarly as possible to all other boulder services, including
+	// config parsing and stats and logging setup. However, the one downside of
+	// not being bundled with the boulder binary is that we don't get config
+	// validation for free.
+	defer cmd.AuditPanic()
+
+	// This is the registry of all subcommands that the admin tool can run.
+	subcommands := map[string]subcommand{
+		"revoke-cert":            &subcommandRevokeCert{},
+		"block-key":              &subcommandBlockKey{},
+		"pause-identifier":       &subcommandPauseIdentifier{},
+		"unpause-account":        &subcommandUnpauseAccount{},
+		"import-limit-overrides": &subcommandImportOverrides{},
+		"dump-limit-overrides":   &subcommandDumpEnabledOverrides{},
+		"toggle-limit-override":  &subcommandToggleOverride{},
+		"add-limit-override":     &subcommandAddOverride{},
+	}
+
+	defaultUsage := flag.Usage
+	flag.Usage = func() {
+		defaultUsage()
+		fmt.Printf("\nSubcommands:\n")
+		for name, command := range subcommands {
+			fmt.Printf(" %s\n", name)
+			fmt.Printf("\t%s\n", command.Desc())
+		}
+		fmt.Print("\nYou can run \"admin <subcommand> -help\" to get usage for that subcommand.\n")
+	}
+
+	// Start by parsing just the global flags before we get to the subcommand, if
+	// they're present.
+	configFile := flag.String("config", "", "Path to the configuration file for this service (required)")
+	dryRun := flag.Bool("dry-run", true, "Print actions instead of mutating the database")
+	flag.Parse()
+
+	// Figure out which subcommand they want us to run.
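+	// For example (illustrative paths only), a full invocation might look like:
+	//
+	//	admin -config /path/to/admin.json dump-limit-overrides -file /tmp/overrides.yaml
+	//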
+ unparsedArgs := flag.Args() + if len(unparsedArgs) == 0 { + flag.Usage() + os.Exit(1) + } + + subcommand, ok := subcommands[unparsedArgs[0]] + if !ok { + flag.Usage() + os.Exit(1) + } + + // Then parse the rest of the args according to the selected subcommand's + // flags, and allow the global flags to be placed after the subcommand name. + subflags := flag.NewFlagSet(unparsedArgs[0], flag.ExitOnError) + subcommand.Flags(subflags) + flag.VisitAll(func(f *flag.Flag) { + // For each flag registered at the global/package level, also register it on + // the subflags FlagSet. The `f.Value` here is a pointer to the same var + // that the original global flag would populate, so the same variable can + // be set either way. + subflags.Var(f.Value, f.Name, f.Usage) + }) + _ = subflags.Parse(unparsedArgs[1:]) + + // With the flags all parsed, now we can parse our config and set up our admin + // object. + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + a, err := newAdmin(*configFile, *dryRun) + cmd.FailOnError(err, "creating admin object") + + // Finally, run the selected subcommand. + if *dryRun { + a.log.Infof("admin tool executing a dry-run with the following arguments: %q", strings.Join(os.Args, " ")) + } else { + a.log.AuditInfo("admin tool beginning execution", map[string]any{"cmd": strings.Join(os.Args, " ")}) + } + + err = subcommand.Run(context.Background(), a) + cmd.FailOnError(err, "executing subcommand") + + if *dryRun { + a.log.Infof("admin tool has successfully completed executing a dry-run with the following arguments: %q", strings.Join(os.Args, " ")) + a.log.Info("Dry run complete. Pass -dry-run=false to mutate the database.") + } else { + a.log.AuditInfo("admin tool completed successfully", map[string]any{"cmd": strings.Join(os.Args, " ")}) + } +} diff --git a/cmd/admin/overrides_add.go b/cmd/admin/overrides_add.go new file mode 100644 index 00000000000..6c217b0a13f --- /dev/null +++ b/cmd/admin/overrides_add.go @@ -0,0 +1,153 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "net/netip" + "strings" + "time" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/policy" + rl "github.com/letsencrypt/boulder/ratelimits" + sapb "github.com/letsencrypt/boulder/sa/proto" + "google.golang.org/protobuf/types/known/durationpb" +) + +type subcommandAddOverride struct { + limit string + regId int64 + singleIdentifier string + setOfIdentifiers string + subscriberIP string + count int64 + burst int64 + period string + comment string +} + +func (*subcommandAddOverride) Desc() string { + return "Add or update a rate limit override. New overrides are enabled by default. Updates to existing overrides will not change the enabled state." +} + +func (c *subcommandAddOverride) Flags(f *flag.FlagSet) { + f.StringVar(&c.limit, "limit", "", "ratelimit name (required)") + f.Int64Var(&c.regId, "regId", 0, "a single registration/account ID") + f.StringVar(&c.singleIdentifier, "singleIdentifier", "", "a single identifier (e.g. example.com or www.example.com or 55.66.77.88 or 2602:80a:6000::1)") + f.StringVar(&c.setOfIdentifiers, "setOfIdentifiers", "", "comma-separated list of unique identifiers (e.g. example.com,www.example.com,55.66.77.88,2602:80a:6000::1)") + f.StringVar(&c.subscriberIP, "subscriberIP", "", "a single IPv4/IPv6 address the subscriber uses for requests (e.g. 
55.66.77.88 or 2602:80a:6000::1)")
+
+	f.Int64Var(&c.count, "count", 0, "allowed requests per period (required)")
+	f.Int64Var(&c.burst, "burst", 0, "burst size (required)")
+	f.StringVar(&c.period, "period", "", "period duration (e.g. 1h, 168h) (required)")
+	f.StringVar(&c.comment, "comment", "", "comment for the override (required)")
+}
+
+// validateIdentifiers checks that the provided identifiers are valid according to policy.
+func validateIdentifiers(idents ...identifier.ACMEIdentifier) error {
+	for _, ident := range idents {
+		switch ident.Type {
+		case identifier.TypeDNS:
+			err := policy.ValidDomain(ident.Value)
+			if err != nil {
+				return fmt.Errorf("invalid domain %s: %s", ident.Value, err)
+			}
+		case identifier.TypeIP:
+			err := policy.ValidIP(ident.Value)
+			if err != nil {
+				return fmt.Errorf("invalid IP address %s: %s", ident.Value, err)
+			}
+		}
+	}
+	return nil
+}
+
+func (c *subcommandAddOverride) Run(ctx context.Context, a *admin) error {
+	if c.limit == "" {
+		return errors.New("--limit is required")
+	}
+	if c.count == 0 || c.burst == 0 || c.period == "" || c.comment == "" {
+		return errors.New("all of --count, --burst, --period, and --comment are required")
+	}
+
+	name, ok := rl.StringToName[c.limit]
+	if !ok {
+		return fmt.Errorf("unknown limit name %q, must be one in %s", c.limit, rl.LimitNames)
+	}
+
+	dur, err := time.ParseDuration(c.period)
+	if err != nil {
+		return fmt.Errorf("invalid period value: %s", err)
+	}
+
+	var subscriberIP netip.Addr
+	if c.subscriberIP != "" {
+		subscriberIP, err = netip.ParseAddr(c.subscriberIP)
+		if err != nil {
+			return fmt.Errorf("invalid subscriberIP %q: %w", c.subscriberIP, err)
+		}
+		err := policy.ValidIP(c.subscriberIP)
+		if err != nil {
+			return fmt.Errorf("invalid subscriberIP %q: %w", c.subscriberIP, err)
+		}
+	}
+
+	singleIdent := identifier.FromString(c.singleIdentifier)
+	err = validateIdentifiers(singleIdent)
+	if err != nil {
+		return fmt.Errorf("invalid singleIdentifier: %w", err)
+	}
+
+	var setOfIdents identifier.ACMEIdentifiers
+	if c.setOfIdentifiers != "" {
+		setOfIdents = identifier.FromStringSlice(strings.Split(c.setOfIdentifiers, ","))
+		err := validateIdentifiers(setOfIdents...)
+		if err != nil {
+			return fmt.Errorf("invalid setOfIdentifiers: %w", err)
+		}
+	}
+
+	bucketKey, err := rl.BuildBucketKey(name, c.regId, singleIdent, setOfIdents, subscriberIP)
+	if err != nil {
+		return fmt.Errorf("building bucket key for limit %s: %s", name, err)
+	}
+
+	err = rl.ValidateLimit(&rl.Limit{
+		Name:   name,
+		Count:  c.count,
+		Burst:  c.burst,
+		Period: config.Duration{Duration: dur},
+	})
+	if err != nil {
+		return fmt.Errorf("validating override for limit %s key %q: %s", name, bucketKey, err)
+	}
+
+	resp, err := a.sac.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{
+		Override: &sapb.RateLimitOverride{
+			LimitEnum: int64(name),
+			BucketKey: bucketKey,
+			Count:     c.count,
+			Burst:     c.burst,
+			Period:    durationpb.New(dur),
+			Comment:   c.comment,
+		},
+	})
+	if err != nil {
+		return fmt.Errorf("adding override for limit %s key %q: %s", name, bucketKey, err)
+	}
+
+	status := "disabled"
+	if resp.Enabled {
+		status = "enabled"
+	}
+
+	if resp.Inserted {
+		a.log.Infof("Added new override for limit %s key %q, status=[%s]\n", name, bucketKey, status)
+	} else {
+		a.log.Infof("Updated existing override for limit %s key %q, status=[%s]\n", name, bucketKey, status)
+	}
+	return nil
+}
diff --git a/cmd/admin/overrides_dump.go b/cmd/admin/overrides_dump.go
new file mode 100644
index 00000000000..41947493fd6
--- /dev/null
+++ b/cmd/admin/overrides_dump.go
@@ -0,0 +1,67 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	"github.com/letsencrypt/boulder/config"
+	"github.com/letsencrypt/boulder/ratelimits"
+)
+
+type subcommandDumpEnabledOverrides struct {
+	file string
+}
+
+func (*subcommandDumpEnabledOverrides) Desc() string {
+	return "Dump all enabled rate limit overrides to a YAML file"
+}
+
+func (c *subcommandDumpEnabledOverrides) Flags(f *flag.FlagSet) {
+	f.StringVar(&c.file, "file", "", "destination path for YAML output (required)")
+}
+
+func (c *subcommandDumpEnabledOverrides) Run(ctx context.Context, a *admin) error {
+	if c.file == "" {
+		return errors.New("--file is required")
+	}
+
+	stream, err := a.saroc.GetEnabledRateLimitOverrides(ctx, &emptypb.Empty{})
+	if err != nil {
+		return fmt.Errorf("fetching enabled overrides: %w", err)
+	}
+
+	overrides := make(ratelimits.Limits)
+	for {
+		r, err := stream.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return fmt.Errorf("reading overrides stream: %w", err)
+		}
+
+		overrides[r.Override.BucketKey] = &ratelimits.Limit{
+			Burst:  r.Override.Burst,
+			Count:  r.Override.Count,
+			Period: config.Duration{Duration: r.Override.Period.AsDuration()},
+			Name:   ratelimits.Name(r.Override.LimitEnum),
+			Comment: fmt.Sprintf("Last Updated: %s - %s",
+				r.UpdatedAt.AsTime().Format("2006-01-02"),
+				r.Override.Comment,
+			),
+		}
+	}
+
+	err = ratelimits.DumpOverrides(c.file, overrides)
+	if err != nil {
+		return fmt.Errorf("dumping overrides: %w", err)
+	}
+
+	a.log.Infof("Wrote %d overrides to %q\n", len(overrides), c.file)
+	return nil
+}
diff --git a/cmd/admin/overrides_import.go b/cmd/admin/overrides_import.go
new file mode 100644
index 00000000000..c24e78b42c3
--- /dev/null
+++ b/cmd/admin/overrides_import.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"sync"
+
+	"google.golang.org/protobuf/types/known/durationpb"
+
+	"github.com/letsencrypt/boulder/ratelimits"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+)
+
+type subcommandImportOverrides struct {
+	file        string
+	parallelism int
+}
+
+func
(*subcommandImportOverrides) Desc() string { return "Push overrides to SA" } + +func (c *subcommandImportOverrides) Flags(f *flag.FlagSet) { + f.StringVar(&c.file, "file", "", "path to YAML file containing rate limit overrides") + f.IntVar(&c.parallelism, "parallelism", 10, "the number of concurrent RPCs to send to the SA (default: 10)") +} + +func (c *subcommandImportOverrides) Run(ctx context.Context, a *admin) error { + if c.file == "" { + return errors.New("--file is required") + } + if c.parallelism <= 0 { + return errors.New("--parallelism must be greater than 0") + } + overrides, err := ratelimits.LoadOverridesByBucketKey(c.file) + if err != nil { + return err + } + var overrideCount = len(overrides) + + work := make(chan *sapb.RateLimitOverride, overrideCount) + for k, ov := range overrides { + work <- &sapb.RateLimitOverride{ + LimitEnum: int64(ov.Name), + BucketKey: k, + Comment: ov.Comment, + Period: durationpb.New(ov.Period.Duration), + Count: ov.Count, + Burst: ov.Burst, + } + } + close(work) + + type result struct { + ov *sapb.RateLimitOverride + err error + } + results := make(chan result, c.parallelism) + + var wg sync.WaitGroup + for i := 0; i < c.parallelism; i++ { + wg.Go(func() { + for ov := range work { + _, err := a.sac.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov}) + results <- result{ov: ov, err: err} + } + }) + } + + var errorCount int + for range overrideCount { + result := <-results + if result.err != nil { + a.log.Errf("failed to add override: key=%q limit=%d: %s", result.ov.BucketKey, result.ov.LimitEnum, result.err) + errorCount++ + } + } + + wg.Wait() + close(results) + + if errorCount > 0 { + return fmt.Errorf("%d out of %d overrides failed to be added, see log message(s) for more details", errorCount, overrideCount) + } + a.log.Infof("Successfully added %d overrides", overrideCount) + return nil +} diff --git a/cmd/admin/overrides_toggle.go b/cmd/admin/overrides_toggle.go new file mode 100644 index 00000000000..99e713b2907 --- /dev/null +++ b/cmd/admin/overrides_toggle.go @@ -0,0 +1,102 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "net/netip" + "strings" + + "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/policy" + rl "github.com/letsencrypt/boulder/ratelimits" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +type subcommandToggleOverride struct { + enabled bool + limit string + regId int64 + singleIdentifier string + setOfIdentifiers string + subscriberIP string +} + +func (*subcommandToggleOverride) Desc() string { + return "Enable or disable a rate limit override." +} + +func (c *subcommandToggleOverride) Flags(f *flag.FlagSet) { + f.BoolVar(&c.enabled, "enabled", false, "true to enable, false to disable (default: false)") + f.StringVar(&c.limit, "limit", "", "ratelimit name (required)") + f.Int64Var(&c.regId, "regId", 0, "a single registration/account ID") + f.StringVar(&c.singleIdentifier, "singleIdentifier", "", "a single identifier (e.g. example.com or www.example.com or 55.66.77.88 or 2602:80a:6000::1)") + f.StringVar(&c.setOfIdentifiers, "setOfIdentifiers", "", "comma-separated list of unique identifiers (e.g. 
example.com,www.example.com,55.66.77.88,2602:80a:6000::1)")
+	f.StringVar(&c.subscriberIP, "subscriberIP", "", "a single IPv4/IPv6 address the subscriber uses for requests")
+}
+
+func (c *subcommandToggleOverride) Run(ctx context.Context, a *admin) error {
+	if c.limit == "" {
+		return errors.New("--limit is required")
+	}
+	name, ok := rl.StringToName[c.limit]
+	if !ok {
+		return fmt.Errorf("unknown limit name %q, must be one in %s", c.limit, rl.LimitNames)
+	}
+
+	var subscriberIP netip.Addr
+	var err error
+	if c.subscriberIP != "" {
+		subscriberIP, err = netip.ParseAddr(c.subscriberIP)
+		if err != nil {
+			return fmt.Errorf("invalid subscriberIP %q: %w", c.subscriberIP, err)
+		}
+		err := policy.ValidIP(c.subscriberIP)
+		if err != nil {
+			return fmt.Errorf("invalid subscriberIP %q: %w", c.subscriberIP, err)
+		}
+	}
+
+	singleIdent := identifier.FromString(c.singleIdentifier)
+	err = validateIdentifiers(singleIdent)
+	if err != nil {
+		return fmt.Errorf("invalid singleIdentifier: %w", err)
+	}
+
+	var setOfIdents identifier.ACMEIdentifiers
+	if c.setOfIdentifiers != "" {
+		setOfIdents = identifier.FromStringSlice(strings.Split(c.setOfIdentifiers, ","))
+		err := validateIdentifiers(setOfIdents...)
+		if err != nil {
+			return fmt.Errorf("invalid setOfIdentifiers: %w", err)
+		}
+	}
+
+	bucketKey, err := rl.BuildBucketKey(name, c.regId, singleIdent, setOfIdents, subscriberIP)
+	if err != nil {
+		return fmt.Errorf("building bucket key for limit %s: %s", name, err)
+	}
+
+	if c.enabled {
+		_, err := a.sac.EnableRateLimitOverride(ctx, &sapb.EnableRateLimitOverrideRequest{
+			LimitEnum: int64(name),
+			BucketKey: bucketKey,
+		})
+		if err != nil {
+			return fmt.Errorf("enabling override for limit %s key %q: %s", name, bucketKey, err)
+		}
+		a.log.Infof("Enabled override for limit %s key %q\n", name, bucketKey)
+		return nil
+	}
+
+	_, err = a.sac.DisableRateLimitOverride(ctx, &sapb.DisableRateLimitOverrideRequest{
+		LimitEnum: int64(name),
+		BucketKey: bucketKey,
+	})
+	if err != nil {
+		return fmt.Errorf("disabling override for limit %s key %q: %s", name, bucketKey, err)
+	}
+	a.log.Infof("Disabled override for limit %s key %q\n", name, bucketKey)
+	return nil
+}
diff --git a/cmd/admin/pause_identifier.go b/cmd/admin/pause_identifier.go
new file mode 100644
index 00000000000..da268d7245b
--- /dev/null
+++ b/cmd/admin/pause_identifier.go
@@ -0,0 +1,192 @@
+package main
+
+import (
+	"context"
+	"encoding/csv"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	corepb "github.com/letsencrypt/boulder/core/proto"
+	"github.com/letsencrypt/boulder/identifier"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+)
+
+// subcommandPauseIdentifier encapsulates the "admin pause-identifier" command.
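+// A typical invocation (illustrative paths) looks like:
+//
+//	admin -config /path/to/admin.json pause-identifier -batch-file /path/to/pauses.csv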
+type subcommandPauseIdentifier struct {
+	batchFile   string
+	parallelism uint
+}
+
+var _ subcommand = (*subcommandPauseIdentifier)(nil)
+
+func (p *subcommandPauseIdentifier) Desc() string {
+	return "Administratively pause an account, preventing it from attempting certificate issuance"
+}
+
+func (p *subcommandPauseIdentifier) Flags(flag *flag.FlagSet) {
+	flag.StringVar(&p.batchFile, "batch-file", "", "Path to a CSV file containing (account ID, identifier type, identifier value)")
+	flag.UintVar(&p.parallelism, "parallelism", 10, "The maximum number of concurrent pause requests to send to the SA (default: 10)")
+}
+
+func (p *subcommandPauseIdentifier) Run(ctx context.Context, a *admin) error {
+	if p.batchFile == "" {
+		return errors.New("the -batch-file flag is required")
+	}
+
+	idents, err := a.readPausedAccountFile(p.batchFile)
+	if err != nil {
+		return err
+	}
+
+	_, err = a.pauseIdentifiers(ctx, idents, p.parallelism)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// pauseIdentifiers concurrently pauses identifiers for each account using up to
+// `parallelism` workers. It returns all pause responses and any accumulated
+// errors.
+func (a *admin) pauseIdentifiers(ctx context.Context, entries []pauseCSVData, parallelism uint) ([]*sapb.PauseIdentifiersResponse, error) {
+	if len(entries) <= 0 {
+		return nil, errors.New("cannot pause identifiers because no pauseData was sent")
+	}
+
+	accountToIdents := make(map[int64][]*corepb.Identifier)
+	for _, entry := range entries {
+		accountToIdents[entry.accountID] = append(accountToIdents[entry.accountID], &corepb.Identifier{
+			Type:  string(entry.identifierType),
+			Value: entry.identifierValue,
+		})
+	}
+
+	var errCount atomic.Uint64
+	respChan := make(chan *sapb.PauseIdentifiersResponse, len(accountToIdents))
+	work := make(chan struct {
+		accountID int64
+		idents    []*corepb.Identifier
+	}, parallelism)
+
+	var wg sync.WaitGroup
+	for range parallelism {
+		wg.Go(func() {
+			for data := range work {
+				response, err := a.sac.PauseIdentifiers(ctx, &sapb.PauseRequest{
+					RegistrationID: data.accountID,
+					Identifiers:    data.idents,
+				})
+				if err != nil {
+					errCount.Add(1)
+					a.log.Errf("error pausing identifier(s) %q for account %d: %v", data.idents, data.accountID, err)
+				} else {
+					respChan <- response
+				}
+			}
+		})
+	}
+
+	for accountID, idents := range accountToIdents {
+		work <- struct {
+			accountID int64
+			idents    []*corepb.Identifier
+		}{accountID, idents}
+	}
+	close(work)
+	wg.Wait()
+	close(respChan)
+
+	var responses []*sapb.PauseIdentifiersResponse
+	for response := range respChan {
+		responses = append(responses, response)
+	}
+
+	if errCount.Load() > 0 {
+		return responses, fmt.Errorf("encountered %d errors while pausing identifiers; see logs above for details", errCount.Load())
+	}
+
+	return responses, nil
+}
+
+// pauseCSVData contains a golang representation of the data loaded in from a
+// CSV file for pausing.
+type pauseCSVData struct {
+	accountID       int64
+	identifierType  identifier.IdentifierType
+	identifierValue string
+}
+
+// readPausedAccountFile parses the contents of a CSV into a slice of
+// `pauseCSVData` objects and returns it or an error. It will skip malformed
+// lines and continue processing until either the end of file marker is
+// detected or another read error occurs.
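+// Each well-formed line contains exactly three comma-separated fields:
+//
+//	<accountID>,<identifierType>,<identifierValue>
+//
+// e.g. "1,dns,example.com" (values illustrative).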
+func (a *admin) readPausedAccountFile(filePath string) ([]pauseCSVData, error) { + fp, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("opening paused account data file: %w", err) + } + defer fp.Close() + + reader := csv.NewReader(fp) + + // identifierValue can have 1 or more entries + reader.FieldsPerRecord = -1 + reader.TrimLeadingSpace = true + + var parsedRecords []pauseCSVData + lineCounter := 0 + + // Process contents of the CSV file + for { + record, err := reader.Read() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + lineCounter++ + + // We should have strictly 3 fields, note that just commas is considered + // a valid CSV line. + if len(record) != 3 { + a.log.Infof("skipping: malformed line %d, should contain exactly 3 fields\n", lineCounter) + continue + } + + recordID := record[0] + accountID, err := strconv.ParseInt(recordID, 10, 64) + if err != nil || accountID == 0 { + a.log.Infof("skipping: malformed accountID entry on line %d\n", lineCounter) + continue + } + + // Ensure that an identifier type is present, otherwise skip the line. + if len(record[1]) == 0 { + a.log.Infof("skipping: malformed identifierType entry on line %d\n", lineCounter) + continue + } + + if len(record[2]) == 0 { + a.log.Infof("skipping: malformed identifierValue entry on line %d\n", lineCounter) + continue + } + + parsedRecord := pauseCSVData{ + accountID: accountID, + identifierType: identifier.IdentifierType(record[1]), + identifierValue: record[2], + } + parsedRecords = append(parsedRecords, parsedRecord) + } + a.log.Infof("detected %d valid record(s) from input file\n", len(parsedRecords)) + + return parsedRecords, nil +} diff --git a/cmd/admin/pause_identifier_test.go b/cmd/admin/pause_identifier_test.go new file mode 100644 index 00000000000..937cf179107 --- /dev/null +++ b/cmd/admin/pause_identifier_test.go @@ -0,0 +1,195 @@ +package main + +import ( + "context" + "errors" + "os" + "path" + "strings" + "testing" + + blog "github.com/letsencrypt/boulder/log" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" + "google.golang.org/grpc" +) + +func TestReadingPauseCSV(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + data []string + expectedRecords int + }{ + { + name: "No data in file", + data: nil, + }, + { + name: "valid", + data: []string{"1,dns,example.com"}, + expectedRecords: 1, + }, + { + name: "valid with duplicates", + data: []string{"1,dns,example.com", "2,dns,example.org", "1,dns,example.com", "1,dns,example.net", "3,dns,example.gov", "3,dns,example.gov"}, + expectedRecords: 6, + }, + { + name: "invalid with multiple domains on the same line", + data: []string{"1,dns,example.com,example.net"}, + }, + { + name: "invalid just commas", + data: []string{",,,"}, + }, + { + name: "invalid only contains accountID", + data: []string{"1"}, + }, + { + name: "invalid only contains accountID and identifierType", + data: []string{"1,dns"}, + }, + { + name: "invalid missing identifierType", + data: []string{"1,,example.com"}, + }, + { + name: "invalid accountID isnt an int", + data: []string{"blorple"}, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + log := blog.NewMock() + a := admin{log: log} + + csvFile := path.Join(t.TempDir(), path.Base(t.Name()+".csv")) + err := os.WriteFile(csvFile, []byte(strings.Join(testCase.data, "\n")), os.ModePerm) + test.AssertNotError(t, err, "could not write temporary file") + + 
parsedData, err := a.readPausedAccountFile(csvFile) + test.AssertNotError(t, err, "no error expected, but received one") + test.AssertEquals(t, len(parsedData), testCase.expectedRecords) + }) + } +} + +// mockSAPaused is a mock which always succeeds. It records the PauseRequest it +// received, and returns the number of identifiers as a +// PauseIdentifiersResponse. It does not maintain state of repaused identifiers. +type mockSAPaused struct { + sapb.StorageAuthorityClient +} + +func (msa *mockSAPaused) PauseIdentifiers(ctx context.Context, in *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) { + return &sapb.PauseIdentifiersResponse{Paused: int64(len(in.Identifiers))}, nil +} + +// mockSAPausedBroken is a mock which always errors. +type mockSAPausedBroken struct { + sapb.StorageAuthorityClient +} + +func (msa *mockSAPausedBroken) PauseIdentifiers(ctx context.Context, in *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) { + return nil, errors.New("its all jacked up") +} + +func TestPauseIdentifiers(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + data []pauseCSVData + saImpl sapb.StorageAuthorityClient + expectRespLen int + expectErr bool + }{ + { + name: "no data", + data: nil, + expectErr: true, + }, + { + name: "valid single entry", + data: []pauseCSVData{ + { + accountID: 1, + identifierType: "dns", + identifierValue: "example.com", + }, + }, + expectRespLen: 1, + }, + { + name: "valid single entry but broken SA", + expectErr: true, + saImpl: &mockSAPausedBroken{}, + data: []pauseCSVData{ + { + accountID: 1, + identifierType: "dns", + identifierValue: "example.com", + }, + }, + }, + { + name: "valid multiple entries with duplicates", + data: []pauseCSVData{ + { + accountID: 1, + identifierType: "dns", + identifierValue: "example.com", + }, + { + accountID: 1, + identifierType: "dns", + identifierValue: "example.com", + }, + { + accountID: 2, + identifierType: "dns", + identifierValue: "example.org", + }, + { + accountID: 3, + identifierType: "dns", + identifierValue: "example.net", + }, + { + accountID: 3, + identifierType: "dns", + identifierValue: "example.org", + }, + }, + expectRespLen: 3, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + log := blog.NewMock() + + // Default to a working mock SA implementation + if testCase.saImpl == nil { + testCase.saImpl = &mockSAPaused{} + } + a := admin{sac: testCase.saImpl, log: log} + + responses, err := a.pauseIdentifiers(context.Background(), testCase.data, 10) + if testCase.expectErr { + test.AssertError(t, err, "should have errored, but did not") + } else { + test.AssertNotError(t, err, "should not have errored") + // Batching will consolidate identifiers under the same account. + test.AssertEquals(t, len(responses), testCase.expectRespLen) + } + }) + } +} diff --git a/cmd/admin/unpause_account.go b/cmd/admin/unpause_account.go new file mode 100644 index 00000000000..7f38c20b02e --- /dev/null +++ b/cmd/admin/unpause_account.go @@ -0,0 +1,166 @@ +package main + +import ( + "bufio" + "context" + "errors" + "flag" + "fmt" + "os" + "slices" + "strconv" + "sync" + "sync/atomic" + + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/unpause" +) + +// subcommandUnpauseAccount encapsulates the "admin unpause-account" command. 
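+// A typical invocation (illustrative values) looks like:
+//
+//	admin -config /path/to/admin.json unpause-account -account 12345
+//
+// or, with a newline-separated list of account IDs:
+//
+//	admin -config /path/to/admin.json unpause-account -batch-file /path/to/account-ids.txt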
+type subcommandUnpauseAccount struct {
+	accountID   int64
+	batchFile   string
+	parallelism uint
+}
+
+var _ subcommand = (*subcommandUnpauseAccount)(nil)
+
+func (u *subcommandUnpauseAccount) Desc() string {
+	return "Administratively unpause an account to allow certificate issuance attempts"
+}
+
+func (u *subcommandUnpauseAccount) Flags(flag *flag.FlagSet) {
+	flag.Int64Var(&u.accountID, "account", 0, "A single account ID to unpause")
+	flag.StringVar(&u.batchFile, "batch-file", "", "Path to a file containing multiple account IDs, one per line")
+	flag.UintVar(&u.parallelism, "parallelism", 10, "The maximum number of concurrent unpause requests to send to the SA (default: 10)")
+}
+
+func (u *subcommandUnpauseAccount) Run(ctx context.Context, a *admin) error {
+	// This is a map of all input-selection flags to whether or not they were set
+	// to a non-default value. We use this to ensure that exactly one input
+	// selection flag was given on the command line.
+	setInputs := map[string]bool{
+		"-account":    u.accountID != 0,
+		"-batch-file": u.batchFile != "",
+	}
+	activeFlag, err := findActiveInputMethodFlag(setInputs)
+	if err != nil {
+		return err
+	}
+
+	var regIDs []int64
+	switch activeFlag {
+	case "-account":
+		regIDs = []int64{u.accountID}
+	case "-batch-file":
+		regIDs, err = a.readUnpauseAccountFile(u.batchFile)
+	default:
+		return errors.New("no recognized input method flag set (this shouldn't happen)")
+	}
+	if err != nil {
+		return fmt.Errorf("collecting account IDs to unpause: %w", err)
+	}
+
+	_, err = a.unpauseAccounts(ctx, regIDs, u.parallelism)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type unpauseCount struct {
+	accountID int64
+	count     int64
+}
+
+// unpauseAccounts concurrently unpauses all identifiers for each account using
+// up to `parallelism` workers. It returns a count of the number of identifiers
+// unpaused for each account and any accumulated errors.
+func (a *admin) unpauseAccounts(ctx context.Context, accountIDs []int64, parallelism uint) ([]unpauseCount, error) {
+	if len(accountIDs) <= 0 {
+		return nil, errors.New("no account IDs provided for unpausing")
+	}
+	slices.Sort(accountIDs)
+	accountIDs = slices.Compact(accountIDs)
+
+	countChan := make(chan unpauseCount, len(accountIDs))
+	work := make(chan int64)
+
+	var wg sync.WaitGroup
+	var errCount atomic.Uint64
+	for range parallelism {
+		wg.Go(func() {
+			for accountID := range work {
+				totalCount := int64(0)
+				for {
+					response, err := a.sac.UnpauseAccount(ctx, &sapb.RegistrationID{Id: accountID})
+					if err != nil {
+						errCount.Add(1)
+						a.log.Errf("error unpausing accountID %d: %v", accountID, err)
+						break
+					}
+					totalCount += response.Count
+					if response.Count < unpause.RequestLimit {
+						// All identifiers have been unpaused.
+						break
+					}
+				}
+				countChan <- unpauseCount{accountID: accountID, count: totalCount}
+			}
+		})
+	}
+
+	go func() {
+		for _, accountID := range accountIDs {
+			work <- accountID
+		}
+		close(work)
+	}()
+
+	go func() {
+		wg.Wait()
+		close(countChan)
+	}()
+
+	var unpauseCounts []unpauseCount
+	for count := range countChan {
+		unpauseCounts = append(unpauseCounts, count)
+	}
+
+	if errCount.Load() > 0 {
+		return unpauseCounts, fmt.Errorf("encountered %d errors while unpausing; see logs above for details", errCount.Load())
+	}
+
+	return unpauseCounts, nil
+}
+
+// readUnpauseAccountFile parses the contents of a file containing one account
+// ID per line into a slice of int64s.
It will skip malformed records and continue +// processing until the end of file marker. +func (a *admin) readUnpauseAccountFile(filePath string) ([]int64, error) { + fp, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("opening paused account data file: %w", err) + } + defer fp.Close() + + var unpauseAccounts []int64 + lineCounter := 0 + scanner := bufio.NewScanner(fp) + for scanner.Scan() { + lineCounter++ + regID, err := strconv.ParseInt(scanner.Text(), 10, 64) + if err != nil { + a.log.Infof("skipping: malformed account ID entry on line %d\n", lineCounter) + continue + } + unpauseAccounts = append(unpauseAccounts, regID) + } + + if err := scanner.Err(); err != nil { + return nil, scanner.Err() + } + + return unpauseAccounts, nil +} diff --git a/cmd/admin/unpause_account_test.go b/cmd/admin/unpause_account_test.go new file mode 100644 index 00000000000..f39b168fcbf --- /dev/null +++ b/cmd/admin/unpause_account_test.go @@ -0,0 +1,134 @@ +package main + +import ( + "context" + "errors" + "os" + "path" + "strings" + "testing" + + blog "github.com/letsencrypt/boulder/log" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" + "google.golang.org/grpc" +) + +func TestReadingUnpauseAccountsFile(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + data []string + expectedRegIDs int + }{ + { + name: "No data in file", + data: nil, + }, + { + name: "valid", + data: []string{"1"}, + expectedRegIDs: 1, + }, + { + name: "valid with duplicates", + data: []string{"1", "2", "1", "3", "3"}, + expectedRegIDs: 5, + }, + { + name: "valid with empty lines and duplicates", + data: []string{"1", "\n", "6", "6", "6"}, + expectedRegIDs: 4, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + log := blog.NewMock() + a := admin{log: log} + + file := path.Join(t.TempDir(), path.Base(t.Name()+".txt")) + err := os.WriteFile(file, []byte(strings.Join(testCase.data, "\n")), os.ModePerm) + test.AssertNotError(t, err, "could not write temporary file") + + regIDs, err := a.readUnpauseAccountFile(file) + test.AssertNotError(t, err, "no error expected, but received one") + test.AssertEquals(t, len(regIDs), testCase.expectedRegIDs) + }) + } +} + +type mockSAUnpause struct { + sapb.StorageAuthorityClient +} + +func (msa *mockSAUnpause) UnpauseAccount(ctx context.Context, in *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { + return &sapb.Count{Count: 1}, nil +} + +// mockSAUnpauseBroken is a mock that always returns an error. 
+type mockSAUnpauseBroken struct { + sapb.StorageAuthorityClient +} + +func (msa *mockSAUnpauseBroken) UnpauseAccount(ctx context.Context, in *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { + return nil, errors.New("oh dear") +} + +func TestUnpauseAccounts(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + regIDs []int64 + saImpl sapb.StorageAuthorityClient + expectErr bool + expectCounts int + }{ + { + name: "no data", + regIDs: nil, + expectErr: true, + }, + { + name: "valid single entry", + regIDs: []int64{1}, + expectCounts: 1, + }, + { + name: "valid single entry but broken SA", + expectErr: true, + saImpl: &mockSAUnpauseBroken{}, + regIDs: []int64{1}, + }, + { + name: "valid multiple entries with duplicates", + regIDs: []int64{1, 1, 2, 3, 4}, + expectCounts: 4, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + log := blog.NewMock() + + // Default to a working mock SA implementation + if testCase.saImpl == nil { + testCase.saImpl = &mockSAUnpause{} + } + a := admin{sac: testCase.saImpl, log: log} + + counts, err := a.unpauseAccounts(context.Background(), testCase.regIDs, 10) + if testCase.expectErr { + test.AssertError(t, err, "should have errored, but did not") + } else { + test.AssertNotError(t, err, "should not have errored") + test.AssertEquals(t, testCase.expectCounts, len(counts)) + } + }) + } +} diff --git a/cmd/akamai-purger/main.go b/cmd/akamai-purger/main.go deleted file mode 100644 index 2ca9855fcb9..00000000000 --- a/cmd/akamai-purger/main.go +++ /dev/null @@ -1,446 +0,0 @@ -package notmain - -import ( - "context" - "errors" - "flag" - "fmt" - "math" - "os" - "strings" - "sync" - "time" - - "github.com/honeycombio/beeline-go" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/letsencrypt/boulder/akamai" - akamaipb "github.com/letsencrypt/boulder/akamai/proto" - "github.com/letsencrypt/boulder/cmd" - bgrpc "github.com/letsencrypt/boulder/grpc" - blog "github.com/letsencrypt/boulder/log" -) - -const ( - // TODO(#6003) remove entirely. - DeprecatedQueueEntriesPerBatch = 33 - - // akamaiBytesPerResponse is the total bytes of all 3 URLs associated with a - // single OCSP response cached by Akamai. Each response is composed of 3 - // URLs; the POST Cache Key URL is 61 bytes and the encoded and unencoded - // GET URLs are 163 bytes and 151 bytes respectively. This totals 375 bytes, - // which we round up to 400. - akamaiBytesPerResponse = 400 - - // urlsPerQueueEntry is the number of URLs associated with a single cached - // OCSP response. - urlsPerQueueEntry = 3 - - // defaultQueueEntriesPerBatch is the default value for - // 'queueEntriesPerBatch'. - defaultQueueEntriesPerBatch = 2 - - // defaultPurgeBatchInterval is the default value for 'purgeBatchInterval'. - defaultPurgeBatchInterval = time.Millisecond * 32 - - // defaultQueueSize is the default value for 'maxQueueSize'. A queue size of - // 1.25M cached OCSP responses, assuming 3 URLs per request, is about 6 - // hours of work using the default settings detailed above. - defaultQueueSize = 1250000 - - // akamaiBytesPerReqLimit is the limit of bytes allowed in a single request - // to the Fast-Purge API. 
With a limit of no more than 50,000 bytes, we - // subtract 1 byte to get the limit, and subtract an additional 19 bytes for - // overhead of the 'objects' key and array. - akamaiBytesPerReqLimit = 50000 - 1 - 19 - - // akamaiAPIReqPerSecondLimit is the limit of requests, per second, that - // we're allowed to make to the Fast-Purge API. - akamaiAPIReqPerSecondLimit = 50 - - // akamaiURLsPerSecondLimit is the limit of URLs, sent per second, that - // we're allowed to make to the Fast-Purge API. - akamaiURLsPerSecondLimit = 200 -) - -// Throughput is a container for all throuput related akamai-purger -// configuration settings. -type Throughput struct { - // QueueEntriesPerBatch the number of cached OCSP responses to included in each - // purge request. One cached OCSP response is composed of 3 URLs totaling < - // 400 bytes. If this value isn't provided it will default to - // 'defaultQueueEntriesPerBatch'. - QueueEntriesPerBatch int - - // PurgeBatchInterval is the duration waited between dispatching an Akamai - // purge request containing 'QueueEntriesPerBatch' * 3 URLs. If this value - // isn't provided it will default to 'defaultPurgeBatchInterval'. - PurgeBatchInterval cmd.ConfigDuration -} - -func (t *Throughput) useOptimizedDefaults() { - if t.QueueEntriesPerBatch == 0 { - t.QueueEntriesPerBatch = defaultQueueEntriesPerBatch - } - if t.PurgeBatchInterval.Duration == 0 { - t.PurgeBatchInterval.Duration = defaultPurgeBatchInterval - } -} - -// validate ensures that the provided throughput configuration will not violate -// the Akamai Fast-Purge API limits. For more information see the official -// documentation: -// https://techdocs.akamai.com/purge-cache/reference/rate-limiting -func (t *Throughput) validate() error { - if t.PurgeBatchInterval.Duration == 0 { - // TODO(#6003) remove /'purgeInterval'. - return errors.New("'purgeBatchInterval'/'purgeInterval' must be > 0 nanoseconds") - } - if t.QueueEntriesPerBatch <= 0 { - return errors.New("'queueEntriesPerBatch' must be > 0") - } - - // Send no more than the 50,000 bytes of objects we’re allotted per request. - bytesPerRequest := (t.QueueEntriesPerBatch * akamaiBytesPerResponse) - if bytesPerRequest > akamaiBytesPerReqLimit { - return fmt.Errorf("config exceeds Akamai's bytes per request limit (%d bytes) by %d", - akamaiBytesPerReqLimit, bytesPerRequest-akamaiBytesPerReqLimit) - } - - // Send no more than the 50 API requests we’re allotted each second. - requestsPerSecond := int(math.Ceil(float64(time.Second) / float64(t.PurgeBatchInterval.Duration))) - if requestsPerSecond > akamaiAPIReqPerSecondLimit { - return fmt.Errorf("config exceeds Akamai's requests per second limit (%d requests) by %d", - akamaiAPIReqPerSecondLimit, requestsPerSecond-akamaiAPIReqPerSecondLimit) - } - - // Purge no more than the 200 URLs we’re allotted each second. - urlsPurgedPerSecond := requestsPerSecond * (t.QueueEntriesPerBatch * urlsPerQueueEntry) - if urlsPurgedPerSecond > akamaiURLsPerSecondLimit { - return fmt.Errorf("config exceeds Akamai's URLs per second limit (%d URLs) by %d", - akamaiURLsPerSecondLimit, urlsPurgedPerSecond-akamaiURLsPerSecondLimit) - } - return nil -} - -type Config struct { - AkamaiPurger struct { - cmd.ServiceConfig - - // PurgeInterval is the duration waited between dispatching an Akamai - // purge request containing 'DepracatedQueueEntriesPerBatch' * 3 URLs. - // Deprecated: TODO(#6003) this field is can be removed in favor of the - // `Throughput.PurgeBatchInterval`. 
- PurgeInterval cmd.ConfigDuration - - // MaxQueueSize is the maximum size of the purger queue. If this value - // isn't provided it will default to `defaultQueueSize`. - MaxQueueSize int - - BaseURL string - ClientToken string - ClientSecret string - AccessToken string - V3Network string - - // Throughput is a container for all throughput related akamai-purger - // settings. - Throughput Throughput - - // PurgeRetries is the maximum number of attempts that will be made to purge a - // batch of URLs before the batch is added back to the queue. - PurgeRetries int - - // PurgeRetryBackoff is the base duration that will be waited before - // attempting to purge a batch of URLs which previously failed to be - // purged. - PurgeRetryBackoff cmd.ConfigDuration - } - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -// TODO(#6003) remove entirely. -func (c *Config) useDeprecatedSettings() { - c.AkamaiPurger.Throughput.PurgeBatchInterval = c.AkamaiPurger.PurgeInterval - c.AkamaiPurger.Throughput.QueueEntriesPerBatch = DeprecatedQueueEntriesPerBatch -} - -// akamaiPurger is a mutex protected container for a gRPC server which receives -// requests to purge the URLs associated with OCSP responses cached by Akamai, -// stores these URLs as a slice in an inner slice, and dispatches them to -// Akamai's Fast Purge API in batches. -type akamaiPurger struct { - sync.Mutex - akamaipb.UnimplementedAkamaiPurgerServer - - // toPurge functions as a queue where each entry contains the three OCSP response URLs - // associated with a given certificate. - toPurge [][]string - maxQueueSize int - client *akamai.CachePurgeClient - log blog.Logger -} - -func (ap *akamaiPurger) len() int { - ap.Lock() - defer ap.Unlock() - return len(ap.toPurge) -} - -func (ap *akamaiPurger) purge() error { - ap.Lock() - queueEntries := ap.toPurge[:] - ap.toPurge = [][]string{} - ap.Unlock() - if len(queueEntries) == 0 { - return nil - } - - stoppedAt, err := ap.client.Purge(queueEntries) - if err != nil { - ap.Lock() - - // Add the remaining queue entries back, but at the end of the queue. If somehow - // there's a URL which repeatedly results in error, it won't block the - // entire queue, only a single batch. - ap.toPurge = append(ap.toPurge, queueEntries[stoppedAt:]...) - ap.Unlock() - ap.log.Errf("Failed to purge OCSP responses for %d certificates: %s", len(queueEntries), err) - return err - } - return nil -} - -// Purge is an exported gRPC method which receives purge requests and appends -// them to the queue. 
-func (ap *akamaiPurger) Purge(ctx context.Context, req *akamaipb.PurgeRequest) (*emptypb.Empty, error) { - ap.Lock() - defer ap.Unlock() - if len(ap.toPurge) >= ap.maxQueueSize { - return nil, errors.New("akamai-purger queue too large") - } - ap.toPurge = append(ap.toPurge, req.Urls) - return &emptypb.Empty{}, nil -} - -func main() { - daemonFlags := flag.NewFlagSet("daemon", flag.ExitOnError) - grpcAddr := daemonFlags.String("addr", "", "gRPC listen address override") - debugAddr := daemonFlags.String("debug-addr", "", "Debug server address override") - configFile := daemonFlags.String("config", "", "File path to the configuration file for this service") - - manualFlags := flag.NewFlagSet("manual", flag.ExitOnError) - manualConfigFile := manualFlags.String("config", "", "File path to the configuration file for this service") - tag := manualFlags.String("tag", "", "Single cache tag to purge") - tagFile := manualFlags.String("tag-file", "", "File containing cache tags to purge, one per line") - - if len(os.Args) < 2 { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - daemonFlags.PrintDefaults() - fmt.Fprintln(os.Stderr, "OR:") - fmt.Fprintf(os.Stderr, "%s manual \n", os.Args[0]) - manualFlags.PrintDefaults() - os.Exit(1) - } - - // Check if the purger is being started in daemon (URL purging gRPC service) - // or manual (ad-hoc tag purging) mode. - var manualMode bool - if os.Args[1] == "manual" { - manualMode = true - _ = manualFlags.Parse(os.Args[2:]) - if *configFile == "" { - manualFlags.Usage() - os.Exit(1) - } - if *tag == "" && *tagFile == "" { - cmd.Fail("Must specify one of --tag or --tag-file for manual purge") - } else if *tag != "" && *tagFile != "" { - cmd.Fail("Cannot specify both of --tag and --tag-file for manual purge") - } - configFile = manualConfigFile - } else { - _ = daemonFlags.Parse(os.Args[1:]) - if *configFile == "" { - daemonFlags.Usage() - os.Exit(1) - } - } - - var c Config - err := cmd.ReadConfigFile(*configFile, &c) - cmd.FailOnError(err, "Reading JSON config file into config structure") - - // Make references to the service config cleaner. - apc := &c.AkamaiPurger - - if *grpcAddr != "" { - apc.GRPC.Address = *grpcAddr - } - if *debugAddr != "" { - apc.DebugAddr = *debugAddr - } - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, apc.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - // TODO(#6003) This block satisfies our deployability guidelines and can be - // removed entirely once the 'purgeInterval' key has been removed from all - // staging and production configuration. - usingDeprecatedThroughput := apc.PurgeInterval.Duration != 0 - usingNewThroughput := apc.Throughput != Throughput{} - if usingDeprecatedThroughput && usingNewThroughput { - cmd.Fail("Config cannot specify both 'throughput': {...} AND 'purgeInterval'") - } - if usingDeprecatedThroughput && !usingNewThroughput { - c.useDeprecatedSettings() - } - - // When the operator hasn't specified any throughput settings, use the - // optimized defaults. TODO(#6003) remove 'usingDeprecatedThroughput'. 
- if !usingDeprecatedThroughput && !usingNewThroughput { - apc.Throughput.useOptimizedDefaults() - } - cmd.FailOnError(apc.Throughput.validate(), "") - - if apc.MaxQueueSize == 0 { - apc.MaxQueueSize = defaultQueueSize - } - - ccu, err := akamai.NewCachePurgeClient( - apc.BaseURL, - apc.ClientToken, - apc.ClientSecret, - apc.AccessToken, - apc.V3Network, - apc.Throughput.PurgeBatchInterval.Duration, - apc.Throughput.QueueEntriesPerBatch, - apc.PurgeRetries, - apc.PurgeRetryBackoff.Duration, - logger, - scope, - ) - cmd.FailOnError(err, "Failed to setup Akamai CCU client") - - ap := &akamaiPurger{ - maxQueueSize: apc.MaxQueueSize, - client: ccu, - log: logger, - } - - var gaugePurgeQueueLength = prometheus.NewGaugeFunc( - prometheus.GaugeOpts{ - Name: "ccu_purge_queue_length", - Help: "The length of the akamai-purger queue. Captured on each prometheus scrape.", - }, - func() float64 { return float64(ap.len()) }, - ) - scope.MustRegister(gaugePurgeQueueLength) - - if manualMode { - manualPurge(ccu, *tag, *tagFile, logger) - } else { - daemon(c, ap, logger, scope) - } -} - -// manualPurge is called ad-hoc to purge either a single tag, or a batch of tags, -// passed on the CLI. All tags will be added to a single request, please ensure -// that you don't violate the Fast-Purge API limits for tags detailed here: -// https://techdocs.akamai.com/purge-cache/reference/rate-limiting -func manualPurge(purgeClient *akamai.CachePurgeClient, tag, tagFile string, logger blog.Logger) { - var tags []string - if tag != "" { - tags = []string{tag} - } else { - contents, err := os.ReadFile(tagFile) - cmd.FailOnError(err, fmt.Sprintf("While reading %q", tagFile)) - tags = strings.Split(string(contents), "\n") - } - - err := purgeClient.PurgeTags(tags) - cmd.FailOnError(err, "Purging tags") -} - -// daemon initializes the akamai-purger gRPC service. -func daemon(c Config, ap *akamaiPurger, logger blog.Logger, scope prometheus.Registerer) { - clk := cmd.Clock() - - tlsConfig, err := c.AkamaiPurger.TLS.Load() - cmd.FailOnError(err, "tlsConfig config") - - stop, stopped := make(chan bool, 1), make(chan bool, 1) - ticker := time.NewTicker(c.AkamaiPurger.Throughput.PurgeBatchInterval.Duration) - go func() { - loop: - for { - select { - case <-ticker.C: - _ = ap.purge() - case <-stop: - break loop - } - } - - // As we may have missed a tick by calling ticker.Stop() and - // writing to the stop channel call ap.purge one last time just - // in case there is anything that still needs to be purged. - queueLen := ap.len() - if queueLen > 0 { - logger.Infof("Shutting down; purging OCSP responses for %d certificates before exit.", queueLen) - err := ap.purge() - cmd.FailOnError(err, fmt.Sprintf("Shutting down; failed to purge OCSP responses for %d certificates before exit", queueLen)) - logger.Infof("Shutting down; finished purging OCSP responses for %d certificates.", queueLen) - } else { - logger.Info("Shutting down; queue is already empty.") - } - stopped <- true - }() - - serverMetrics := bgrpc.NewServerMetrics(scope) - - grpcSrv, l, err := bgrpc.NewServer(c.AkamaiPurger.GRPC, tlsConfig, serverMetrics, clk) - cmd.FailOnError(err, "Unable to setup Akamai purger gRPC server") - - akamaipb.RegisterAkamaiPurgerServer(grpcSrv, ap) - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - - go cmd.CatchSignals(logger, func() { - hs.Shutdown() - grpcSrv.GracefulStop() - - // Stop the ticker and signal that we want to shutdown by writing to the - // stop channel. 
We wait 15 seconds for any remaining URLs to be emptied - // from the current queue, if we pass that deadline we exit early. - ticker.Stop() - stop <- true - select { - case <-time.After(time.Second * 15): - cmd.Fail("Timed out waiting for purger to finish work") - case <-stopped: - } - }) - err = cmd.FilterShutdownErrors(grpcSrv.Serve(l)) - cmd.FailOnError(err, "akamai-purger gRPC service failed") - - // When we get a SIGTERM, we will exit from grpcSrv.Serve as soon as all - // extant RPCs have been processed, but we want the process to stick around - // while we still have a goroutine purging the last elements from the queue. - // Once that's done, CatchSignals will call os.Exit(). - select {} -} - -func init() { - cmd.RegisterCommand("akamai-purger", main) -} diff --git a/cmd/akamai-purger/main_test.go b/cmd/akamai-purger/main_test.go deleted file mode 100644 index 5816de8184b..00000000000 --- a/cmd/akamai-purger/main_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package notmain - -import ( - "testing" - "time" -) - -func TestThroughput_validate(t *testing.T) { - type fields struct { - QueueEntriesPerBatch int - PurgeBatchInterval time.Duration - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - // TODO(#6003) This test case can be removed entirely. It was added to - // prove that this change met our deployability guidelines. The existing - // test/config couldn't modified to reflect production without adding 10 - // seconds of wait to verify_akamai_purge() in test/helpers.py. - {"production configuration prior to this change", - fields{ - QueueEntriesPerBatch: DeprecatedQueueEntriesPerBatch, - PurgeBatchInterval: 10 * time.Second}, - false, - }, - {"optimized defaults, should succeed", - fields{ - QueueEntriesPerBatch: defaultQueueEntriesPerBatch, - PurgeBatchInterval: defaultPurgeBatchInterval}, - false, - }, - {"2ms faster than optimized defaults, should succeed", - fields{ - QueueEntriesPerBatch: defaultQueueEntriesPerBatch, - PurgeBatchInterval: defaultPurgeBatchInterval + 2*time.Millisecond}, - false, - }, - {"exceeds URLs per second by 4 URLs", - fields{ - QueueEntriesPerBatch: defaultQueueEntriesPerBatch, - PurgeBatchInterval: 29 * time.Millisecond}, - true, - }, - {"exceeds bytes per second by 20 bytes", - fields{ - QueueEntriesPerBatch: 125, - PurgeBatchInterval: 1 * time.Second}, - true, - }, - {"exceeds requests per second by 1 request", - fields{ - QueueEntriesPerBatch: 1, - PurgeBatchInterval: 19999 * time.Microsecond}, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tr := &Throughput{ - QueueEntriesPerBatch: tt.fields.QueueEntriesPerBatch, - } - tr.PurgeBatchInterval.Duration = tt.fields.PurgeBatchInterval - if err := tr.validate(); (err != nil) != tt.wantErr { - t.Errorf("Throughput.validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/cmd/bad-key-revoker/main.go b/cmd/bad-key-revoker/main.go index 8ae11c24fc8..5ec0f2a1cef 100644 --- a/cmd/bad-key-revoker/main.go +++ b/cmd/bad-key-revoker/main.go @@ -1,46 +1,30 @@ package notmain import ( - "bytes" "context" - "crypto/x509" "flag" "fmt" - "html/template" - "io/ioutil" - netmail "net/mail" "os" - "strings" "time" - "github.com/honeycombio/beeline-go" "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/letsencrypt/boulder/cmd" + 
"github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/db" bgrpc "github.com/letsencrypt/boulder/grpc" blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/mail" rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/revocation" "github.com/letsencrypt/boulder/sa" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" ) -var keysProcessed = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "bad_keys_processed", - Help: "A counter of blockedKeys rows processed labelled by processing state", -}, []string{"state"}) -var certsRevoked = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "bad_keys_certs_revoked", - Help: "A counter of certificates associated with rows in blockedKeys that have been revoked", -}) -var mailErrors = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "bad_keys_mail_errors", - Help: "A counter of email send errors", -}) +const blockedKeysGaugeLimit = 1000 // revoker is an interface used to reduce the scope of a RA gRPC client // to only the single method we need to use, this makes testing significantly @@ -50,19 +34,20 @@ type revoker interface { } type badKeyRevoker struct { - dbMap *db.WrappedMap - maxRevocations int - serialBatchSize int - raClient revoker - mailer mail.Mailer - emailSubject string - emailTemplate *template.Template - logger blog.Logger - clk clock.Clock - backoffIntervalBase time.Duration - backoffIntervalMax time.Duration - backoffFactor float64 - backoffTicker int + dbMap *db.WrappedMap + maxRevocations int + serialBatchSize int + raClient revoker + logger blog.Logger + clk clock.Clock + backoffIntervalBase time.Duration + backoffIntervalMax time.Duration + backoffFactor float64 + backoffTicker int + maxExpectedReplicationLag time.Duration + keysToProcess prometheus.Gauge + keysProcessed *prometheus.CounterVec + certsRevoked prometheus.Counter } // uncheckedBlockedKey represents a row in the blockedKeys table @@ -76,14 +61,33 @@ func (ubk uncheckedBlockedKey) String() string { ubk.RevokedBy, ubk.KeyHash) } -func (bkr *badKeyRevoker) selectUncheckedKey() (uncheckedBlockedKey, error) { +func (bkr *badKeyRevoker) countUncheckedKeys(ctx context.Context) (int, error) { + var count int + err := bkr.dbMap.SelectOne( + ctx, + &count, + `SELECT COUNT(*) + FROM (SELECT 1 FROM blockedKeys + WHERE extantCertificatesChecked = false AND added < ? - INTERVAL ? SECOND + LIMIT ?) AS a`, + bkr.clk.Now(), + bkr.maxExpectedReplicationLag.Seconds(), + blockedKeysGaugeLimit, + ) + return count, err +} + +func (bkr *badKeyRevoker) selectUncheckedKey(ctx context.Context) (uncheckedBlockedKey, error) { var row uncheckedBlockedKey err := bkr.dbMap.SelectOne( + ctx, &row, `SELECT keyHash, revokedBy FROM blockedKeys - WHERE extantCertificatesChecked = false + WHERE extantCertificatesChecked = false AND added < ? - INTERVAL ? SECOND LIMIT 1`, + bkr.clk.Now(), + bkr.maxExpectedReplicationLag.Seconds(), ) return row, err } @@ -106,7 +110,7 @@ func (uc unrevokedCertificate) String() string { // findUnrevoked looks for all unexpired, currently valid certificates which have a specific SPKI hash, // by looking first at the keyHashToSerial table and then the certificateStatus and certificates tables. // If the number of certificates it finds is larger than bkr.maxRevocations it'll error out. 
-func (bkr *badKeyRevoker) findUnrevoked(unchecked uncheckedBlockedKey) ([]unrevokedCertificate, error) {
+func (bkr *badKeyRevoker) findUnrevoked(ctx context.Context, unchecked uncheckedBlockedKey) ([]unrevokedCertificate, error) {
 	var unrevokedCerts []unrevokedCertificate
 	initialID := 0
 	for {
@@ -115,6 +119,7 @@ func (bkr *badKeyRevoker) findUnrevoked(unchecked uncheckedBlockedKey) ([]unrevo
 			CertSerial string
 		}
 		_, err := bkr.dbMap.Select(
+			ctx,
 			&batch,
 			"SELECT id, certSerial FROM keyHashToSerial WHERE keyHash = ? AND id > ? AND certNotAfter > ? ORDER BY id LIMIT ?",
 			unchecked.KeyHash,
@@ -131,13 +136,19 @@ func (bkr *badKeyRevoker) findUnrevoked(unchecked uncheckedBlockedKey) ([]unrevo
 		initialID = batch[len(batch)-1].ID
 		for _, serial := range batch {
 			var unrevokedCert unrevokedCertificate
+			// NOTE: This has a `LIMIT 1` because the certificateStatus and precertificates
+			// tables do not have a UNIQUE KEY on serial (for partitioning reasons). So it's
+			// possible we could get multiple results for a single serial number, but they
+			// would be duplicates.
 			err = bkr.dbMap.SelectOne(
+				ctx,
 				&unrevokedCert,
 				`SELECT cs.id, cs.serial, c.registrationID, c.der, cs.status, cs.isExpired
 				FROM certificateStatus AS cs
 				JOIN precertificates AS c
 				ON cs.serial = c.serial
-				WHERE cs.serial = ?`,
+				WHERE cs.serial = ?
+				LIMIT 1`,
 				serial.CertSerial,
 			)
 			if err != nil {
@@ -157,186 +168,97 @@ func (bkr *badKeyRevoker) findUnrevoked(unchecked uncheckedBlockedKey) ([]unrevo
 // markRowChecked updates a row in the blockedKeys table to mark a keyHash
 // as having been checked for extant unrevoked certificates.
-func (bkr *badKeyRevoker) markRowChecked(unchecked uncheckedBlockedKey) error {
-	_, err := bkr.dbMap.Exec("UPDATE blockedKeys SET extantCertificatesChecked = true WHERE keyHash = ?", unchecked.KeyHash)
+func (bkr *badKeyRevoker) markRowChecked(ctx context.Context, unchecked uncheckedBlockedKey) error {
+	_, err := bkr.dbMap.ExecContext(ctx, "UPDATE blockedKeys SET extantCertificatesChecked = true WHERE keyHash = ?", unchecked.KeyHash)
 	return err
 }

-// resolveContacts builds a map of id -> email addresses
-func (bkr *badKeyRevoker) resolveContacts(ids []int64) (map[int64][]string, error) {
-	idToEmail := map[int64][]string{}
-	for _, id := range ids {
-		var emails struct {
-			Contact []string
-		}
-		err := bkr.dbMap.SelectOne(&emails, "SELECT contact FROM registrations WHERE id = ?", id)
+// revokeCerts revokes all the provided certificates. It uses reason
+// keyCompromise and includes a note indicating that they were revoked by
+// bad-key-revoker.
+func (bkr *badKeyRevoker) revokeCerts(certs []unrevokedCertificate) error { + for _, cert := range certs { + _, err := bkr.raClient.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ + Cert: cert.DER, + Serial: cert.Serial, + Code: int64(revocation.KeyCompromise), + AdminName: "bad-key-revoker", + }) if err != nil { - // ErrNoRows is not acceptable here since there should always be a - // row for the registration, even if there are no contacts - return nil, err - } - if len(emails.Contact) != 0 { - for _, email := range emails.Contact { - idToEmail[id] = append(idToEmail[id], strings.TrimPrefix(email, "mailto:")) - } - } else { - // if the account has no contacts add a placeholder empty contact - // so that we don't skip any certificates - idToEmail[id] = append(idToEmail[id], "") - continue + return err } + bkr.certsRevoked.Inc() } - return idToEmail, nil + return nil } -var maxSerials = 100 - -// sendMessage sends a single email to the provided address with the revoked -// serials -func (bkr *badKeyRevoker) sendMessage(addr string, serials []string) error { - err := bkr.mailer.Connect() - if err != nil { - return err - } +// invoke exits early and returns true if there is no work to be done. +// Otherwise, it processes a single key in the blockedKeys table and returns false. +func (bkr *badKeyRevoker) invoke(ctx context.Context) (work bool, err error) { + logEvent := make(map[string]any) defer func() { - _ = bkr.mailer.Close() + if err != nil { + bkr.logger.AuditErr("Error while processing bad key", err, logEvent) + } else { + bkr.logger.AuditInfo("Processed bad key", logEvent) + } }() - mutSerials := make([]string, len(serials)) - copy(mutSerials, serials) - if len(mutSerials) > maxSerials { - more := len(mutSerials) - maxSerials - mutSerials = mutSerials[:maxSerials] - mutSerials = append(mutSerials, fmt.Sprintf("and %d more certificates.", more)) - } - message := bytes.NewBuffer(nil) - err = bkr.emailTemplate.Execute(message, mutSerials) - if err != nil { - return err - } - err = bkr.mailer.SendMail([]string{addr}, bkr.emailSubject, message.String()) + + // Gather a count of rows to be processed. + uncheckedCount, err := bkr.countUncheckedKeys(ctx) if err != nil { - return err + return false, err } - return nil -} + logEvent["keysToProcess"] = uncheckedCount -// revokeCerts revokes all the certificates associated with a particular key hash and sends -// emails to the users that issued the certificates. Emails are not sent to the user which -// requested revocation of the original certificate which marked the key as compromised. -func (bkr *badKeyRevoker) revokeCerts(revokerEmails []string, emailToCerts map[string][]unrevokedCertificate) error { - revokerEmailsMap := map[string]bool{} - for _, email := range revokerEmails { - revokerEmailsMap[email] = true - } + // Set the gauge to the number of rows to be processed (max: + // blockedKeysGaugeLimit). 
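The count feeding this gauge is deliberately bounded: the `COUNT(*)` runs over a subquery capped with `LIMIT`, so the metric stays cheap to compute even when the backlog is enormous, at the cost of saturating at the cap. A standalone sketch of that bounded-count pattern, assuming `database/sql` and the Prometheus Go client:

```go
package sketch

import (
	"context"
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
)

const backlogGaugeLimit = 1000

// setBacklogGauge reports the unprocessed-row count, saturating at
// backlogGaugeLimit. The inner LIMIT bounds how many rows the database
// must examine, so a huge backlog can't turn this into a full-table scan.
func setBacklogGauge(ctx context.Context, db *sql.DB, g prometheus.Gauge) error {
	var n int
	err := db.QueryRowContext(ctx,
		`SELECT COUNT(*) FROM
		   (SELECT 1 FROM blockedKeys
		    WHERE extantCertificatesChecked = false
		    LIMIT ?) AS a`,
		backlogGaugeLimit,
	).Scan(&n)
	if err != nil {
		return err
	}
	g.Set(float64(n))
	return nil
}
```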
+ bkr.keysToProcess.Set(float64(uncheckedCount)) - alreadyRevoked := map[int]bool{} - for email, certs := range emailToCerts { - var revokedSerials []string - for _, cert := range certs { - revokedSerials = append(revokedSerials, cert.Serial) - if alreadyRevoked[cert.ID] { - continue - } - _, err := bkr.raClient.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: cert.DER, - Code: int64(ocsp.KeyCompromise), - AdminName: "bad-key-revoker", - }) - if err != nil { - return err - } - certsRevoked.Inc() - alreadyRevoked[cert.ID] = true - } - // don't send emails to the person who revoked the certificate - if revokerEmailsMap[email] || email == "" { - continue - } - err := bkr.sendMessage(email, revokedSerials) - if err != nil { - mailErrors.Inc() - bkr.logger.Errf("failed to send message to %q: %s", email, err) - continue - } + if uncheckedCount >= blockedKeysGaugeLimit { + logEvent["keysToProcessOverflow"] = true } - return nil -} -// invoke processes a single key in the blockedKeys table and returns whether -// there were any rows to process or not. -func (bkr *badKeyRevoker) invoke() (bool, error) { // select a row to process - unchecked, err := bkr.selectUncheckedKey() + unchecked, err := bkr.selectUncheckedKey(ctx) if err != nil { if db.IsNoRows(err) { return true, nil } return false, err } - bkr.logger.AuditInfo(fmt.Sprintf("found unchecked block key to work on: %s", unchecked)) + logEvent["keyHash"] = fmt.Sprintf("%x", unchecked.KeyHash) + logEvent["revokedBy"] = unchecked.RevokedBy // select all unrevoked, unexpired serials associated with the blocked key hash - unrevokedCerts, err := bkr.findUnrevoked(unchecked) + unrevokedCerts, err := bkr.findUnrevoked(ctx, unchecked) if err != nil { - bkr.logger.AuditInfo(fmt.Sprintf("finding unrevoked certificates related to %s: %s", - unchecked, err)) return false, err } + logEvent["certsToProcess"] = len(unrevokedCerts) + if len(unrevokedCerts) == 0 { - bkr.logger.AuditInfo(fmt.Sprintf("found no certificates that need revoking related to %s, marking row as checked", unchecked)) - // mark row as checked - err = bkr.markRowChecked(unchecked) + err = bkr.markRowChecked(ctx, unchecked) if err != nil { return false, err } return false, nil } - // build a map of registration ID -> certificates, and collect a - // list of unique registration IDs - ownedBy := map[int64][]unrevokedCertificate{} - var ids []int64 + var serials []string for _, cert := range unrevokedCerts { - if ownedBy[cert.RegistrationID] == nil { - ids = append(ids, cert.RegistrationID) - } - ownedBy[cert.RegistrationID] = append(ownedBy[cert.RegistrationID], cert) - } - // if the account that revoked the original certificate isn't an owner of any - // extant certificates, still add them to ids so that we can resolve their - // email and avoid sending emails later. If RevokedBy == 0 it was a row - // inserted by admin-revoker with a dummy ID, since there won't be a registration - // to look up, don't bother adding it to ids. 
- if _, present := ownedBy[unchecked.RevokedBy]; !present && unchecked.RevokedBy != 0 { - ids = append(ids, unchecked.RevokedBy) - } - // get contact addresses for the list of IDs - idToEmails, err := bkr.resolveContacts(ids) - if err != nil { - return false, err - } - - // build a map of email -> certificates, this de-duplicates accounts with - // the same email addresses - emailsToCerts := map[string][]unrevokedCertificate{} - for id, emails := range idToEmails { - for _, email := range emails { - emailsToCerts[email] = append(emailsToCerts[email], ownedBy[id]...) - } + serials = append(serials, cert.Serial) } + logEvent["serials"] = serials - revokerEmails := idToEmails[unchecked.RevokedBy] - bkr.logger.AuditInfo(fmt.Sprintf("revoking certs. revoked emails=%v, emailsToCerts=%s", - revokerEmails, emailsToCerts)) - - // revoke each certificate and send emails to their owners - err = bkr.revokeCerts(idToEmails[unchecked.RevokedBy], emailsToCerts) + // revoke each certificate + err = bkr.revokeCerts(unrevokedCerts) if err != nil { return false, err } // mark the key as checked - err = bkr.markRowChecked(unchecked) + err = bkr.markRowChecked(ctx, unchecked) if err != nil { return false, err } @@ -346,7 +268,7 @@ func (bkr *badKeyRevoker) invoke() (bool, error) { type Config struct { BadKeyRevoker struct { DB cmd.DBConfig - DebugAddr string + DebugAddr string `validate:"omitempty,hostname_port"` TLS cmd.TLSConfig RAService *cmd.GRPCClientConfig @@ -355,38 +277,36 @@ type Config struct { // a key hash that bad-key-revoker will attempt to revoke. If the number of certificates // is higher than MaximumRevocations bad-key-revoker will error out and refuse to // progress until this is addressed. - MaximumRevocations int + MaximumRevocations int `validate:"gte=0"` + // FindCertificatesBatchSize specifies the maximum number of serials to select from the // keyHashToSerial table at once - FindCertificatesBatchSize int + FindCertificatesBatchSize int `validate:"required"` // Interval specifies the minimum duration bad-key-revoker // should sleep between attempting to find blockedKeys rows to // process when there is an error or no work to do. - Interval cmd.ConfigDuration + Interval config.Duration `validate:"-"` // BackoffIntervalMax specifies a maximum duration the backoff // algorithm will wait before retrying in the event of error // or no work to do. - BackoffIntervalMax cmd.ConfigDuration - - Mailer struct { - cmd.SMTPConfig - // Path to a file containing a list of trusted root certificates for use - // during the SMTP connection (as opposed to the gRPC connections). - SMTPTrustedRootFile string - - From string - EmailSubject string - EmailTemplate string - } + BackoffIntervalMax config.Duration `validate:"-"` + + // MaxExpectedReplicationLag specifies the minimum duration + // bad-key-revoker should wait before searching for certificates + // matching a blockedKeys row. This should be just slightly greater than + // the database's maximum replication lag, and always well under 24 + // hours. 
+ MaxExpectedReplicationLag config.Duration `validate:"-"` } - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { + debugAddr := flag.String("debug-addr", "", "Debug server address override") configPath := flag.String("config", "", "File path to the configuration file for this service") flag.Parse() @@ -398,78 +318,52 @@ func main() { err := cmd.ReadConfigFile(*configPath, &config) cmd.FailOnError(err, "Failed reading config file") - bc, err := config.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(config.Syslog, config.BadKeyRevoker.DebugAddr) - clk := cmd.Clock() - - scope.MustRegister(keysProcessed) - scope.MustRegister(certsRevoked) - scope.MustRegister(mailErrors) + if *debugAddr != "" { + config.BadKeyRevoker.DebugAddr = *debugAddr + } - dbMap, err := sa.InitWrappedDb(config.BadKeyRevoker.DB, scope, logger) + stats, logger, oTelShutdown := cmd.StatsAndLogging(config.Syslog, config.OpenTelemetry, config.BadKeyRevoker.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + clk := clock.New() + + keysToProcess := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Name: "bad_keys_to_process", + Help: fmt.Sprintf("A gauge of blockedKeys rows to process (max: %d)", blockedKeysGaugeLimit), + }) + keysProcessed := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "bad_keys_processed", + Help: "A counter of blockedKeys rows processed labelled by processing state", + }, []string{"state"}) + certsRevoked := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "bad_keys_certs_revoked", + Help: "A counter of certificates associated with rows in blockedKeys that have been revoked", + }) + + dbMap, err := sa.InitWrappedDb(config.BadKeyRevoker.DB, stats, logger) cmd.FailOnError(err, "While initializing dbMap") - tlsConfig, err := config.BadKeyRevoker.TLS.Load() + tlsConfig, err := config.BadKeyRevoker.TLS.Load(stats) cmd.FailOnError(err, "TLS config") - clientMetrics := bgrpc.NewClientMetrics(scope) - conn, err := bgrpc.ClientSetup(config.BadKeyRevoker.RAService, tlsConfig, clientMetrics, clk) + conn, err := bgrpc.ClientSetup(config.BadKeyRevoker.RAService, tlsConfig, stats, clk) cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA") rac := rapb.NewRegistrationAuthorityClient(conn) - var smtpRoots *x509.CertPool - if config.BadKeyRevoker.Mailer.SMTPTrustedRootFile != "" { - pem, err := ioutil.ReadFile(config.BadKeyRevoker.Mailer.SMTPTrustedRootFile) - cmd.FailOnError(err, "Loading trusted roots file") - smtpRoots = x509.NewCertPool() - if !smtpRoots.AppendCertsFromPEM(pem) { - cmd.FailOnError(nil, "Failed to parse root certs PEM") - } - } - - fromAddress, err := netmail.ParseAddress(config.BadKeyRevoker.Mailer.From) - cmd.FailOnError(err, fmt.Sprintf("Could not parse from address: %s", config.BadKeyRevoker.Mailer.From)) - - smtpPassword, err := config.BadKeyRevoker.Mailer.PasswordConfig.Pass() - cmd.FailOnError(err, "Failed to load SMTP password") - mailClient := mail.New( - config.BadKeyRevoker.Mailer.Server, - config.BadKeyRevoker.Mailer.Port, - config.BadKeyRevoker.Mailer.Username, - smtpPassword, - smtpRoots, - *fromAddress, - logger, - scope, - 1*time.Second, // reconnection base backoff - 5*60*time.Second, // reconnection maximum backoff - ) - - if config.BadKeyRevoker.Mailer.EmailSubject == "" { - 
cmd.Fail("BadKeyRevoker.Mailer.EmailSubject must be populated") - } - templateBytes, err := ioutil.ReadFile(config.BadKeyRevoker.Mailer.EmailTemplate) - cmd.FailOnError(err, fmt.Sprintf("failed to read email template %q: %s", config.BadKeyRevoker.Mailer.EmailTemplate, err)) - emailTemplate, err := template.New("email").Parse(string(templateBytes)) - cmd.FailOnError(err, fmt.Sprintf("failed to parse email template %q: %s", config.BadKeyRevoker.Mailer.EmailTemplate, err)) - bkr := &badKeyRevoker{ - dbMap: dbMap, - maxRevocations: config.BadKeyRevoker.MaximumRevocations, - serialBatchSize: config.BadKeyRevoker.FindCertificatesBatchSize, - raClient: rac, - mailer: mailClient, - emailSubject: config.BadKeyRevoker.Mailer.EmailSubject, - emailTemplate: emailTemplate, - logger: logger, - clk: clk, - backoffIntervalMax: config.BadKeyRevoker.BackoffIntervalMax.Duration, - backoffIntervalBase: config.BadKeyRevoker.Interval.Duration, - backoffFactor: 1.3, + dbMap: dbMap, + maxRevocations: config.BadKeyRevoker.MaximumRevocations, + serialBatchSize: config.BadKeyRevoker.FindCertificatesBatchSize, + raClient: rac, + logger: logger, + clk: clk, + backoffIntervalMax: config.BadKeyRevoker.BackoffIntervalMax.Duration, + backoffIntervalBase: config.BadKeyRevoker.Interval.Duration, + backoffFactor: 1.3, + maxExpectedReplicationLag: config.BadKeyRevoker.MaxExpectedReplicationLag.Duration, + keysToProcess: keysToProcess, + keysProcessed: keysProcessed, + certsRevoked: certsRevoked, } // If `BackoffIntervalMax` was not set via the config, set it to 60 @@ -485,18 +379,24 @@ func main() { bkr.backoffIntervalBase = time.Second } + // If `MaxExpectedReplicationLag` was not set via the config, then set + // `bkr.maxExpectedReplicationLag` to a default 22 seconds. This is based on + // ProxySQL's max_replication_lag for bad-key-revoker (10s), times two, plus + // two seconds. + if bkr.maxExpectedReplicationLag == 0 { + bkr.maxExpectedReplicationLag = time.Second * 22 + } + // Run bad-key-revoker in a loop. Backoff if no work or errors. 
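The backoff used by this loop is truncated exponential: the sleep starts at `Interval`, is multiplied by `backoffFactor` (1.3) after each consecutive idle or failed pass, and is clamped at `BackoffIntervalMax`. A minimal sketch of that policy; Boulder's own backoff helper may also apply jitter, which this omits:

```go
package sketch

import (
	"math"
	"time"
)

type backoff struct {
	base   time.Duration // starting interval, e.g. 1s
	max    time.Duration // upper bound on any single sleep, e.g. 60s
	factor float64       // growth per consecutive idle/error pass, e.g. 1.3
	ticker int           // consecutive idle/error passes so far
}

// next returns how long to sleep before the following attempt and advances
// the ticker. Callers should invoke reset after a productive pass.
func (b *backoff) next() time.Duration {
	d := float64(b.base) * math.Pow(b.factor, float64(b.ticker))
	if d > float64(b.max) {
		d = float64(b.max)
	}
	b.ticker++
	return time.Duration(d)
}

func (b *backoff) reset() { b.ticker = 0 }
```

With a 1s base, factor 1.3, and a 60s cap, consecutive idle passes sleep roughly 1s, 1.3s, 1.7s, 2.2s, and so on until the cap.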
for { - noWork, err := bkr.invoke() + noWork, err := bkr.invoke(context.Background()) if err != nil { keysProcessed.WithLabelValues("error").Inc() - logger.AuditErrf("failed to process blockedKeys row: %s", err) // Calculate and sleep for a backoff interval bkr.backoff() continue } if noWork { - logger.Info("no work to do") // Calculate and sleep for a backoff interval bkr.backoff() } else { @@ -528,5 +428,5 @@ func (bkr *badKeyRevoker) backoffReset() { } func init() { - cmd.RegisterCommand("bad-key-revoker", main) + cmd.RegisterCommand("bad-key-revoker", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/bad-key-revoker/main_test.go b/cmd/bad-key-revoker/main_test.go index 1e04cc89983..9b6e97045ed 100644 --- a/cmd/bad-key-revoker/main_test.go +++ b/cmd/bad-key-revoker/main_test.go @@ -4,23 +4,22 @@ import ( "context" "crypto/rand" "fmt" - "html/template" - "strings" "sync" "testing" "time" "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/db" blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/mocks" rapb "github.com/letsencrypt/boulder/ra/proto" "github.com/letsencrypt/boulder/sa" "github.com/letsencrypt/boulder/test" "github.com/letsencrypt/boulder/test/vars" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" ) func randHash(t *testing.T) []byte { @@ -33,7 +32,7 @@ func randHash(t *testing.T) []byte { func insertBlockedRow(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, hash []byte, by int64, checked bool) { t.Helper() - _, err := dbMap.Exec(`INSERT INTO blockedKeys + _, err := dbMap.ExecContext(context.Background(), `INSERT INTO blockedKeys (keyHash, added, source, revokedBy, extantCertificatesChecked) VALUES (?, ?, ?, ?, ?)`, @@ -46,55 +45,65 @@ func insertBlockedRow(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, hash [ test.AssertNotError(t, err, "failed to add test row") } +func fcBeforeRepLag(clk clock.Clock, bkr *badKeyRevoker) clock.FakeClock { + fc := clock.NewFake() + fc.Set(clk.Now().Add(-bkr.maxExpectedReplicationLag - time.Second)) + return fc +} + func TestSelectUncheckedRows(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + ctx := context.Background() + + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() bkr := &badKeyRevoker{ - dbMap: dbMap, - logger: blog.NewMock(), - clk: fc, + dbMap: dbMap, + logger: blog.NewMock(), + clk: fc, + maxExpectedReplicationLag: time.Second * 22, } hashA, hashB, hashC := randHash(t), randHash(t), randHash(t) + + // insert a blocked key that's marked as already checked insertBlockedRow(t, dbMap, fc, hashA, 1, true) - _, err = bkr.selectUncheckedKey() + count, err := bkr.countUncheckedKeys(ctx) + test.AssertNotError(t, err, "countUncheckedKeys failed") + test.AssertEquals(t, count, 0) + _, err = bkr.selectUncheckedKey(ctx) test.AssertError(t, err, "selectUncheckedKey didn't fail with no rows to process") test.Assert(t, db.IsNoRows(err), "returned error is not sql.ErrNoRows") - insertBlockedRow(t, dbMap, fc, hashB, 1, false) + + // insert a blocked key that's due to be checked + insertBlockedRow(t, dbMap, fcBeforeRepLag(fc, bkr), hashB, 1, false) + // insert a freshly blocked key, so it's not yet 
due to be checked insertBlockedRow(t, dbMap, fc, hashC, 1, false) - row, err := bkr.selectUncheckedKey() + count, err = bkr.countUncheckedKeys(ctx) + test.AssertNotError(t, err, "countUncheckedKeys failed") + test.AssertEquals(t, count, 1) + row, err := bkr.selectUncheckedKey(ctx) test.AssertNotError(t, err, "selectUncheckKey failed") test.AssertByteEquals(t, row.KeyHash, hashB) test.AssertEquals(t, row.RevokedBy, int64(1)) } -func insertRegistration(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, addrs ...string) int64 { +func insertRegistration(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock) int64 { t.Helper() - jwkHash := make([]byte, 2) + jwkHash := make([]byte, 32) _, err := rand.Read(jwkHash) test.AssertNotError(t, err, "failed to read rand") - contactStr := "[]" - if len(addrs) > 0 { - contacts := []string{} - for _, addr := range addrs { - contacts = append(contacts, fmt.Sprintf(`"mailto:%s"`, addr)) - } - contactStr = fmt.Sprintf("[%s]", strings.Join(contacts, ",")) - } - res, err := dbMap.Exec( - "INSERT INTO registrations (jwk, jwk_sha256, contact, agreement, initialIP, createdAt, status, LockCol) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + res, err := dbMap.ExecContext( + context.Background(), + "INSERT INTO registrations (jwk, jwk_sha256, agreement, createdAt, status) VALUES (?, ?, ?, ?, ?)", []byte{}, fmt.Sprintf("%x", jwkHash), - contactStr, "yes", - []byte{}, fc.Now(), string(core.StatusValid), - 0, ) test.AssertNotError(t, err, "failed to insert test registrations row") regID, err := res.LastInsertId() @@ -117,33 +126,39 @@ func insertGoodCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash func insertCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash []byte, serial string, regID int64, expiredStatus ExpiredStatus, status core.OCSPStatus) { t.Helper() + ctx := context.Background() expiresOffset := 0 * time.Second if !expiredStatus { expiresOffset = 90*24*time.Hour - 1*time.Second // 90 days exclusive } - _, err := dbMap.Exec( - "INSERT INTO keyHashToSerial (keyHash, certNotAfter, certSerial) VALUES (?, ?, ?)", + _, err := dbMap.ExecContext( + ctx, + `INSERT IGNORE INTO keyHashToSerial + (keyHash, certNotAfter, certSerial) VALUES + (?, ?, ?)`, keyHash, fc.Now().Add(expiresOffset), serial, ) test.AssertNotError(t, err, "failed to insert test keyHashToSerial row") - _, err = dbMap.Exec( + _, err = dbMap.ExecContext( + ctx, "INSERT INTO certificateStatus (serial, status, isExpired, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent) VALUES (?, ?, ?, ?, ?, ?, ?)", serial, status, expiredStatus, fc.Now(), - time.Time{}, + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), 0, - time.Time{}, + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), ) test.AssertNotError(t, err, "failed to insert test certificateStatus row") - _, err = dbMap.Exec( + _, err = dbMap.ExecContext( + ctx, "INSERT INTO precertificates (serial, registrationID, der, issued, expires) VALUES (?, ?, ?, ?, ?)", serial, regID, @@ -153,7 +168,8 @@ func insertCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash []by ) test.AssertNotError(t, err, "failed to insert test certificateStatus row") - _, err = dbMap.Exec( + _, err = dbMap.ExecContext( + ctx, "INSERT INTO certificates (serial, registrationID, der, digest, issued, expires) VALUES (?, ?, ?, ?, ?, ?)", serial, regID, @@ -169,14 +185,17 @@ func insertCert(t *testing.T, dbMap *db.WrappedMap, fc clock.Clock, keyHash []by // does not have a corresponding entry in the certificateStatus and // precertificates table. 
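These tests steer time with fake clocks rather than sleeping: `fcBeforeRepLag` above derives a second clock backdated past the replication-lag window, so rows stamped with it are already old enough to be selected. A minimal sketch of that pattern with jmhodges/clock, assuming the same 22-second lag window the tests use:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jmhodges/clock"
)

func main() {
	lag := 22 * time.Second

	// The clock the component under test reads "now" from.
	fc := clock.NewFake()

	// A second clock set just past the replication-lag window; timestamps
	// taken from it are immediately eligible for processing.
	old := clock.NewFake()
	old.Set(fc.Now().Add(-lag - time.Second))

	cutoff := fc.Now().Add(-lag)
	fmt.Println(old.Now().Before(cutoff)) // true: a row stamped with old is due
}
```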
func TestFindUnrevokedNoRows(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + ctx := context.Background() + + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() hashA := randHash(t) - _, err = dbMap.Exec( + _, err = dbMap.ExecContext( + ctx, "INSERT INTO keyHashToSerial (keyHash, certNotAfter, certSerial) VALUES (?, ?, ?)", hashA, fc.Now().Add(90*24*time.Hour-1*time.Second), // 90 days exclusive @@ -184,84 +203,59 @@ func TestFindUnrevokedNoRows(t *testing.T) { ) test.AssertNotError(t, err, "failed to insert test keyHashToSerial row") - bkr := &badKeyRevoker{dbMap: dbMap, serialBatchSize: 1, maxRevocations: 10, clk: fc} - _, err = bkr.findUnrevoked(uncheckedBlockedKey{KeyHash: hashA}) + bkr := &badKeyRevoker{ + dbMap: dbMap, + serialBatchSize: 1, + maxRevocations: 10, + clk: fc, + maxExpectedReplicationLag: time.Second * 22, + } + _, err = bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA}) test.Assert(t, db.IsNoRows(err), "expected NoRows error") } func TestFindUnrevoked(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + ctx := context.Background() + + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() regID := insertRegistration(t, dbMap, fc) - bkr := &badKeyRevoker{dbMap: dbMap, serialBatchSize: 1, maxRevocations: 10, clk: fc} + bkr := &badKeyRevoker{ + dbMap: dbMap, + serialBatchSize: 1, + maxRevocations: 10, + clk: fc, + maxExpectedReplicationLag: time.Second * 22, + } hashA := randHash(t) // insert valid, unexpired insertCert(t, dbMap, fc, hashA, "ff", regID, Unexpired, Unrevoked) + // insert valid, unexpired, duplicate + insertCert(t, dbMap, fc, hashA, "ff", regID, Unexpired, Unrevoked) // insert valid, expired insertCert(t, dbMap, fc, hashA, "ee", regID, Expired, Unrevoked) // insert revoked insertCert(t, dbMap, fc, hashA, "dd", regID, Unexpired, Revoked) - rows, err := bkr.findUnrevoked(uncheckedBlockedKey{KeyHash: hashA}) + rows, err := bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA}) test.AssertNotError(t, err, "findUnrevoked failed") test.AssertEquals(t, len(rows), 1) test.AssertEquals(t, rows[0].Serial, "ff") - test.AssertEquals(t, rows[0].RegistrationID, int64(1)) + test.AssertEquals(t, rows[0].RegistrationID, regID) test.AssertByteEquals(t, rows[0].DER, []byte{1, 2, 3}) bkr.maxRevocations = 0 - _, err = bkr.findUnrevoked(uncheckedBlockedKey{KeyHash: hashA}) + _, err = bkr.findUnrevoked(ctx, uncheckedBlockedKey{KeyHash: hashA}) test.AssertError(t, err, "findUnrevoked didn't fail with 0 maxRevocations") test.AssertEquals(t, err.Error(), fmt.Sprintf("too many certificates to revoke associated with %x: got 1, max 0", hashA)) } -func TestResolveContacts(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() - - fc := clock.NewFake() - - bkr := &badKeyRevoker{dbMap: dbMap, clk: fc} - - regIDA := insertRegistration(t, dbMap, fc) - regIDB := insertRegistration(t, dbMap, fc, "example.com", "example-2.com") - regIDC := insertRegistration(t, dbMap, fc, "example.com") - regIDD := insertRegistration(t, dbMap, fc, "example-2.com") - - idToEmail, 
err := bkr.resolveContacts([]int64{regIDA, regIDB, regIDC, regIDD}) - test.AssertNotError(t, err, "resolveContacts failed") - test.AssertDeepEquals(t, idToEmail, map[int64][]string{ - regIDA: {""}, - regIDB: {"example.com", "example-2.com"}, - regIDC: {"example.com"}, - regIDD: {"example-2.com"}, - }) -} - -var testTemplate = template.Must(template.New("testing").Parse("{{range .}}{{.}}\n{{end}}")) - -func TestSendMessage(t *testing.T) { - mm := &mocks.Mailer{} - fc := clock.NewFake() - bkr := &badKeyRevoker{mailer: mm, emailSubject: "testing", emailTemplate: testTemplate, clk: fc} - - maxSerials = 2 - err := bkr.sendMessage("example.com", []string{"a", "b", "c"}) - test.AssertNotError(t, err, "sendMessages failed") - test.AssertEquals(t, len(mm.Messages), 1) - test.AssertEquals(t, mm.Messages[0].To, "example.com") - test.AssertEquals(t, mm.Messages[0].Subject, bkr.emailSubject) - test.AssertEquals(t, mm.Messages[0].Body, "a\nb\nand 1 more certificates.\n") - -} - type mockRevoker struct { revoked int mu sync.Mutex @@ -275,42 +269,55 @@ func (mr *mockRevoker) AdministrativelyRevokeCertificate(ctx context.Context, in } func TestRevokeCerts(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() - mm := &mocks.Mailer{} mr := &mockRevoker{} - bkr := &badKeyRevoker{dbMap: dbMap, raClient: mr, mailer: mm, emailSubject: "testing", emailTemplate: testTemplate, clk: fc} + bkr := &badKeyRevoker{ + dbMap: dbMap, + raClient: mr, + clk: fc, + certsRevoked: prometheus.NewCounter(prometheus.CounterOpts{}), + } - err = bkr.revokeCerts([]string{"revoker@example.com", "revoker-b@example.com"}, map[string][]unrevokedCertificate{ - "revoker@example.com": {{ID: 0, Serial: "ff"}}, - "revoker-b@example.com": {{ID: 0, Serial: "ff"}}, - "other@example.com": {{ID: 1, Serial: "ee"}}, + err = bkr.revokeCerts([]unrevokedCertificate{ + {ID: 0, Serial: "ff"}, + {ID: 1, Serial: "ee"}, }) test.AssertNotError(t, err, "revokeCerts failed") - test.AssertEquals(t, len(mm.Messages), 1) - test.AssertEquals(t, mm.Messages[0].To, "other@example.com") - test.AssertEquals(t, mm.Messages[0].Subject, bkr.emailSubject) - test.AssertEquals(t, mm.Messages[0].Body, "ee\n") + test.AssertEquals(t, mr.revoked, 2) } func TestCertificateAbsent(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + ctx := context.Background() + + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() + bkr := &badKeyRevoker{ + dbMap: dbMap, + maxRevocations: 1, + serialBatchSize: 1, + raClient: &mockRevoker{}, + logger: blog.NewMock(), + clk: fc, + maxExpectedReplicationLag: time.Second * 22, + keysToProcess: prometheus.NewGauge(prometheus.GaugeOpts{}), + } // populate DB with all the test data - regIDA := insertRegistration(t, dbMap, fc, "example.com") + regIDA := insertRegistration(t, dbMap, fc) hashA := randHash(t) - insertBlockedRow(t, dbMap, fc, hashA, regIDA, false) + insertBlockedRow(t, dbMap, fcBeforeRepLag(fc, bkr), hashA, regIDA, false) // Add an entry to keyHashToSerial but not to certificateStatus or certificate // status, and expect an error. 
- _, err = dbMap.Exec( + _, err = dbMap.ExecContext( + ctx, "INSERT INTO keyHashToSerial (keyHash, certNotAfter, certSerial) VALUES (?, ?, ?)", hashA, fc.Now().Add(90*24*time.Hour-1*time.Second), // 90 days exclusive @@ -318,83 +325,72 @@ func TestCertificateAbsent(t *testing.T) { ) test.AssertNotError(t, err, "failed to insert test keyHashToSerial row") - bkr := &badKeyRevoker{ - dbMap: dbMap, - maxRevocations: 1, - serialBatchSize: 1, - raClient: &mockRevoker{}, - mailer: &mocks.Mailer{}, - emailSubject: "testing", - emailTemplate: testTemplate, - logger: blog.NewMock(), - clk: fc, - } - _, err = bkr.invoke() + _, err = bkr.invoke(ctx) test.AssertError(t, err, "expected error when row in keyHashToSerial didn't have a matching cert") } func TestInvoke(t *testing.T) { - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + ctx := context.Background() + + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() - mm := &mocks.Mailer{} mr := &mockRevoker{} bkr := &badKeyRevoker{ - dbMap: dbMap, - maxRevocations: 10, - serialBatchSize: 1, - raClient: mr, - mailer: mm, - emailSubject: "testing", - emailTemplate: testTemplate, - logger: blog.NewMock(), - clk: fc, + dbMap: dbMap, + maxRevocations: 10, + serialBatchSize: 1, + raClient: mr, + logger: blog.NewMock(), + clk: fc, + maxExpectedReplicationLag: time.Second * 22, + keysToProcess: prometheus.NewGauge(prometheus.GaugeOpts{}), + certsRevoked: prometheus.NewCounter(prometheus.CounterOpts{}), } // populate DB with all the test data - regIDA := insertRegistration(t, dbMap, fc, "example.com") - regIDB := insertRegistration(t, dbMap, fc, "example.com") - regIDC := insertRegistration(t, dbMap, fc, "other.example.com", "uno.example.com") + regIDA := insertRegistration(t, dbMap, fc) + regIDB := insertRegistration(t, dbMap, fc) + regIDC := insertRegistration(t, dbMap, fc) regIDD := insertRegistration(t, dbMap, fc) hashA := randHash(t) - insertBlockedRow(t, dbMap, fc, hashA, regIDC, false) + insertBlockedRow(t, dbMap, fcBeforeRepLag(fc, bkr), hashA, regIDC, false) insertGoodCert(t, dbMap, fc, hashA, "ff", regIDA) insertGoodCert(t, dbMap, fc, hashA, "ee", regIDB) insertGoodCert(t, dbMap, fc, hashA, "dd", regIDC) insertGoodCert(t, dbMap, fc, hashA, "cc", regIDD) - noWork, err := bkr.invoke() + noWork, err := bkr.invoke(ctx) test.AssertNotError(t, err, "invoke failed") test.AssertEquals(t, noWork, false) test.AssertEquals(t, mr.revoked, 4) - test.AssertEquals(t, len(mm.Messages), 1) - test.AssertEquals(t, mm.Messages[0].To, "example.com") + test.AssertMetricWithLabelsEquals(t, bkr.keysToProcess, prometheus.Labels{}, 1) var checked struct { ExtantCertificatesChecked bool } - err = dbMap.SelectOne(&checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashA) + err = dbMap.SelectOne(ctx, &checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashA) test.AssertNotError(t, err, "failed to select row from blockedKeys") test.AssertEquals(t, checked.ExtantCertificatesChecked, true) // add a row with no associated valid certificates hashB := randHash(t) - insertBlockedRow(t, dbMap, fc, hashB, regIDC, false) + insertBlockedRow(t, dbMap, fcBeforeRepLag(fc, bkr), hashB, regIDC, false) insertCert(t, dbMap, fc, hashB, "bb", regIDA, Expired, Revoked) - noWork, err = bkr.invoke() + noWork, err = bkr.invoke(ctx) test.AssertNotError(t, err, "invoke 
failed") test.AssertEquals(t, noWork, false) checked.ExtantCertificatesChecked = false - err = dbMap.SelectOne(&checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashB) + err = dbMap.SelectOne(ctx, &checked, "SELECT extantCertificatesChecked FROM blockedKeys WHERE keyHash = ?", hashB) test.AssertNotError(t, err, "failed to select row from blockedKeys") test.AssertEquals(t, checked.ExtantCertificatesChecked, true) - noWork, err = bkr.invoke() + noWork, err = bkr.invoke(ctx) test.AssertNotError(t, err, "invoke failed") test.AssertEquals(t, noWork, true) } @@ -405,45 +401,42 @@ func TestInvokeRevokerHasNoExtantCerts(t *testing.T) { // extant certificates themselves their contact email is still // resolved and we avoid sending any emails to accounts that // share the same email. - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) + dbMap, err := sa.DBMapForTest(vars.DBConnSAFullPerms) test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() + defer test.ResetBoulderTestDatabase(t)() fc := clock.NewFake() - mm := &mocks.Mailer{} mr := &mockRevoker{} bkr := &badKeyRevoker{dbMap: dbMap, - maxRevocations: 10, - serialBatchSize: 1, - raClient: mr, - mailer: mm, - emailSubject: "testing", - emailTemplate: testTemplate, - logger: blog.NewMock(), - clk: fc, + maxRevocations: 10, + serialBatchSize: 1, + raClient: mr, + logger: blog.NewMock(), + clk: fc, + maxExpectedReplicationLag: time.Second * 22, + keysToProcess: prometheus.NewGauge(prometheus.GaugeOpts{}), + certsRevoked: prometheus.NewCounter(prometheus.CounterOpts{}), } // populate DB with all the test data - regIDA := insertRegistration(t, dbMap, fc, "a@example.com") - regIDB := insertRegistration(t, dbMap, fc, "a@example.com") - regIDC := insertRegistration(t, dbMap, fc, "b@example.com") + regIDA := insertRegistration(t, dbMap, fc) + regIDB := insertRegistration(t, dbMap, fc) + regIDC := insertRegistration(t, dbMap, fc) hashA := randHash(t) - insertBlockedRow(t, dbMap, fc, hashA, regIDA, false) + insertBlockedRow(t, dbMap, fcBeforeRepLag(fc, bkr), hashA, regIDA, false) insertGoodCert(t, dbMap, fc, hashA, "ee", regIDB) insertGoodCert(t, dbMap, fc, hashA, "dd", regIDB) insertGoodCert(t, dbMap, fc, hashA, "cc", regIDC) insertGoodCert(t, dbMap, fc, hashA, "bb", regIDC) - noWork, err := bkr.invoke() + noWork, err := bkr.invoke(context.Background()) test.AssertNotError(t, err, "invoke failed") test.AssertEquals(t, noWork, false) test.AssertEquals(t, mr.revoked, 4) - test.AssertEquals(t, len(mm.Messages), 1) - test.AssertEquals(t, mm.Messages[0].To, "b@example.com") } func TestBackoffPolicy(t *testing.T) { diff --git a/cmd/boulder-ca/main.go b/cmd/boulder-ca/main.go index 2a5334f0712..a28aad48d50 100644 --- a/cmd/boulder-ca/main.go +++ b/cmd/boulder-ca/main.go @@ -1,26 +1,25 @@ package notmain import ( + "context" "flag" "fmt" "os" - "sync" + "strconv" - "github.com/beeker1121/goque" - "github.com/honeycombio/beeline-go" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "github.com/jmhodges/clock" "github.com/letsencrypt/boulder/ca" capb "github.com/letsencrypt/boulder/ca/proto" "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/ctpolicy/loglist" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/goodkey/sagoodkey" bgrpc "github.com/letsencrypt/boulder/grpc" 
"github.com/letsencrypt/boulder/issuance" - "github.com/letsencrypt/boulder/linter" "github.com/letsencrypt/boulder/policy" + rapb "github.com/letsencrypt/boulder/ra/proto" sapb "github.com/letsencrypt/boulder/sa/proto" ) @@ -28,102 +27,90 @@ type Config struct { CA struct { cmd.ServiceConfig - DB cmd.DBConfig cmd.HostnamePolicyConfig - GRPCCA *cmd.GRPCServerConfig - GRPCOCSPGenerator *cmd.GRPCServerConfig + GRPCCA *cmd.GRPCServerConfig SAService *cmd.GRPCClientConfig + SCTService *cmd.GRPCClientConfig + // Issuance contains all information necessary to load and initialize issuers. Issuance struct { - Profile issuance.ProfileConfig - Issuers []issuance.IssuerConfig - IgnoredLints []string + // The name of the certificate profile to use if one wasn't provided + // by the RA during NewOrder and Finalize requests. Must match a + // configured certificate profile or boulder-ca will fail to start. + // + // Deprecated: set the defaultProfileName in the RA config instead. + DefaultCertificateProfileName string `validate:"omitempty,alphanum,min=1,max=32"` + + // One of the profile names must match the value of ra.defaultProfileName + // or large amounts of issuance will fail. + CertProfiles map[string]issuance.ProfileConfig `validate:"required,dive,keys,alphanum,min=1,max=32,endkeys"` + + // TODO(#7159): Make this required once all live configs are using it. + CRLProfile issuance.CRLProfileConfig `validate:"-"` + Issuers []issuance.IssuerConfig `validate:"min=1,dive"` } - // How long issued certificates are valid for. - Expiry cmd.ConfigDuration - - // How far back certificates should be backdated. - Backdate cmd.ConfigDuration - // What digits we should prepend to serials after randomly generating them. - SerialPrefix int - - // The maximum number of subjectAltNames in a single certificate - MaxNames int - - // LifespanOCSP is how long OCSP responses are valid for; It should be longer - // than the minTimeToExpiry field for the OCSP Updater. - LifespanOCSP cmd.ConfigDuration + // Deprecated: Use SerialPrefixHex instead. + SerialPrefix int `validate:"required_without=SerialPrefixHex,omitempty,min=1,max=127"` + + // SerialPrefixHex is the hex string to prepend to serials after randomly + // generating them. The minimum value is "01" to ensure that at least + // one bit in the prefix byte is set. The maximum value is "7f" to + // ensure that the first bit in the prefix byte is not set. The validate + // library cannot enforce mix/max values on strings, so that is done in + // NewCertificateAuthorityImpl. + // + // TODO(#7213): Replace `required_without` with `required` when SerialPrefix is removed. + SerialPrefixHex string `validate:"required_without=SerialPrefix,omitempty,hexadecimal,len=2"` + + // MaxNames is the maximum number of subjectAltNames in a single cert. + // The value supplied MUST be greater than 0 and no more than 100. These + // limits are per section 7.1 of our combined CP/CPS, under "DV-SSL + // Subscriber Certificate". The value must match the RA and WFE + // configurations. + MaxNames int `validate:"required,min=1,max=100"` // GoodKey is an embedded config stanza for the goodkey library. GoodKey goodkey.Config - // Path to directory holding orphan queue files, if not provided an orphan queue - // is not used. - OrphanQueueDir string - - // Maximum length (in bytes) of a line accumulating OCSP audit log entries. - // Recommended to be around 4000. If this is 0, do not perform OCSP audit - // logging. + // Maximum length (in bytes) of a line documenting the signing of a CRL. 
+ // The name is a carryover from when this config was shared between both + // OCSP and CRL audit log emission. Recommended to be around 4000. OCSPLogMaxLength int - // Maximum period (in Go duration format) to wait to accumulate a max-length - // OCSP audit log line. We will emit a log line at least once per period, - // if there is anything to be logged. Keeping this low minimizes the risk - // of losing logs during a catastrophic failure. Making it too high - // means logging more often than necessary, which is inefficient in terms - // of bytes and log system resources. - // Recommended to be around 500ms. - OCSPLogPeriod cmd.ConfigDuration + // CTLogListFile is the path to a JSON file on disk containing the set of + // all logs trusted by Chrome. The file must match the v3 log list schema: + // https://www.gstatic.com/ct/log_list/v3/log_list_schema.json + CTLogListFile string - // Path of a YAML file containing the list of int64 RegIDs - // allowed to request ECDSA issuance - ECDSAAllowListFilename string + // CTIncludeTestLogs allows logs marked as "test" to be included in the + // CT log list used for linting. This should be enabled in environments + // configured to submit SCTs to test logs. + CTIncludeTestLogs bool - Features map[string]bool - } + // DisableCertService causes the CertificateAuthority gRPC service to not + // start, preventing any certificates or precertificates from being issued. + DisableCertService bool - PA cmd.PAConfig + // DisableCRLService causes the CRLGenerator gRPC service to not start, + // preventing any CRLs from being issued. + DisableCRLService bool - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -func loadBoulderIssuers(profileConfig issuance.ProfileConfig, issuerConfigs []issuance.IssuerConfig, ignoredLints []string) ([]*issuance.Issuer, error) { - issuers := make([]*issuance.Issuer, 0, len(issuerConfigs)) - for _, issuerConfig := range issuerConfigs { - profile, err := issuance.NewProfile(profileConfig, issuerConfig) - if err != nil { - return nil, err - } - - cert, signer, err := issuance.LoadIssuer(issuerConfig.Location) - if err != nil { - return nil, err - } - - linter, err := linter.New(cert.Certificate, signer, ignoredLints) - if err != nil { - return nil, err - } + Features features.Config + } - issuer, err := issuance.NewIssuer(cert, signer, profile, linter, cmd.Clock()) - if err != nil { - return nil, err - } + PA cmd.PAConfig - issuers = append(issuers, issuer) - } - return issuers, nil + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { - caAddr := flag.String("ca-addr", "", "CA gRPC listen address override") - ocspAddr := flag.String("ocsp-addr", "", "OCSP gRPC listen address override") + grpcAddr := flag.String("addr", "", "gRPC listen address override") debugAddr := flag.String("debug-addr", "", "Debug server address override") configFile := flag.String("config", "", "File path to the configuration file for this service") flag.Parse() @@ -136,176 +123,143 @@ func main() { err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.CA.Features) - cmd.FailOnError(err, "Failed to set feature flags") + features.Set(c.CA.Features) - if *caAddr != "" { - c.CA.GRPCCA.Address = *caAddr - } - if *ocspAddr != "" { - c.CA.GRPCOCSPGenerator.Address = *ocspAddr + if *grpcAddr != "" { + c.CA.GRPCCA.Address = *grpcAddr } if *debugAddr != "" { c.CA.DebugAddr = *debugAddr } + serialPrefix := byte(c.CA.SerialPrefix) + if 
c.CA.SerialPrefixHex != "" { + parsedSerialPrefix, err := strconv.ParseUint(c.CA.SerialPrefixHex, 16, 8) + cmd.FailOnError(err, "Couldn't convert SerialPrefixHex to int") + serialPrefix = byte(parsedSerialPrefix) + } + if c.CA.MaxNames == 0 { cmd.Fail("Error in CA config: MaxNames must not be 0") } - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.CA.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - // These two metrics are created and registered here so they can be shared - // between NewCertificateAuthorityImpl and NewOCSPImpl. - signatureCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "signatures", - Help: "Number of signatures", - }, - []string{"purpose", "issuer"}) - scope.MustRegister(signatureCount) - - signErrorCount := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "signature_errors", - Help: "A counter of signature errors labelled by error type", - }, []string{"type"}) - scope.MustRegister(signErrorCount) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CA.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + + metrics := ca.NewCAMetrics(scope) cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration") + cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration") - pa, err := policy.New(c.PA.Challenges) + pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger) cmd.FailOnError(err, "Couldn't create PA") if c.CA.HostnamePolicyFile == "" { - cmd.FailOnError(fmt.Errorf("HostnamePolicyFile was empty."), "") + cmd.Fail("HostnamePolicyFile was empty") + } + err = pa.LoadIdentPolicyFile(c.CA.HostnamePolicyFile) + cmd.FailOnError(err, "Couldn't load identifier policy file") + + // Do this before creating the issuers to ensure the log list is loaded before + // the linters are initialized. + if c.CA.CTLogListFile != "" { + err = loglist.InitLintList(c.CA.CTLogListFile, c.CA.CTIncludeTestLogs) + cmd.FailOnError(err, "Failed to load CT Log List") + } + + profiles := make(map[string]*issuance.Profile) + for name, profileConfig := range c.CA.Issuance.CertProfiles { + profile, err := issuance.NewProfile(profileConfig) + cmd.FailOnError(err, "Loading profile") + profiles[name] = profile } - err = pa.SetHostnamePolicyFile(c.CA.HostnamePolicyFile) - cmd.FailOnError(err, "Couldn't load hostname policy file") - var boulderIssuers []*issuance.Issuer - boulderIssuers, err = loadBoulderIssuers(c.CA.Issuance.Profile, c.CA.Issuance.Issuers, c.CA.Issuance.IgnoredLints) - cmd.FailOnError(err, "Couldn't load issuers") + clk := clock.New() + var crlShards int + issuers := make([]*issuance.Issuer, 0, len(c.CA.Issuance.Issuers)) + for i, issuerConfig := range c.CA.Issuance.Issuers { + // Double check that all issuers have the same number of CRL shards, because + // crl-updater relies upon that invariant. + if issuerConfig.CRLShards != 0 && crlShards == 0 { + crlShards = issuerConfig.CRLShards + } + if issuerConfig.CRLShards != crlShards { + cmd.Fail(fmt.Sprintf("issuer %d has %d shards, want %d", i, issuerConfig.CRLShards, crlShards)) + } + // Also check that all the profiles they list actually exist. 
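This shard-count invariant and the profile check that follows are both startup-time cross-validations between separately configured sections, failing fast rather than at issuance time. A condensed sketch of the two checks using simplified stand-in types, not the real issuance config structs:

```go
package sketch

import "fmt"

type issuerCfg struct {
	CRLShards int
	Profiles  []string
}

// validateIssuers enforces two invariants: every issuer that sets a CRL
// shard count agrees on the same count, and every profile an issuer
// references is actually configured.
func validateIssuers(issuers []issuerCfg, profiles map[string]struct{}) error {
	shards := 0
	for i, ic := range issuers {
		if ic.CRLShards != 0 && shards == 0 {
			shards = ic.CRLShards
		}
		if ic.CRLShards != shards {
			return fmt.Errorf("issuer %d has %d shards, want %d", i, ic.CRLShards, shards)
		}
		for _, p := range ic.Profiles {
			_, found := profiles[p]
			if !found {
				return fmt.Errorf("issuer %d lists unrecognized profile %q", i, p)
			}
		}
	}
	return nil
}
```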
+ for _, profile := range issuerConfig.Profiles { + _, found := profiles[profile] + if !found { + cmd.Fail(fmt.Sprintf("issuer %d lists unrecognized profile %q", i, profile)) + } + } - tlsConfig, err := c.CA.TLS.Load() - cmd.FailOnError(err, "TLS config") + issuer, err := issuance.LoadIssuer(issuerConfig, clk) + cmd.FailOnError(err, "Loading issuer") + issuers = append(issuers, issuer) + } - clk := cmd.Clock() - clientMetrics := bgrpc.NewClientMetrics(scope) + if len(c.CA.Issuance.CertProfiles) == 0 { + cmd.Fail("At least one profile must be configured") + } + + tlsConfig, err := c.CA.TLS.Load(scope) + cmd.FailOnError(err, "TLS config") - conn, err := bgrpc.ClientSetup(c.CA.SAService, tlsConfig, clientMetrics, clk) + saConn, err := bgrpc.ClientSetup(c.CA.SAService, tlsConfig, scope, clk) cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") - sa := sapb.NewStorageAuthorityClient(conn) + sa := sapb.NewStorageAuthorityClient(saConn) - kp, err := goodkey.NewKeyPolicy(&c.CA.GoodKey, sa.KeyBlocked) + var sctService rapb.SCTProviderClient + if c.CA.SCTService != nil { + sctConn, err := bgrpc.ClientSetup(c.CA.SCTService, tlsConfig, scope, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA for SCTs") + sctService = rapb.NewSCTProviderClient(sctConn) + } + + kp, err := sagoodkey.NewPolicy(&c.CA.GoodKey, sa.KeyBlocked) cmd.FailOnError(err, "Unable to create key policy") - var orphanQueue *goque.Queue - if c.CA.OrphanQueueDir != "" { - orphanQueue, err = goque.OpenQueue(c.CA.OrphanQueueDir) - cmd.FailOnError(err, "Failed to open orphaned certificate queue") - defer func() { _ = orphanQueue.Close() }() - } + srv := bgrpc.NewServer(c.CA.GRPCCA, logger) - var ecdsaAllowList *ca.ECDSAAllowList - if c.CA.ECDSAAllowListFilename != "" { - // Create a gauge vector to track allow list reloads. - allowListGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ecdsa_allow_list_status", - Help: "Number of ECDSA allow list entries and status of most recent update attempt", - }, []string{"result"}) - scope.MustRegister(allowListGauge) - - // Create a reloadable allow list object. 
- var entries int - ecdsaAllowList, entries, err = ca.NewECDSAAllowListFromFile(c.CA.ECDSAAllowListFilename, logger, allowListGauge) - cmd.FailOnError(err, "Unable to load ECDSA allow list from YAML file") - logger.Infof("Created a reloadable allow list, it was initialized with %d entries", entries) + if !c.CA.DisableCRLService { + crli, err := ca.NewCRLImpl( + issuers, + c.CA.Issuance.CRLProfile, + c.CA.OCSPLogMaxLength, + logger, + metrics, + ) + cmd.FailOnError(err, "Failed to create CRL impl") + srv = srv.Add(&capb.CRLGenerator_ServiceDesc, crli) } - serverMetrics := bgrpc.NewServerMetrics(scope) - var wg sync.WaitGroup - - ocspi, err := ca.NewOCSPImpl( - boulderIssuers, - c.CA.LifespanOCSP.Duration, - c.CA.OCSPLogMaxLength, - c.CA.OCSPLogPeriod.Duration, - logger, - scope, - signatureCount, - signErrorCount, - clk, - ) - cmd.FailOnError(err, "Failed to create OCSP impl") - go ocspi.LogOCSPLoop() - - ocspSrv, ocspListener, err := bgrpc.NewServer(c.CA.GRPCOCSPGenerator, tlsConfig, serverMetrics, clk) - cmd.FailOnError(err, "Unable to setup CA gRPC server") - capb.RegisterOCSPGeneratorServer(ocspSrv, ocspi) - ocspHealth := health.NewServer() - healthpb.RegisterHealthServer(ocspSrv, ocspHealth) - wg.Add(1) - go func() { - cmd.FailOnError(cmd.FilterShutdownErrors(ocspSrv.Serve(ocspListener)), - "OCSPGenerator gRPC service failed") - wg.Done() - }() - - cai, err := ca.NewCertificateAuthorityImpl( - sa, - pa, - ocspi, - boulderIssuers, - ecdsaAllowList, - c.CA.Expiry.Duration, - c.CA.Backdate.Duration, - c.CA.SerialPrefix, - c.CA.MaxNames, - kp, - orphanQueue, - logger, - scope, - signatureCount, - signErrorCount, - clk) - cmd.FailOnError(err, "Failed to create CA impl") - - if orphanQueue != nil { - go cai.OrphanIntegrationLoop() + if !c.CA.DisableCertService { + cai, err := ca.NewCertificateAuthorityImpl( + sa, + sctService, + pa, + issuers, + profiles, + serialPrefix, + c.CA.MaxNames, + kp, + logger, + metrics, + clk) + cmd.FailOnError(err, "Failed to create CA impl") + + srv = srv.Add(&capb.CertificateAuthority_ServiceDesc, cai) } - caSrv, caListener, err := bgrpc.NewServer(c.CA.GRPCCA, tlsConfig, serverMetrics, clk) + start, err := srv.Build(tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to setup CA gRPC server") - capb.RegisterCertificateAuthorityServer(caSrv, cai) - caHealth := health.NewServer() - healthpb.RegisterHealthServer(caSrv, caHealth) - wg.Add(1) - go func() { - cmd.FailOnError(cmd.FilterShutdownErrors(caSrv.Serve(caListener)), "CA gRPC service failed") - wg.Done() - }() - - go cmd.CatchSignals(logger, func() { - caHealth.Shutdown() - ocspHealth.Shutdown() - ecdsaAllowList.Stop() - caSrv.GracefulStop() - ocspSrv.GracefulStop() - wg.Wait() - ocspi.Stop() - }) - - select {} + + cmd.FailOnError(start(), "CA gRPC service failed") } func init() { - cmd.RegisterCommand("boulder-ca", main) + cmd.RegisterCommand("boulder-ca", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-ca/main_test.go b/cmd/boulder-ca/main_test.go deleted file mode 100644 index 227a9d4affb..00000000000 --- a/cmd/boulder-ca/main_test.go +++ /dev/null @@ -1 +0,0 @@ -package notmain diff --git a/cmd/boulder-observer/README.md b/cmd/boulder-observer/README.md index 3534986578f..13256531268 100644 --- a/cmd/boulder-observer/README.md +++ b/cmd/boulder-observer/README.md @@ -22,9 +22,23 @@ Prometheus. 
  * [HTTP](#http)
    * [Schema](#schema-3)
    * [Example](#example-3)
+  * [CRL](#crl)
+    * [Schema](#schema-4)
+    * [Example](#example-4)
+  * [TLS](#tls)
+    * [Schema](#schema-5)
+    * [Example](#example-5)
 * [Metrics](#metrics)
-  * [obs_monitors](#obs_monitors)
-  * [obs_observations](#obs_observations)
+  * [Global Metrics](#global-metrics)
+    * [obs_monitors](#obs_monitors)
+    * [obs_observations](#obs_observations)
+  * [CRL Metrics](#crl-metrics)
+    * [obs_crl_this_update](#obs_crl_this_update)
+    * [obs_crl_next_update](#obs_crl_next_update)
+    * [obs_crl_revoked_cert_count](#obs_crl_revoked_cert_count)
+  * [TLS Metrics](#tls-metrics)
+    * [obs_tls_not_after](#obs_tls_not_after)
+    * [obs_tls_reason](#obs_tls_reason)
 * [Development](#development)
   * [Starting Prometheus locally](#starting-prometheus-locally)
   * [Viewing metrics locally](#viewing-metrics-locally)
@@ -166,17 +180,64 @@ monitors:
   -
     period: 2s
     kind: HTTP
-    settings: 
+    settings:
       url: http://letsencrypt.org/FOO
       rcodes: [200, 404]
       useragent: letsencrypt/boulder-observer-http-client
 ```

+#### CRL
+
+##### Schema
+
+`url`: Scheme + hostname to fetch the CRL from (e.g. `http://x1.c.lencr.org/`).
+
+##### Example
+
+```yaml
+monitors:
+  -
+    period: 1h
+    kind: CRL
+    settings:
+      url: http://x1.c.lencr.org/
+```
+
+#### TLS
+
+##### Schema
+
+`hostname`: Hostname to run the TLS check on (e.g. `valid-isrgrootx1.letsencrypt.org`).
+
+`rootOrg`: Organization to check against the root certificate Organization (e.g. `Internet Security Research Group`).
+
+`rootCN`: Name to check against the root certificate Common Name (e.g. `ISRG Root X1`). If not provided, root comparison will be skipped.
+
+`response`: Expected site response; must be one of: `valid`, `revoked`, or `expired`.
+
+##### Example
+
+```yaml
+monitors:
+  -
+    period: 1h
+    kind: TLS
+    settings:
+      hostname: valid-isrgrootx1.letsencrypt.org
+      rootOrg: "Internet Security Research Group"
+      rootCN: "ISRG Root X1"
+      response: valid
+```
+
 ## Metrics
 
 Observer provides the following metrics.
 
-### obs_monitors
+### Global Metrics
+
+These metrics will always be available.
+
+#### obs_monitors
 
 Count of configured monitors.
 
@@ -187,7 +248,7 @@ Count of configured monitors.
 `valid`: Bool indicating whether settings provided could be validated for the
 `kind` of Prober specified.
 
-### obs_observations
+#### obs_observations
 
 **Labels:**
 
@@ -204,6 +265,111 @@ successful.
 
 This is configurable, see `buckets` under [root/schema](#schema).
 
+### CRL Metrics
+
+These metrics will be available whenever a valid CRL prober is configured.
+
+#### obs_crl_this_update
+
+Unix timestamp value (in seconds) of the thisUpdate field for a CRL.
+
+**Labels:**
+
+`url`: URL of the CRL
+
+**Example Usage:**
+
+This is a sample rule that alerts when a CRL has a thisUpdate timestamp in the future, signaling that something may have gone wrong during its creation:
+
+```yaml
+- alert: CRLThisUpdateInFuture
+  expr: obs_crl_this_update{url="http://x1.c.lencr.org/"} > time()
+  labels:
+    severity: critical
+  annotations:
+    description: 'CRL thisUpdate is in the future'
+```
+
+#### obs_crl_next_update
+
+Unix timestamp value (in seconds) of the nextUpdate field for a CRL.
+
+**Labels:**
+
+`url`: URL of the CRL
+
+**Example Usage:**
+
+This is a sample rule that alerts when a CRL has a nextUpdate timestamp in the past, signaling that the CRL was not updated on time:
+
+```yaml
+- alert: CRLNextUpdateInPast
+  expr: obs_crl_next_update{url="http://x1.c.lencr.org/"} < time()
+  labels:
+    severity: critical
+  annotations:
+    description: 'CRL nextUpdate is in the past'
+```
+
+Another potentially useful rule would be to notify when nextUpdate is within X days from the current time, as a reminder that the update is coming up soon.
+
+#### obs_crl_revoked_cert_count
+
+Count of revoked certificates in a CRL.
+
+**Labels:**
+
+`url`: URL of the CRL
+
+### TLS Metrics
+
+These metrics will be available whenever a valid TLS prober is configured.
+
+#### obs_tls_not_after
+
+Unix timestamp value (in seconds) of the notAfter field for a subscriber certificate.
+
+**Labels:**
+
+`hostname`: Hostname of the site of the subscriber certificate
+
+**Example Usage:**
+
+This is a sample rule that alerts when a site has a notAfter timestamp indicating that the certificate will expire within the next 20 days:
+
+```yaml
+  - alert: CertExpiresSoonWarning
+    annotations:
+      description: "The certificate at {{ $labels.hostname }} expires within 20 days, on: {{ $value | humanizeTimestamp }}"
+    expr: (obs_tls_not_after{hostname=~"^[^e][a-zA-Z]*-isrgrootx[12][.]letsencrypt[.]org"}) <= time() + 1728000
+    for: 60m
+    labels:
+      severity: warning
+```
+
+#### obs_tls_reason
+
+This is a count that increments by one for each resulting reason of a TLS check. The reason is `nil` if the TLS Prober returns `true` and one of the following otherwise: `internalError`, `ocspError`, `rootDidNotMatch`, `responseDidNotMatch`.
+
+**Labels:**
+
+`hostname`: Hostname of the site of the subscriber certificate
+
+`reason`: The reason the TLS probe returned `false`; `nil` if it returned `true`
+
+**Example Usage:**
+
+This is a sample rule that alerts when the TLS Prober returns false, providing insight into the reason for the failure:
+
+```yaml
+  - alert: TLSCertCheckFailed
+    annotations:
+      description: "The TLS probe for {{ $labels.hostname }} failed for reason: {{ $labels.reason }}. This potentially violates CP 2.2."
+    expr: (rate(obs_observations_count{success="false",name=~"[a-zA-Z]*-isrgrootx[12][.]letsencrypt[.]org"}[5m])) > 0
+    for: 5m
+    labels:
+      severity: critical
+```
+
 ## Development
 
 ### Starting Prometheus locally
diff --git a/cmd/boulder-observer/main.go b/cmd/boulder-observer/main.go
index cf7f9f01a03..bbce31cafb9 100644
--- a/cmd/boulder-observer/main.go
+++ b/cmd/boulder-observer/main.go
@@ -2,38 +2,55 @@ package notmain
 
 import (
 	"flag"
-	"io/ioutil"
+	"os"
+
+	"github.com/letsencrypt/validator/v10"
 
 	"github.com/letsencrypt/boulder/cmd"
+	"github.com/letsencrypt/boulder/config"
 	"github.com/letsencrypt/boulder/observer"
-	"gopkg.in/yaml.v2"
+	"github.com/letsencrypt/boulder/strictyaml"
 )
 
 func main() {
+	debugAddr := flag.String("debug-addr", "", "Debug server address override")
 	configPath := flag.String(
 		"config", "config.yml", "Path to boulder-observer configuration file")
	flag.Parse()
 
-	configYAML, err := ioutil.ReadFile(*configPath)
+	configYAML, err := os.ReadFile(*configPath)
 	cmd.FailOnError(err, "failed to read config file")
 
 	// Parse the YAML config file.
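+	// For illustration only (hypothetical values): a config matching the
+	// monitor schema documented in cmd/boulder-observer/README.md might be:
+	//
+	//   monitors:
+	//     - period: 5s
+	//       kind: HTTP
+	//       settings:
+	//         url: https://letsencrypt.org
+	//         rcodes: [200]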
-	var config observer.ObsConf
-	err = yaml.Unmarshal(configYAML, &config)
+	var obsConf observer.ObsConf
+	err = strictyaml.Unmarshal(configYAML, &obsConf)
 	if err != nil {
 		cmd.FailOnError(err, "failed to parse YAML config")
 	}
 
+	if *debugAddr != "" {
+		obsConf.DebugAddr = *debugAddr
+	}
+
+	// Validate config using struct tags.
+	validate := validator.New()
+	validate.RegisterCustomTypeFunc(config.DurationCustomTypeFunc, config.Duration{})
+	err = validate.Struct(obsConf)
+	if err != nil {
+		cmd.FailOnError(err, "config validation failed")
+	}
+
 	// Make an `Observer` object.
-	observer, err := config.MakeObserver()
+	obs, err := obsConf.MakeObserver()
 	if err != nil {
 		cmd.FailOnError(err, "config failed validation")
 	}
 
 	// Start the `Observer` daemon.
-	observer.Start()
+	obs.Start()
 }
 
 func init() {
-	cmd.RegisterCommand("boulder-observer", main)
+	cmd.RegisterCommand("boulder-observer", main, &cmd.ConfigValidator{Config: &observer.ObsConf{}})
 }
diff --git a/cmd/boulder-publisher/main.go b/cmd/boulder-publisher/main.go
index 61a7748f0df..dceeb6074e6 100644
--- a/cmd/boulder-publisher/main.go
+++ b/cmd/boulder-publisher/main.go
@@ -1,14 +1,14 @@
 package notmain
 
 import (
+	"context"
 	"flag"
+	"fmt"
 	"os"
 	"runtime"
 
 	ct "github.com/google/certificate-transparency-go"
-	"github.com/honeycombio/beeline-go"
-	"google.golang.org/grpc/health"
-	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"github.com/jmhodges/clock"
 
 	"github.com/letsencrypt/boulder/cmd"
 	"github.com/letsencrypt/boulder/features"
@@ -21,7 +21,7 @@ import (
 type Config struct {
 	Publisher struct {
 		cmd.ServiceConfig
-		Features map[string]bool
+		Features features.Config
 
 		// If this is non-zero, profile blocking events such that one event is
 		// sampled every N nanoseconds.
@@ -32,11 +32,11 @@ type Config struct {
 		// Chains is a list of lists of certificate filenames. Each inner list is
 		// a chain, starting with the issuing intermediate, followed by one or
 		// more additional certificates, up to and including a root.
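+		// For example (hypothetical file names), a single configured chain
+		// might look like:
+		//
+		//   "chains": [
+		//     ["intermediate-cert.pem", "root-cert.pem"]
+		//   ]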
- Chains [][]string + Chains [][]string `validate:"min=1,dive,min=2,dive,required"` } - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { @@ -52,8 +52,7 @@ func main() { var c Config err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.Publisher.Features) - cmd.FailOnError(err, "Failed to set feature flags") + features.Set(c.Publisher.Features) runtime.SetBlockProfileRate(c.Publisher.BlockProfileRate) @@ -66,53 +65,40 @@ func main() { if c.Publisher.UserAgent == "" { c.Publisher.UserAgent = "certificate-transparency-go/1.0" } - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.Publisher.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.Publisher.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) if c.Publisher.Chains == nil { - logger.AuditErr("No chain files provided") - os.Exit(1) + cmd.Fail("No chain files provided") } - bundles := make(map[issuance.IssuerNameID][]ct.ASN1Cert) + bundles := make(map[issuance.NameID][]ct.ASN1Cert) for _, files := range c.Publisher.Chains { chain, err := issuance.LoadChain(files) cmd.FailOnError(err, "failed to load chain.") issuer := chain[0] id := issuer.NameID() + if _, exists := bundles[id]; exists { + cmd.Fail(fmt.Sprintf("Got multiple chains configured for issuer %q", issuer.Subject.CommonName)) + } bundles[id] = publisher.GetCTBundleForChain(chain) } - tlsConfig, err := c.Publisher.TLS.Load() + tlsConfig, err := c.Publisher.TLS.Load(scope) cmd.FailOnError(err, "TLS config") - clk := cmd.Clock() + clk := clock.New() pubi := publisher.New(bundles, c.Publisher.UserAgent, logger, scope) - serverMetrics := bgrpc.NewServerMetrics(scope) - grpcSrv, l, err := bgrpc.NewServer(c.Publisher.GRPC, tlsConfig, serverMetrics, clk) + start, err := bgrpc.NewServer(c.Publisher.GRPC, logger).Add( + &pubpb.Publisher_ServiceDesc, pubi).Build(tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to setup Publisher gRPC server") - pubpb.RegisterPublisherServer(grpcSrv, pubi) - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - - go cmd.CatchSignals(logger, func() { - hs.Shutdown() - grpcSrv.GracefulStop() - }) - err = cmd.FilterShutdownErrors(grpcSrv.Serve(l)) - cmd.FailOnError(err, "Publisher gRPC service failed") + cmd.FailOnError(start(), "Publisher gRPC service failed") } func init() { - cmd.RegisterCommand("boulder-publisher", main) + cmd.RegisterCommand("boulder-publisher", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-ra/main.go b/cmd/boulder-ra/main.go index 59e302d3d4f..44af08a39b7 100644 --- a/cmd/boulder-ra/main.go +++ b/cmd/boulder-ra/main.go @@ -1,29 +1,32 @@ package notmain import ( + "context" "flag" - "fmt" "os" "time" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "github.com/jmhodges/clock" - "github.com/honeycombio/beeline-go" - akamaipb "github.com/letsencrypt/boulder/akamai/proto" capb "github.com/letsencrypt/boulder/ca/proto" "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/ctpolicy" "github.com/letsencrypt/boulder/ctpolicy/ctconfig" + 
"github.com/letsencrypt/boulder/ctpolicy/loglist" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/goodkey/sagoodkey" bgrpc "github.com/letsencrypt/boulder/grpc" "github.com/letsencrypt/boulder/issuance" "github.com/letsencrypt/boulder/policy" pubpb "github.com/letsencrypt/boulder/publisher/proto" "github.com/letsencrypt/boulder/ra" rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/ratelimits" + bredis "github.com/letsencrypt/boulder/redis" sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/va" vapb "github.com/letsencrypt/boulder/va/proto" ) @@ -32,69 +35,106 @@ type Config struct { cmd.ServiceConfig cmd.HostnamePolicyConfig + // RateLimitPoliciesFilename is deprecated. RateLimitPoliciesFilename string MaxContactsPerRegistration int - SAService *cmd.GRPCClientConfig - VAService *cmd.GRPCClientConfig - CAService *cmd.GRPCClientConfig - PublisherService *cmd.GRPCClientConfig - AkamaiPurgerService *cmd.GRPCClientConfig - - MaxNames int - - // Controls behaviour of the RA when asked to create a new authz for - // a name/regID that already has a valid authz. False preserves historic - // behaviour and ignores the existing authz and creates a new one. True - // instructs the RA to reuse the previously created authz in lieu of - // creating another. - ReuseValidAuthz bool - - // AuthorizationLifetimeDays defines how long authorizations will be - // considered valid for. Given a value of 300 days when used with a 90-day - // cert lifetime, this allows creation of certs that will cover a whole - // year, plus a grace period of a month. - AuthorizationLifetimeDays int + SAService *cmd.GRPCClientConfig + VAService *cmd.GRPCClientConfig + CAService *cmd.GRPCClientConfig + PublisherService *cmd.GRPCClientConfig + + // Deprecated: TODO(#8349): Remove this when removing the corresponding + // service from the CA. + OCSPService *cmd.GRPCClientConfig + + Limiter struct { + // Redis contains the configuration necessary to connect to Redis + // for rate limiting. This field is required to enable rate + // limiting. + Redis *bredis.Config `validate:"required_with=Defaults"` + + // Defaults is a path to a YAML file containing default rate limits. + // See: ratelimits/README.md for details. This field is required to + // enable rate limiting. If any individual rate limit is not set, + // that limit will be disabled. Limits passed in this file must be + // identical to those in the WFE. + // + // Note: At this time, only the Failed Authorizations rate limit is + // necessary in the RA. + Defaults string `validate:"required_with=Redis"` + + // Overrides is a path to a YAML file containing overrides for the + // default rate limits. See: ratelimits/README.md for details. If + // neither this field nor OverridesFromDB is set, all requesters + // will be subject to the default rate limits. Overrides passed in + // this file must be identical to those in the WFE. + // + // Note: At this time, only the Failed Authorizations overrides are + // necessary in the RA. + Overrides string + + // OverridesFromDB causes the WFE and RA to retrieve rate limit overrides + // from the database, instead of from a file. + OverridesFromDB bool + } - // PendingAuthorizationLifetimeDays defines how long authorizations may be in - // the pending state. If you can't respond to a challenge this quickly, then - // you need to request a new challenge. 
- PendingAuthorizationLifetimeDays int + // MaxNames is the maximum number of subjectAltNames in a single cert. + // The value supplied MUST be greater than 0 and no more than 100. These + // limits are per section 7.1 of our combined CP/CPS, under "DV-SSL + // Subscriber Certificate". The value must match the CA and WFE + // configurations. + // + // Deprecated: Set ValidationProfiles[*].MaxNames instead. + MaxNames int `validate:"omitempty,min=1,max=100"` + + // ValidationProfiles is a map of validation profiles to their + // respective issuance allow lists. If a profile is not included in this + // mapping, it cannot be used by any account. If this field is left + // empty, all profiles are open to all accounts. + ValidationProfiles map[string]*ra.ValidationProfileConfig `validate:"required"` + + // DefaultProfileName sets the profile to use if one wasn't provided by the + // client in the new-order request. Must match a configured validation + // profile or the RA will fail to start. Must match a certificate profile + // configured in the CA or finalization will fail for orders using this + // default. + DefaultProfileName string `validate:"required"` // GoodKey is an embedded config stanza for the goodkey library. GoodKey goodkey.Config - OrderLifetime cmd.ConfigDuration - - // CTLogGroups contains groupings of CT logs which we want SCTs from. - // When we retrieve SCTs we will submit the certificate to each log - // in a group and the first SCT returned will be used. This allows - // us to comply with Chrome CT policy which requires one SCT from a - // Google log and one SCT from any other log included in their policy. - CTLogGroups2 []ctconfig.CTGroup - // InformationalCTLogs are a set of CT logs we will always submit to - // but won't ever use the SCTs from. This may be because we want to - // test them or because they are not yet approved by a browser/root - // program but we still want our certs to end up there. - InformationalCTLogs []ctconfig.LogDescription - - // IssuerCertPath is the path to the intermediate used to issue certificates. - // It is used to generate OCSP URLs to purge at revocation time. - // TODO(#5162): DEPRECATED. Remove this field entirely. - IssuerCertPath string + // FinalizeTimeout is how long the RA is willing to wait for the Order + // finalization process to take. This config parameter only has an effect + // if the AsyncFinalization feature flag is enabled. Any systems which + // manage the shutdown of an RA must be willing to wait at least this long + // after sending the shutdown signal, to allow background goroutines to + // complete. + FinalizeTimeout config.Duration `validate:"-"` + + // CTLogs contains groupings of CT logs organized by what organization + // operates them. When we submit precerts to logs in order to get SCTs, we + // will submit the cert to one randomly-chosen log from each group, and use + // the SCTs from the first two groups which reply. This allows us to comply + // with various CT policies that require (for certs with short lifetimes + // like ours) two SCTs from logs run by different operators. It also holds + // a `Stagger` value controlling how long we wait for one operator group + // to respond before trying a different one. + CTLogs ctconfig.CTConfig + // IssuerCerts are paths to all intermediate certificates which may have // been used to issue certificates in the last 90 days. These are used to // generate OCSP URLs to purge during revocation. 
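+	// The `min=1,dive,required` validate tag below requires at least one
+	// path, and requires each listed path to be non-empty.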
- IssuerCerts []string + IssuerCerts []string `validate:"min=1,dive,required"` - Features map[string]bool + Features features.Config } PA cmd.PAConfig - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { @@ -111,8 +151,7 @@ func main() { err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.RA.Features) - cmd.FailOnError(err, "Failed to set feature flags") + features.Set(c.RA.Features) if *grpcAddr != "" { c.RA.GRPC.Address = *grpcAddr @@ -121,59 +160,46 @@ func main() { c.RA.DebugAddr = *debugAddr } - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.RA.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.RA.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) // Validate PA config and set defaults if needed cmd.FailOnError(c.PA.CheckChallenges(), "Invalid PA configuration") + cmd.FailOnError(c.PA.CheckIdentifiers(), "Invalid PA configuration") - pa, err := policy.New(c.PA.Challenges) + pa, err := policy.New(c.PA.Identifiers, c.PA.Challenges, logger) cmd.FailOnError(err, "Couldn't create PA") if c.RA.HostnamePolicyFile == "" { cmd.Fail("HostnamePolicyFile must be provided.") } - err = pa.SetHostnamePolicyFile(c.RA.HostnamePolicyFile) - cmd.FailOnError(err, "Couldn't load hostname policy file") + err = pa.LoadIdentPolicyFile(c.RA.HostnamePolicyFile) + cmd.FailOnError(err, "Couldn't load identifier policy file") - tlsConfig, err := c.RA.TLS.Load() + tlsConfig, err := c.RA.TLS.Load(scope) cmd.FailOnError(err, "TLS config") - clk := cmd.Clock() - clientMetrics := bgrpc.NewClientMetrics(scope) + clk := clock.New() - vaConn, err := bgrpc.ClientSetup(c.RA.VAService, tlsConfig, clientMetrics, clk) + vaConn, err := bgrpc.ClientSetup(c.RA.VAService, tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to create VA client") vac := vapb.NewVAClient(vaConn) caaClient := vapb.NewCAAClient(vaConn) - caConn, err := bgrpc.ClientSetup(c.RA.CAService, tlsConfig, clientMetrics, clk) + caConn, err := bgrpc.ClientSetup(c.RA.CAService, tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to create CA client") cac := capb.NewCertificateAuthorityClient(caConn) - saConn, err := bgrpc.ClientSetup(c.RA.SAService, tlsConfig, clientMetrics, clk) + saConn, err := bgrpc.ClientSetup(c.RA.SAService, tlsConfig, scope, clk) cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") sac := sapb.NewStorageAuthorityClient(saConn) - var ctp *ctpolicy.CTPolicy - conn, err := bgrpc.ClientSetup(c.RA.PublisherService, tlsConfig, clientMetrics, clk) + conn, err := bgrpc.ClientSetup(c.RA.PublisherService, tlsConfig, scope, clk) cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to Publisher") pubc := pubpb.NewPublisherClient(conn) - apConn, err := bgrpc.ClientSetup(c.RA.AkamaiPurgerService, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Unable to create a Akamai Purger client") - apc := akamaipb.NewAkamaiPurgerClient(apConn) - issuerCertPaths := c.RA.IssuerCerts - if len(issuerCertPaths) == 0 { - issuerCertPaths = []string{c.RA.IssuerCertPath} - } issuerCerts := make([]*issuance.Certificate, len(issuerCertPaths)) for i, issuerCertPath := range issuerCertPaths { 
issuerCerts[i], err = issuance.LoadCertificate(issuerCertPath) @@ -181,51 +207,78 @@ func main() { } // Boulder's components assume that there will always be CT logs configured. - // Issuing a certificate without SCTs embedded is a miss-issuance event in the - // environment Boulder is built for. Exit early if there is no CTLogGroups2 - // configured. - if len(c.RA.CTLogGroups2) == 0 { - cmd.Fail("CTLogGroups2 must not be empty") + // Issuing a certificate without SCTs embedded is a misissuance event as per + // our CPS 4.4.2, which declares we will always include at least two SCTs. + // Exit early if no groups are configured. + var ctp *ctpolicy.CTPolicy + if len(c.RA.CTLogs.SCTLogs) <= 0 { + cmd.Fail("Must configure CTLogs") } - for i, g := range c.RA.CTLogGroups2 { - // Exit early if any of the log groups specify no logs - if len(g.Logs) == 0 { - cmd.Fail( - fmt.Sprintf("CTLogGroups2 index %d specifies no logs", i)) - } - for _, l := range g.Logs { - if l.TemporalSet != nil { - err := l.Setup() - cmd.FailOnError(err, "Failed to setup a temporal log set") - } - } + allLogs, err := loglist.New(c.RA.CTLogs.LogListFile) + cmd.FailOnError(err, "Failed to parse log list") + + sctLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.SCTLogs, loglist.Issuance, c.RA.CTLogs.SubmitToTestLogs) + cmd.FailOnError(err, "Failed to load SCT logs") + + infoLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.InfoLogs, loglist.Informational, true) + cmd.FailOnError(err, "Failed to load informational logs") + + finalLogs, err := allLogs.SubsetForPurpose(c.RA.CTLogs.FinalLogs, loglist.Informational, true) + cmd.FailOnError(err, "Failed to load final logs") + + ctp = ctpolicy.New(pubc, sctLogs, infoLogs, finalLogs, c.RA.CTLogs.Stagger.Duration, logger, scope) + + if len(c.RA.ValidationProfiles) == 0 { + cmd.Fail("At least one profile must be configured") } - ctp = ctpolicy.New(pubc, c.RA.CTLogGroups2, c.RA.InformationalCTLogs, logger, scope) - - // Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document, - // or completed validation MUST be obtained no more than 398 days prior - // to issuing the Certificate". If unconfigured or the configured value is - // greater than 397 days, bail out. - if c.RA.AuthorizationLifetimeDays <= 0 || c.RA.AuthorizationLifetimeDays > 397 { - cmd.Fail("authorizationLifetimeDays value must be greater than 0 and less than 398") + + // TODO(#7993): Remove this fallback and make ValidationProfile.MaxNames a + // required config field. We don't do any validation on the value of this + // top-level MaxNames because that happens inside the call to + // NewValidationProfiles below. + for _, pc := range c.RA.ValidationProfiles { + if pc.MaxNames == 0 { + pc.MaxNames = c.RA.MaxNames + } } - authorizationLifetime := time.Duration(c.RA.AuthorizationLifetimeDays) * 24 * time.Hour - - // The Baseline Requirements v1.8.1 state that validation tokens "MUST - // NOT be used for more than 30 days from its creation". If unconfigured - // or the configured value pendingAuthorizationLifetimeDays is greater - // than 29 days, bail out. 
- if c.RA.PendingAuthorizationLifetimeDays <= 0 || c.RA.PendingAuthorizationLifetimeDays > 29 { - cmd.Fail("pendingAuthorizationLifetimeDays value must be greater than 0 and less than 30") + + validationProfiles, err := ra.NewValidationProfiles(c.RA.DefaultProfileName, c.RA.ValidationProfiles) + cmd.FailOnError(err, "Failed to load validation profiles") + + if features.Get().AsyncFinalize && c.RA.FinalizeTimeout.Duration == 0 { + cmd.Fail("finalizeTimeout must be supplied when AsyncFinalize feature is enabled") } - pendingAuthorizationLifetime := time.Duration(c.RA.PendingAuthorizationLifetimeDays) * 24 * time.Hour - kp, err := goodkey.NewKeyPolicy(&c.RA.GoodKey, sac.KeyBlocked) + kp, err := sagoodkey.NewPolicy(&c.RA.GoodKey, sac.KeyBlocked) cmd.FailOnError(err, "Unable to create key policy") - if c.RA.MaxNames == 0 { - cmd.Fail("Error in RA config: MaxNames must not be 0") + var limiter *ratelimits.Limiter + var txnBuilder *ratelimits.TransactionBuilder + var limiterRedis *bredis.Ring + if c.RA.Limiter.Defaults != "" { + // Setup rate limiting. + limiterRedis, err = bredis.NewRingFromConfig(*c.RA.Limiter.Redis, scope, logger) + cmd.FailOnError(err, "Failed to create Redis ring") + + source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, scope) + limiter, err = ratelimits.NewLimiter(clk, source, scope) + cmd.FailOnError(err, "Failed to create rate limiter") + if c.RA.Limiter.OverridesFromDB { + if c.RA.Limiter.Overrides != "" { + cmd.Fail("OverridesFromDB and an overrides file were both defined, but are mutually exclusive") + } + saroc := sapb.NewStorageAuthorityReadOnlyClient(saConn) + txnBuilder, err = ratelimits.NewTransactionBuilderFromDatabase(c.RA.Limiter.Defaults, saroc.GetEnabledRateLimitOverrides, scope, logger) + } else { + txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.RA.Limiter.Defaults, c.RA.Limiter.Overrides, scope, logger) + } + cmd.FailOnError(err, "Failed to create rate limits transaction builder") + + // The 30 minute period here must be kept in sync with the promise + // (successCommentBody) made to requesters in sfe/overridesimporter.go + overrideRefresherShutdown := txnBuilder.NewRefresher(30 * time.Minute) + defer overrideRefresherShutdown() } rai := ra.NewRegistrationAuthorityImpl( @@ -234,42 +287,35 @@ func main() { scope, c.RA.MaxContactsPerRegistration, kp, + limiter, + txnBuilder, c.RA.MaxNames, - c.RA.ReuseValidAuthz, - authorizationLifetime, - pendingAuthorizationLifetime, + validationProfiles, pubc, - caaClient, - c.RA.OrderLifetime.Duration, + c.RA.FinalizeTimeout.Duration, ctp, - apc, issuerCerts, ) + defer rai.Drain() - policyErr := rai.SetRateLimitPoliciesFile(c.RA.RateLimitPoliciesFilename) - cmd.FailOnError(policyErr, "Couldn't load rate limit policies file") rai.PA = pa - rai.VA = vac + rai.VA = va.RemoteClients{ + VAClient: vac, + CAAClient: caaClient, + } rai.CA = cac rai.SA = sac - serverMetrics := bgrpc.NewServerMetrics(scope) - grpcSrv, listener, err := bgrpc.NewServer(c.RA.GRPC, tlsConfig, serverMetrics, clk) + start, err := bgrpc.NewServer(c.RA.GRPC, logger).Add( + &rapb.RegistrationAuthority_ServiceDesc, rai).Add( + &rapb.SCTProvider_ServiceDesc, rai). 
+ Build(tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to setup RA gRPC server") - rapb.RegisterRegistrationAuthorityServer(grpcSrv, rai) - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - - go cmd.CatchSignals(logger, func() { - hs.Shutdown() - grpcSrv.GracefulStop() - }) - err = cmd.FilterShutdownErrors(grpcSrv.Serve(listener)) - cmd.FailOnError(err, "RA gRPC service failed") + cmd.FailOnError(start(), "RA gRPC service failed") } func init() { - cmd.RegisterCommand("boulder-ra", main) + cmd.RegisterCommand("boulder-ra", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-sa/main.go b/cmd/boulder-sa/main.go index 8abf08f2597..a45ea5e46ee 100644 --- a/cmd/boulder-sa/main.go +++ b/cmd/boulder-sa/main.go @@ -1,19 +1,16 @@ package notmain import ( + "context" "flag" "os" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "github.com/jmhodges/clock" - "github.com/honeycombio/beeline-go" "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/db" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/features" bgrpc "github.com/letsencrypt/boulder/grpc" - "github.com/letsencrypt/boulder/rocsp" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" "github.com/letsencrypt/boulder/sa" sapb "github.com/letsencrypt/boulder/sa/proto" ) @@ -21,19 +18,21 @@ import ( type Config struct { SA struct { cmd.ServiceConfig - DB cmd.DBConfig - ReadOnlyDB cmd.DBConfig - Redis *rocsp_config.RedisConfig - Issuers map[string]int + DB cmd.DBConfig + ReadOnlyDB cmd.DBConfig `validate:"-"` + IncidentsDB cmd.DBConfig `validate:"-"` - Features map[string]bool + Features features.Config // Max simultaneous SQL queries caused by a single RPC. - ParallelismPerRPC int + ParallelismPerRPC int `validate:"omitempty,min=1"` + // LagFactor is how long to sleep before retrying a read request that may + // have failed solely due to replication lag. 
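+		// For example (illustrative values): with a LagFactor of "200ms", a
+		// read that comes back empty may be retried once after sleeping 200ms,
+		// in case the row simply had not replicated to the read replica yet.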
+ LagFactor config.Duration `validate:"-"` } - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { @@ -50,8 +49,7 @@ func main() { err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.SA.Features) - cmd.FailOnError(err, "Failed to set feature flags") + features.Set(c.SA.Features) if *grpcAddr != "" { c.SA.GRPC.Address = *grpcAddr @@ -60,65 +58,48 @@ func main() { c.SA.DebugAddr = *debugAddr } - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.SA.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.SA.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) dbMap, err := sa.InitWrappedDb(c.SA.DB, scope, logger) cmd.FailOnError(err, "While initializing dbMap") - dbReadOnlyURL, err := c.SA.ReadOnlyDB.URL() - cmd.FailOnError(err, "Couldn't load read-only DB URL") - - var dbReadOnlyMap *db.WrappedMap - if dbReadOnlyURL == "" { - dbReadOnlyMap = dbMap - } else { + dbReadOnlyMap := dbMap + if c.SA.ReadOnlyDB != (cmd.DBConfig{}) { dbReadOnlyMap, err = sa.InitWrappedDb(c.SA.ReadOnlyDB, scope, logger) - cmd.FailOnError(err, "While initializing dbMap") + cmd.FailOnError(err, "While initializing dbReadOnlyMap") } - clk := cmd.Clock() - - redisConf := c.SA.Redis - var rocspWriteClient *rocsp.WritingClient - if redisConf != nil { - rocspWriteClient, err = rocsp_config.MakeClient(redisConf, clk, scope) - cmd.FailOnError(err, "making Redis client") + dbIncidentsMap := dbMap + if c.SA.IncidentsDB != (cmd.DBConfig{}) { + dbIncidentsMap, err = sa.InitWrappedDb(c.SA.IncidentsDB, scope, logger) + cmd.FailOnError(err, "While initializing dbIncidentsMap") } - shortIssuers, err := rocsp_config.LoadIssuers(c.SA.Issuers) - cmd.FailOnError(err, "loading issuers") - parallel := c.SA.ParallelismPerRPC - if parallel < 1 { - parallel = 1 - } - sai, err := sa.NewSQLStorageAuthority(dbMap, dbReadOnlyMap, rocspWriteClient, shortIssuers, clk, logger, scope, parallel) - cmd.FailOnError(err, "Failed to create SA impl") + clk := clock.New() + + parallel := max(c.SA.ParallelismPerRPC, 1) - tls, err := c.SA.TLS.Load() + tls, err := c.SA.TLS.Load(scope) cmd.FailOnError(err, "TLS config") - serverMetrics := bgrpc.NewServerMetrics(scope) - grpcSrv, listener, err := bgrpc.NewServer(c.SA.GRPC, tls, serverMetrics, clk, bgrpc.NoCancelInterceptor) - cmd.FailOnError(err, "Unable to setup SA gRPC server") - sapb.RegisterStorageAuthorityServer(grpcSrv, sai) - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - go cmd.CatchSignals(logger, func() { - hs.Shutdown() - grpcSrv.GracefulStop() - }) + saroi, err := sa.NewSQLStorageAuthorityRO( + dbReadOnlyMap, dbIncidentsMap, scope, parallel, c.SA.LagFactor.Duration, clk, logger) + cmd.FailOnError(err, "Failed to create read-only SA impl") + + sai, err := sa.NewSQLStorageAuthorityWrapping(saroi, dbMap, scope) + cmd.FailOnError(err, "Failed to create SA impl") + + start, err := bgrpc.NewServer(c.SA.GRPC, logger).WithCheckInterval(c.SA.HealthCheckInterval.Duration).Add( + &sapb.StorageAuthorityReadOnly_ServiceDesc, saroi).Add( + &sapb.StorageAuthority_ServiceDesc, sai).Build( + tls, scope, clk) + cmd.FailOnError(err, "Unable to setup SA gRPC server") - err = 
cmd.FilterShutdownErrors(grpcSrv.Serve(listener)) - cmd.FailOnError(err, "SA gRPC service failed") + cmd.FailOnError(start(), "SA gRPC service failed") } func init() { - cmd.RegisterCommand("boulder-sa", main) + cmd.RegisterCommand("boulder-sa", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-va/main.go b/cmd/boulder-va/main.go index 54c572e119f..594221dab0c 100644 --- a/cmd/boulder-va/main.go +++ b/cmd/boulder-va/main.go @@ -1,65 +1,67 @@ package notmain import ( + "context" "flag" "os" "time" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "github.com/jmhodges/clock" - "github.com/honeycombio/beeline-go" "github.com/letsencrypt/boulder/bdns" "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/features" bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/iana" "github.com/letsencrypt/boulder/va" + vaConfig "github.com/letsencrypt/boulder/va/config" vapb "github.com/letsencrypt/boulder/va/proto" ) +// RemoteVAGRPCClientConfig contains the information necessary to setup a gRPC +// client connection. The following GRPC client configuration field combinations +// are allowed: +// +// ServerAddress, DNSAuthority, [Timeout], [HostOverride] +// SRVLookup, DNSAuthority, [Timeout], [HostOverride], [SRVResolver] +// SRVLookups, DNSAuthority, [Timeout], [HostOverride], [SRVResolver] +type RemoteVAGRPCClientConfig struct { + cmd.GRPCClientConfig + // Perspective uniquely identifies the Network Perspective used to + // perform the validation, as specified in BRs Section 5.4.1, + // Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts + // from each Network Perspective"). It should uniquely identify a group + // of RVAs deployed in the same datacenter. + Perspective string `validate:"required"` + + // RIR indicates the Regional Internet Registry where this RVA is + // located. This field is used to identify the RIR region from which a + // given validation was performed, as specified in the "Phased + // Implementation Timeline" in BRs Section 3.2.2.9. It must be one of + // the following values: + // - ARIN + // - RIPE + // - APNIC + // - LACNIC + // - AFRINIC + RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"` +} + type Config struct { VA struct { - cmd.ServiceConfig - - UserAgent string - - IssuerDomain string - - PortConfig cmd.PortConfig - - // CAADistributedResolverConfig specifies the HTTP client setup and interfaces - // needed to resolve CAA addresses over multiple paths - CAADistributedResolver struct { - Timeout cmd.ConfigDuration - MaxFailures int - Proxies []string - } - - // The number of times to try a DNS query (that has a temporary error) - // before giving up. May be short-circuited by deadlines. A zero value - // will be turned into 1. - DNSTries int - DNSResolver string - // Deprecated, replaced by singular DNSResolver above. - DNSResolvers []string - DNSTimeout string - DNSAllowLoopbackAddresses bool - - RemoteVAs []cmd.GRPCClientConfig - MaxRemoteValidationFailures int - - Features map[string]bool - - AccountURIPrefixes []string + vaConfig.Common + RemoteVAs []RemoteVAGRPCClientConfig `validate:"omitempty,dive"` + // SlowRemoteTimeout sets how long the VA is willing to wait for slow + // RemoteVA instances to finish their work. It starts counting from + // when the VA first gets a quorum of (un)successful remote results. 
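+		// For example (illustrative values): with a SlowRemoteTimeout of "2s",
+		// once that quorum is reached the VA waits at most two more seconds
+		// for the remaining remotes.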
+ // Leaving this value zero means the VA won't early-cancel slow remotes. + SlowRemoteTimeout config.Duration + Features features.Config } - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig - - Common struct { - DNSTimeout string - DNSAllowLoopbackAddresses bool - } + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { @@ -75,138 +77,83 @@ func main() { var c Config err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") + err = c.VA.SetDefaultsAndValidate(grpcAddr, debugAddr) + cmd.FailOnError(err, "Setting and validating default config values") - err = features.Set(c.VA.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - if *grpcAddr != "" { - c.VA.GRPC.Address = *grpcAddr - } - if *debugAddr != "" { - c.VA.DebugAddr = *debugAddr - } - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.VA.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - pc := &cmd.PortConfig{ - HTTPPort: 80, - HTTPSPort: 443, - TLSPort: 443, - } - if c.VA.PortConfig.HTTPPort != 0 { - pc.HTTPPort = c.VA.PortConfig.HTTPPort - } - if c.VA.PortConfig.HTTPSPort != 0 { - pc.HTTPSPort = c.VA.PortConfig.HTTPSPort - } - if c.VA.PortConfig.TLSPort != 0 { - pc.TLSPort = c.VA.PortConfig.TLSPort - } - - var dnsTimeout time.Duration - if c.VA.DNSTimeout != "" { - dnsTimeout, err = time.ParseDuration(c.VA.DNSTimeout) - } else { - dnsTimeout, err = time.ParseDuration(c.Common.DNSTimeout) - } - cmd.FailOnError(err, "Couldn't parse DNS timeout") - dnsTries := c.VA.DNSTries - if dnsTries < 1 { - dnsTries = 1 - } - clk := cmd.Clock() + features.Set(c.VA.Features) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.VA.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + clk := clock.New() var servers bdns.ServerProvider - if c.VA.DNSResolver != "" { - servers, err = bdns.StartDynamicProvider(c.VA.DNSResolver, 60*time.Second) - cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver") - } else { - servers, err = bdns.NewStaticProvider(c.VA.DNSResolvers) - cmd.FailOnError(err, "Couldn't parse static DNS server(s)") - } - var resolver bdns.Client - if !(c.VA.DNSAllowLoopbackAddresses || c.Common.DNSAllowLoopbackAddresses) { - resolver = bdns.New( - dnsTimeout, - servers, - scope, - clk, - dnsTries, - logger) + if len(c.VA.DNSStaticResolvers) != 0 { + servers, err = bdns.NewStaticProvider(c.VA.DNSStaticResolvers) + cmd.FailOnError(err, "Couldn't start static DNS server resolver") } else { - resolver = bdns.NewTest( - dnsTimeout, - servers, - scope, - clk, - dnsTries, - logger) + servers, err = bdns.StartDynamicProvider(c.VA.DNSProvider, 60*time.Second, "tcp") + cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver") } + defer servers.Stop() - tlsConfig, err := c.VA.TLS.Load() + tlsConfig, err := c.VA.TLS.Load(scope) cmd.FailOnError(err, "tlsConfig config") - clientMetrics := bgrpc.NewClientMetrics(scope) + resolver := bdns.New( + c.VA.DNSTimeout.Duration, + servers, + scope, + clk, + c.VA.DNSTries, + c.VA.UserAgent, + logger, + tlsConfig) + var remotes []va.RemoteVA if len(c.VA.RemoteVAs) > 0 { for _, rva := range c.VA.RemoteVAs { - rva := rva - vaConn, err := bgrpc.ClientSetup(&rva, tlsConfig, clientMetrics, clk) + vaConn, err := bgrpc.ClientSetup(&rva.GRPCClientConfig, tlsConfig, scope, clk) 
cmd.FailOnError(err, "Unable to create remote VA client") remotes = append( remotes, va.RemoteVA{ - VAClient: vapb.NewVAClient(vaConn), - Address: rva.ServerAddress, + RemoteClients: va.RemoteClients{ + VAClient: vapb.NewVAClient(vaConn), + CAAClient: vapb.NewCAAClient(vaConn), + }, + Address: rva.ServerAddress, + Perspective: rva.Perspective, + RIR: rva.RIR, }, ) } } vai, err := va.NewValidationAuthorityImpl( - pc, resolver, remotes, - c.VA.MaxRemoteValidationFailures, c.VA.UserAgent, c.VA.IssuerDomain, scope, clk, logger, - c.VA.AccountURIPrefixes) + c.VA.AccountURIPrefixes, + va.PrimaryPerspective, + "", + iana.IsReservedAddr, + c.VA.SlowRemoteTimeout.Duration, + c.VA.DNSAllowLoopbackAddresses, + ) cmd.FailOnError(err, "Unable to create VA server") - serverMetrics := bgrpc.NewServerMetrics(scope) - grpcSrv, l, err := bgrpc.NewServer(c.VA.GRPC, tlsConfig, serverMetrics, clk) + start, err := bgrpc.NewServer(c.VA.GRPC, logger).Add( + &vapb.VA_ServiceDesc, vai).Add( + &vapb.CAA_ServiceDesc, vai).Build(tlsConfig, scope, clk) cmd.FailOnError(err, "Unable to setup VA gRPC server") - vapb.RegisterVAServer(grpcSrv, vai) - cmd.FailOnError(err, "Unable to register VA gRPC server") - vapb.RegisterCAAServer(grpcSrv, vai) - cmd.FailOnError(err, "Unable to register CAA gRPC server") - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - - go cmd.CatchSignals(logger, func() { - servers.Stop() - hs.Shutdown() - grpcSrv.GracefulStop() - }) - - err = cmd.FilterShutdownErrors(grpcSrv.Serve(l)) - cmd.FailOnError(err, "VA gRPC service failed") + cmd.FailOnError(start(), "VA gRPC service failed") } func init() { - cmd.RegisterCommand("boulder-va", main) - // We register under two different names, because it's convenient for the - // remote VAs to show up under a different program name when looking at logs. 
- cmd.RegisterCommand("boulder-remoteva", main) + cmd.RegisterCommand("boulder-va", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-va/main_test.go b/cmd/boulder-va/main_test.go deleted file mode 100644 index 227a9d4affb..00000000000 --- a/cmd/boulder-va/main_test.go +++ /dev/null @@ -1 +0,0 @@ -package notmain diff --git a/cmd/boulder-wfe2/main.go b/cmd/boulder-wfe2/main.go index 7ab09326e3f..e50e9bae7db 100644 --- a/cmd/boulder-wfe2/main.go +++ b/cmd/boulder-wfe2/main.go @@ -3,72 +3,88 @@ package notmain import ( "bytes" "context" - "crypto/x509" "encoding/pem" "flag" "fmt" - "io/ioutil" - "log" "net/http" "os" "time" - "github.com/honeycombio/beeline-go" "github.com/jmhodges/clock" + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/goodkey/sagoodkey" bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/grpc/noncebalancer" "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - noncepb "github.com/letsencrypt/boulder/nonce/proto" + "github.com/letsencrypt/boulder/nonce" rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/ratelimits" + bredis "github.com/letsencrypt/boulder/redis" sapb "github.com/letsencrypt/boulder/sa/proto" + salesforcepb "github.com/letsencrypt/boulder/salesforce/proto" + "github.com/letsencrypt/boulder/unpause" + "github.com/letsencrypt/boulder/web" "github.com/letsencrypt/boulder/wfe2" - "github.com/prometheus/client_golang/prometheus" ) type Config struct { WFE struct { - cmd.ServiceConfig - ListenAddress string - TLSListenAddress string + DebugAddr string `validate:"omitempty,hostname_port"` - ServerCertificatePath string - ServerKeyPath string + // ListenAddress is the address:port on which to listen for incoming + // HTTP requests. Defaults to ":80". + ListenAddress string `validate:"omitempty,hostname_port"` - AllowOrigins []string + // TLSListenAddress is the address:port on which to listen for incoming + // HTTPS requests. If none is provided the WFE will not listen for HTTPS + // requests. + TLSListenAddress string `validate:"omitempty,hostname_port"` + + // Timeout is the per-request overall timeout. This should be slightly + // lower than the upstream's timeout when making requests to this service. + Timeout config.Duration `validate:"-"` - ShutdownStopTimeout cmd.ConfigDuration + // ShutdownStopTimeout determines the maximum amount of time to wait + // for extant request handlers to complete before exiting. It should be + // greater than Timeout. + ShutdownStopTimeout config.Duration + + ServerCertificatePath string `validate:"required_with=TLSListenAddress"` + ServerKeyPath string `validate:"required_with=TLSListenAddress"` + + AllowOrigins []string SubscriberAgreementURL string TLS cmd.TLSConfig - RAService *cmd.GRPCClientConfig - SAService *cmd.GRPCClientConfig - // GetNonceService contains a gRPC config for any nonce-service instances - // which we want to retrieve nonces from. In a multi-DC deployment this - // should refer to any local nonce-service instances. - GetNonceService *cmd.GRPCClientConfig - // RedeemNonceServices contains a map of nonce-service prefixes to - // gRPC configs we want to use to redeem nonces. In a multi-DC deployment - // this should contain all nonce-services from all DCs as we want to be - // able to redeem nonces generated at any DC. 
- RedeemNonceServices map[string]cmd.GRPCClientConfig - - // CertificateChains maps AIA issuer URLs to certificate filenames. - // Certificates are read into the chain in the order they are defined in the - // slice of filenames. - // DEPRECATED: See Chains, below. - // TODO(5164): Remove this after all configs have migrated to `Chains`. - CertificateChains map[string][]string - - // AlternateCertificateChains maps AIA issuer URLs to an optional alternate - // certificate chain, represented by an ordered slice of certificate filenames. - // DEPRECATED: See Chains, below. - // TODO(5164): Remove this after all configs have migrated to `Chains`. - AlternateCertificateChains map[string][]string + RAService *cmd.GRPCClientConfig + SAService *cmd.GRPCClientConfig + EmailExporter *cmd.GRPCClientConfig + + // GetNonceService is a gRPC config which contains a single SRV name + // used to lookup nonce-service instances used exclusively for nonce + // creation. In a multi-DC deployment this should refer to local + // nonce-service instances only. + GetNonceService *cmd.GRPCClientConfig `validate:"required"` + + // RedeemNonceService is a gRPC config which contains a list of SRV + // names used to lookup nonce-service instances used exclusively for + // nonce redemption. In a multi-DC deployment this should contain both + // local and remote nonce-service instances. + RedeemNonceService *cmd.GRPCClientConfig `validate:"required"` + + // NonceHMACKey is a path to a file containing an HMAC key which is a + // secret used for deriving the prefix of each nonce instance. It should + // contain 256 bits (32 bytes) of random data to be suitable as an + // HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a + // multi-DC deployment this value should be the same across all + // boulder-wfe and nonce-service instances. + NonceHMACKey cmd.HMACKeyConfig `validate:"-"` // Chains is a list of lists of certificate filenames. Each inner list is // a chain (starting with the issuing intermediate, followed by one or @@ -78,20 +94,17 @@ type Config struct { // by that intermediate. The first chain representing any given issuing // key pair will be the default for that issuer, served if the client does // not request a specific chain. - // NOTE: This config field deprecates the CertificateChains and - // AlternateCertificateChains fields. If it is present, those fields are - // ignored. They will be removed in a future release. - Chains [][]string + Chains [][]string `validate:"required,min=1,dive,min=2,dive,required"` - Features map[string]bool + Features features.Config // DirectoryCAAIdentity is used for the /directory response's "meta" // element's "caaIdentities" field. It should match the VA's "issuerDomain" // configuration value (this value is the one used to enforce CAA) - DirectoryCAAIdentity string + DirectoryCAAIdentity string `validate:"required,fqdn"` // DirectoryWebsite is used for the /directory response's "meta" element's // "website" field. - DirectoryWebsite string + DirectoryWebsite string `validate:"required,url"` // ACMEv2 requests (outside some registration/revocation messages) use a JWS with // a KeyID header containing the full account URL. For new accounts this @@ -100,160 +113,91 @@ type Config struct { // ID prefix that legacy accounts would have been using based on the Host // header of the WFE1 instance and the legacy 'reg' path component. This // will differ in configuration for production and staging. 
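+		// For example (hypothetical value): "https://acme-v1.example.org/acme/reg/".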
- LegacyKeyIDPrefix string + LegacyKeyIDPrefix string `validate:"required,url"` // GoodKey is an embedded config stanza for the goodkey library. GoodKey goodkey.Config // StaleTimeout determines how old should data be to be accessed via Boulder-specific GET-able APIs - StaleTimeout cmd.ConfigDuration - - // AuthorizationLifetimeDays defines how long authorizations will be - // considered valid for. The WFE uses this to find the creation date of - // authorizations by subtracing this value from the expiry. It should match - // the value configured in the RA. - AuthorizationLifetimeDays int - - // PendingAuthorizationLifetimeDays defines how long authorizations may be in - // the pending state before expiry. The WFE uses this to find the creation - // date of pending authorizations by subtracting this value from the expiry. - // It should match the value configured in the RA. - PendingAuthorizationLifetimeDays int - - AccountCache *CacheConfig - } + StaleTimeout config.Duration `validate:"-"` - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -type CacheConfig struct { - Size int - TTL cmd.ConfigDuration -} - -// loadCertificateFile loads a PEM certificate from the certFile provided. It -// validates that the PEM is well-formed with no leftover bytes, and contains -// only a well-formed X509 CA certificate. If the cert file meets these -// requirements the PEM bytes from the file are returned along with the parsed -// certificate, otherwise an error is returned. If the PEM contents of -// a certFile do not have a trailing newline one is added. -// TODO(5164): Remove this after all configs have migrated to `Chains`. -func loadCertificateFile(aiaIssuerURL, certFile string) ([]byte, *issuance.Certificate, error) { - pemBytes, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - error reading contents: %w", - aiaIssuerURL, certFile, err) - } - if bytes.Contains(pemBytes, []byte("\r\n")) { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - contents had CRLF line endings", - aiaIssuerURL, certFile) - } - // Try to decode the contents as PEM - certBlock, rest := pem.Decode(pemBytes) - if certBlock == nil { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - contents did not decode as PEM", - aiaIssuerURL, certFile) - } - // The PEM contents must be a CERTIFICATE - if certBlock.Type != "CERTIFICATE" { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - PEM block type incorrect, found "+ - "%q, expected \"CERTIFICATE\"", - aiaIssuerURL, certFile, certBlock.Type) - } - // The PEM Certificate must successfully parse - cert, err := x509.ParseCertificate(certBlock.Bytes) - if err != nil { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - certificate bytes failed to parse: %w", - aiaIssuerURL, certFile, err) - } - // If there are bytes leftover we must reject the file otherwise these - // leftover bytes will end up in a served certificate chain. 
- if len(rest) != 0 { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - PEM contents had unused remainder "+ - "input (%d bytes)", - aiaIssuerURL, certFile, len(rest)) - } - // If the PEM contents don't end in a \n, add it. - if pemBytes[len(pemBytes)-1] != '\n' { - pemBytes = append(pemBytes, '\n') - } - ic, err := issuance.NewCertificate(cert) - if err != nil { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has an "+ - "invalid chain file: %q - unable to load issuer certificate: %w", - aiaIssuerURL, certFile, err) - } - return pemBytes, ic, nil -} + // AuthorizationLifetimeDays duplicates the RA's config of the same name. + // Deprecated: This field no longer has any effect. + AuthorizationLifetimeDays int `validate:"-"` -// loadCertificateChains processes the provided chainConfig of AIA Issuer URLs -// and cert filenames. For each AIA issuer URL all of its cert filenames are -// read, validated as PEM certificates, and concatenated together separated by -// newlines. The combined PEM certificate chain contents for each are returned -// in the results map, keyed by the IssuerNameID. Additionally the first -// certificate in each chain is parsed and returned in a slice of issuer -// certificates. -// TODO(5164): Remove this after all configs have migrated to `Chains`. -func loadCertificateChains(chainConfig map[string][]string, requireAtLeastOneChain bool) (map[issuance.IssuerNameID][]byte, map[issuance.IssuerNameID]*issuance.Certificate, error) { - results := make(map[issuance.IssuerNameID][]byte, len(chainConfig)) - issuerCerts := make(map[issuance.IssuerNameID]*issuance.Certificate, len(chainConfig)) - - // For each AIA Issuer URL we need to read the chain cert files - for aiaIssuerURL, certFiles := range chainConfig { - var buffer bytes.Buffer - - // There must be at least one chain file specified - if requireAtLeastOneChain && len(certFiles) == 0 { - return nil, nil, fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has no chain "+ - "file names configured", - aiaIssuerURL) - } + // PendingAuthorizationLifetimeDays duplicates the RA's config of the same name. + // Deprecated: This field no longer has any effect. + PendingAuthorizationLifetimeDays int `validate:"-"` - // certFiles are read and appended in the order they appear in the - // configuration - var id issuance.IssuerNameID - for i, c := range certFiles { - // Prepend a newline before each chain entry - buffer.Write([]byte("\n")) - - // Read and validate the chain file contents - pemBytes, cert, err := loadCertificateFile(aiaIssuerURL, c) - if err != nil { - return nil, nil, err - } + // MaxContactsPerRegistration limits the number of contact addresses which + // can be provided in a single NewAccount request. Requests containing more + // contacts than this are rejected. Default: 10. + MaxContactsPerRegistration int `validate:"omitempty,min=1"` - // Save the first certificate as a direct issuer certificate - if i == 0 { - id = cert.NameID() - issuerCerts[id] = cert - } + AccountCache *CacheConfig - // Write the PEM bytes to the result buffer for this AIAIssuer - buffer.Write(pemBytes) + Limiter struct { + // Redis contains the configuration necessary to connect to Redis + // for rate limiting. This field is required to enable rate + // limiting. + Redis *bredis.Config `validate:"required_with=Defaults"` + + // Defaults is a path to a YAML file containing default rate limits. + // See: ratelimits/README.md for details. 
This field is required to + // enable rate limiting. If any individual rate limit is not set, + // that limit will be disabled. Failed Authorizations limits passed + // in this file must be identical to those in the RA. + Defaults string `validate:"required_with=Redis"` + + // Overrides is a path to a YAML file containing overrides for the + // default rate limits. See: ratelimits/README.md for details. If + // neither this field nor OverridesFromDB is set, all requesters + // will be subject to the default rate limits. Failed Authorizations + // overrides passed in this file must be identical to those in the + // RA. + Overrides string + + // OverridesFromDB causes the WFE and RA to retrieve rate limit + // overrides from the database, instead of from a file. + OverridesFromDB bool } - // Save the full PEM chain contents, if any - if buffer.Len() > 0 { - results[id] = buffer.Bytes() + // CertProfiles is a map of acceptable certificate profile names to + // descriptions (perhaps including URLs) of those profiles. NewOrder + // requests with a profile name not present in this map will be rejected. + // This field is optional; if unset, no profile names are accepted. + CertProfiles map[string]string `validate:"omitempty,dive,keys,alphanum,min=1,max=32,endkeys"` + + Unpause struct { + // HMACKey signs outgoing JWTs for redemption at the unpause + // endpoint. This key must match the one configured for all SFEs. + // This field is required to enable the pausing feature. + HMACKey cmd.HMACKeyConfig `validate:"required_with=JWTLifetime URL,structonly"` + + // JWTLifetime is the lifetime of the unpause JWTs generated by the + // WFE for redemption at the SFE. The minimum value for this field + // is 336h (14 days). This field is required to enable the pausing + // feature. + JWTLifetime config.Duration `validate:"omitempty,required_with=HMACKey URL,min=336h"` + + // URL is the URL of the Self-Service Frontend (SFE). This is used + // to build URLs sent to end-users in error messages. This field + // must be a URL with a scheme of 'https://'. This field is required + // to enable the pausing feature.
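The Unpause block ties three settings together: a shared HMAC key, a JWT lifetime of at least 336h, and an https:// SFE URL. Boulder's unpause package supplies its own JWTSigner; the sketch below shows only the general shape of an HMAC-signed, lifetime-bounded token, using the third-party golang-jwt/jwt/v5 package purely as a stand-in.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

const minJWTLifetime = 336 * time.Hour // 14 days, matching the config minimum

// newUnpauseJWT sketches issuing an HMAC-signed token with a bounded
// lifetime, in the spirit of the WFE-to-SFE unpause flow described above.
func newUnpauseJWT(key []byte, subject string, lifetime time.Duration) (string, error) {
	if lifetime < minJWTLifetime {
		return "", fmt.Errorf("lifetime %s below minimum %s", lifetime, minJWTLifetime)
	}
	claims := jwt.RegisteredClaims{
		Subject:   subject,
		IssuedAt:  jwt.NewNumericDate(time.Now()),
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(lifetime)),
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
}

func main() {
	// Key and subject here are illustrative values, not boulder's format.
	tok, err := newUnpauseJWT([]byte("example-shared-hmac-key"), "account-123", 336*time.Hour)
	fmt.Println(tok, err)
}
```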
+ URL string `validate:"omitempty,required_with=HMACKey JWTLifetime,url,startswith=https://,endsnotwith=/"` } } - return results, issuerCerts, nil + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig + + // OpenTelemetryHTTPConfig configures tracing on incoming HTTP requests + OpenTelemetryHTTPConfig cmd.OpenTelemetryHTTPConfig +} + +type CacheConfig struct { + Size int + TTL config.Duration } // loadChain takes a list of filenames containing pem-formatted certificates, @@ -277,52 +221,10 @@ func loadChain(certFiles []string) (*issuance.Certificate, []byte, error) { return certs[0], buf.Bytes(), nil } -func setupWFE(c Config, logger blog.Logger, stats prometheus.Registerer, clk clock.Clock) (rapb.RegistrationAuthorityClient, sapb.StorageAuthorityClient, noncepb.NonceServiceClient, map[string]noncepb.NonceServiceClient) { - tlsConfig, err := c.WFE.TLS.Load() - cmd.FailOnError(err, "TLS config") - clientMetrics := bgrpc.NewClientMetrics(stats) - raConn, err := bgrpc.ClientSetup(c.WFE.RAService, tlsConfig, clientMetrics, clk, bgrpc.CancelTo408Interceptor) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA") - rac := rapb.NewRegistrationAuthorityClient(raConn) - - saConn, err := bgrpc.ClientSetup(c.WFE.SAService, tlsConfig, clientMetrics, clk, bgrpc.CancelTo408Interceptor) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") - sac := sapb.NewStorageAuthorityClient(saConn) - - var rns noncepb.NonceServiceClient - npm := map[string]noncepb.NonceServiceClient{} - if c.WFE.GetNonceService != nil { - rnsConn, err := bgrpc.ClientSetup(c.WFE.GetNonceService, tlsConfig, clientMetrics, clk, bgrpc.CancelTo408Interceptor) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to get nonce service") - rns = noncepb.NewNonceServiceClient(rnsConn) - for prefix, serviceConfig := range c.WFE.RedeemNonceServices { - serviceConfig := serviceConfig - conn, err := bgrpc.ClientSetup(&serviceConfig, tlsConfig, clientMetrics, clk, bgrpc.CancelTo408Interceptor) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to redeem nonce service") - npm[prefix] = noncepb.NewNonceServiceClient(conn) - } - } - - return rac, sac, rns, npm -} - -type errorWriter struct { - blog.Logger -} - -func (ew errorWriter) Write(p []byte) (n int, err error) { - // log.Logger will append a newline to all messages before calling - // Write. Our log checksum checker doesn't like newlines, because - // syslog will strip them out so the calculated checksums will - // differ. So that we don't hit this corner case for every line - // logged from inside net/http.Server we strip the newline before - // we get to the checksum generator. 
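loadChain, referenced above, concatenates per-file PEM certificates into one chain blob. A hedged sketch of that kind of PEM hygiene: insist on exactly one CERTIFICATE block per file, reject trailing bytes, and re-encode so the concatenation is well-formed regardless of each input file's trailing newline. The file names in main are hypothetical.

```go
package main

import (
	"bytes"
	"encoding/pem"
	"fmt"
	"os"
)

// concatChain requires each file to contain exactly one CERTIFICATE block,
// then re-encodes the blocks back-to-back so the resulting chain is valid
// PEM no matter how the inputs were terminated.
func concatChain(paths []string) ([]byte, error) {
	var buf bytes.Buffer
	for _, path := range paths {
		raw, err := os.ReadFile(path)
		if err != nil {
			return nil, err
		}
		block, rest := pem.Decode(raw)
		if block == nil || block.Type != "CERTIFICATE" {
			return nil, fmt.Errorf("%s: no CERTIFICATE PEM block", path)
		}
		if len(bytes.TrimSpace(rest)) != 0 {
			return nil, fmt.Errorf("%s: %d bytes of trailing garbage", path, len(rest))
		}
		err = pem.Encode(&buf, block)
		if err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

func main() {
	// int.pem and root.pem are hypothetical input files.
	out, err := concatChain([]string{"int.pem", "root.pem"})
	fmt.Println(len(out), err)
}
```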
- p = bytes.TrimRight(p, "\n") - ew.Logger.Err(fmt.Sprintf("net/http.Server: %s", string(p))) - return -} - func main() { + listenAddr := flag.String("addr", "", "HTTP listen address override") + tlsAddr := flag.String("tls-addr", "", "HTTPS listen address override") + debugAddr := flag.String("debug-addr", "", "Debug server address override") configFile := flag.String("config", "", "File path to the configuration file for this service") flag.Parse() if *configFile == "" { @@ -334,86 +236,123 @@ func main() { err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.WFE.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - allCertChains := map[issuance.IssuerNameID][][]byte{} - issuerCerts := map[issuance.IssuerNameID]*issuance.Certificate{} - if c.WFE.Chains != nil { - for _, files := range c.WFE.Chains { - issuer, chain, err := loadChain(files) - cmd.FailOnError(err, "Failed to load chain") - - id := issuer.NameID() - allCertChains[id] = append(allCertChains[id], chain) - // This may overwrite a previously-set issuerCert (e.g. if there are two - // chains for the same issuer, but with different versions of the same - // same intermediate issued by different roots). This is okay, as the - // only truly important content here is the public key to verify other - // certs. - issuerCerts[id] = issuer - } - } else { - // TODO(5164): Remove this after all configs have migrated to `Chains`. - var certChains map[issuance.IssuerNameID][]byte - certChains, issuerCerts, err = loadCertificateChains(c.WFE.CertificateChains, true) - cmd.FailOnError(err, "Couldn't read configured CertificateChains") + features.Set(c.WFE.Features) - for nameID, chainPEM := range certChains { - allCertChains[nameID] = [][]byte{chainPEM} - } + if *listenAddr != "" { + c.WFE.ListenAddress = *listenAddr + } + if *tlsAddr != "" { + c.WFE.TLSListenAddress = *tlsAddr + } + if *debugAddr != "" { + c.WFE.DebugAddr = *debugAddr + } - if c.WFE.AlternateCertificateChains != nil { - altCertChains, _, err := loadCertificateChains(c.WFE.AlternateCertificateChains, false) - cmd.FailOnError(err, "Couldn't read configured AlternateCertificateChains") + certChains := map[issuance.NameID][][]byte{} + issuerCerts := map[issuance.NameID]*issuance.Certificate{} + for _, files := range c.WFE.Chains { + issuer, chain, err := loadChain(files) + cmd.FailOnError(err, "Failed to load chain") + + id := issuer.NameID() + certChains[id] = append(certChains[id], chain) + // This may overwrite a previously-set issuerCert (e.g. if there are two + // chains for the same issuer, but with different versions of the same + // same intermediate issued by different roots). This is okay, as the + // only truly important content here is the public key to verify other + // certs. 
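The new -addr/-tls-addr/-debug-addr flags follow a simple precedence rule: a non-empty flag overrides whatever the JSON config supplied. The same pattern in isolation, with the config struct and default value invented for illustration:

```go
package main

import (
	"flag"
	"fmt"
)

type config struct{ ListenAddress string }

func main() {
	// A non-empty flag wins over the config file, mirroring the
	// -addr/-tls-addr/-debug-addr overrides added to main() above.
	listenAddr := flag.String("addr", "", "HTTP listen address override")
	flag.Parse()

	c := config{ListenAddress: ":8080"} // pretend this came from the config file
	if *listenAddr != "" {
		c.ListenAddress = *listenAddr
	}
	fmt.Println(c.ListenAddress)
}
```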
+ issuerCerts[id] = issuer + } - for nameID, chainPEM := range altCertChains { - if _, ok := allCertChains[nameID]; !ok { - cmd.Fail(fmt.Sprintf("IssuerNameId %q appeared in AlternateCertificateChains, "+ - "but does not exist in CertificateChains", nameID)) - } - allCertChains[nameID] = append(allCertChains[nameID], chainPEM) - } - } + stats, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.WFE.DebugAddr) + cmd.LogStartup(logger) + + clk := clock.New() + + var unpauseSigner unpause.JWTSigner + if features.Get().CheckIdentifiersPaused { + unpauseSigner, err = unpause.NewJWTSigner(c.WFE.Unpause.HMACKey) + cmd.FailOnError(err, "Failed to create unpause signer from HMACKey") } - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() + tlsConfig, err := c.WFE.TLS.Load(stats) + cmd.FailOnError(err, "TLS config") - stats, logger := cmd.StatsAndLogging(c.Syslog, c.WFE.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + raConn, err := bgrpc.ClientSetup(c.WFE.RAService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA") + rac := rapb.NewRegistrationAuthorityClient(raConn) - clk := cmd.Clock() + saConn, err := bgrpc.ClientSetup(c.WFE.SAService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") + sac := sapb.NewStorageAuthorityReadOnlyClient(saConn) - rac, sac, rns, npm := setupWFE(c, logger, stats, clk) + var eec salesforcepb.ExporterClient + if c.WFE.EmailExporter != nil { + emailExporterConn, err := bgrpc.ClientSetup(c.WFE.EmailExporter, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to email-exporter") + eec = salesforcepb.NewExporterClient(emailExporterConn) + } - kp, err := goodkey.NewKeyPolicy(&c.WFE.GoodKey, sac.KeyBlocked) + if c.WFE.RedeemNonceService == nil { + cmd.Fail("'redeemNonceService' must be configured.") + } + if c.WFE.GetNonceService == nil { + cmd.Fail("'getNonceService' must be configured") + } + + noncePrefixKey, err := c.WFE.NonceHMACKey.Load() + cmd.FailOnError(err, "Failed to load nonceHMACKey file") + + getNonceConn, err := bgrpc.ClientSetup(c.WFE.GetNonceService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to get nonce service") + gnc := nonce.NewGetter(getNonceConn) + + if c.WFE.RedeemNonceService.SRVResolver != noncebalancer.SRVResolverScheme { + cmd.Fail(fmt.Sprintf( + "'redeemNonceService.SRVResolver' must be set to %q", noncebalancer.SRVResolverScheme), + ) + } + redeemNonceConn, err := bgrpc.ClientSetup(c.WFE.RedeemNonceService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to redeem nonce service") + rnc := nonce.NewRedeemer(redeemNonceConn) + + kp, err := sagoodkey.NewPolicy(&c.WFE.GoodKey, sac.KeyBlocked) cmd.FailOnError(err, "Unable to create key policy") if c.WFE.StaleTimeout.Duration == 0 { c.WFE.StaleTimeout.Duration = time.Minute * 10 } - // Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document, - // or completed validation MUST be obtained no more than 398 days prior - // to issuing the Certificate". If unconfigured or the configured value is - // greater than 397 days, bail out. 
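The deleted checks that follow enforce the Baseline Requirements windows quoted in the comments: reused validation data must be no more than 398 days old (so the configured value must be 1 through 397), and pending authorizations no more than 30 days (1 through 29). A compact restatement of the bounds-then-convert step:

```go
package main

import (
	"fmt"
	"time"
)

// daysToLifetime converts a configured day count to a time.Duration,
// accepting only 0 < days <= maxDays, exactly as the deleted checks did.
func daysToLifetime(days, maxDays int) (time.Duration, error) {
	if days <= 0 || days > maxDays {
		return 0, fmt.Errorf("lifetime must be in (0, %d] days, got %d", maxDays, days)
	}
	return time.Duration(days) * 24 * time.Hour, nil
}

func main() {
	fmt.Println(daysToLifetime(397, 397)) // authorization reuse: ok, 9528h0m0s
	fmt.Println(daysToLifetime(30, 29))   // pending authorization: rejected
}
```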
- if c.WFE.AuthorizationLifetimeDays <= 0 || c.WFE.AuthorizationLifetimeDays > 397 { - cmd.Fail("authorizationLifetimeDays value must be greater than 0 and less than 398") + if c.WFE.MaxContactsPerRegistration == 0 { + c.WFE.MaxContactsPerRegistration = 10 } - authorizationLifetime := time.Duration(c.WFE.AuthorizationLifetimeDays) * 24 * time.Hour - - // The Baseline Requirements v1.8.1 state that validation tokens "MUST - // NOT be used for more than 30 days from its creation". If unconfigured - // or the configured value pendingAuthorizationLifetimeDays is greater - // than 29 days, bail out. - if c.WFE.PendingAuthorizationLifetimeDays <= 0 || c.WFE.PendingAuthorizationLifetimeDays > 29 { - cmd.Fail("pendingAuthorizationLifetimeDays value must be greater than 0 and less than 30") + + var limiter *ratelimits.Limiter + var txnBuilder *ratelimits.TransactionBuilder + var limiterRedis *bredis.Ring + overridesRefresherShutdown := func() {} + if c.WFE.Limiter.Defaults != "" { + // Setup rate limiting. + limiterRedis, err = bredis.NewRingFromConfig(*c.WFE.Limiter.Redis, stats, logger) + cmd.FailOnError(err, "Failed to create Redis ring") + + source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, stats) + limiter, err = ratelimits.NewLimiter(clk, source, stats) + cmd.FailOnError(err, "Failed to create rate limiter") + if c.WFE.Limiter.OverridesFromDB { + if c.WFE.Limiter.Overrides != "" { + cmd.Fail("OverridesFromDB and an overrides file were both defined, but are mutually exclusive") + } + txnBuilder, err = ratelimits.NewTransactionBuilderFromDatabase(c.WFE.Limiter.Defaults, sac.GetEnabledRateLimitOverrides, stats, logger) + } else { + txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.WFE.Limiter.Defaults, c.WFE.Limiter.Overrides, stats, logger) + } + cmd.FailOnError(err, "Failed to create rate limits transaction builder") + + // The 30 minute period here must be kept in sync with the promise + // (successCommentBody) made to requesters in sfe/overridesimporter.go + overridesRefresherShutdown = txnBuilder.NewRefresher(30 * time.Minute) } - pendingAuthorizationLifetime := time.Duration(c.WFE.PendingAuthorizationLifetimeDays) * 24 * time.Hour var accountGetter wfe2.AccountGetter if c.WFE.AccountCache != nil { @@ -429,17 +368,25 @@ func main() { stats, clk, kp, - allCertChains, + certChains, issuerCerts, - rns, - npm, logger, + c.WFE.Timeout.Duration, c.WFE.StaleTimeout.Duration, - authorizationLifetime, - pendingAuthorizationLifetime, + c.WFE.MaxContactsPerRegistration, rac, sac, + eec, + gnc, + rnc, + noncePrefixKey, accountGetter, + limiter, + txnBuilder, + c.WFE.CertProfiles, + unpauseSigner, + c.WFE.Unpause.JWTLifetime.Duration, + c.WFE.Unpause.URL, ) cmd.FailOnError(err, "Unable to create WFE") @@ -449,19 +396,14 @@ func main() { wfe.DirectoryWebsite = c.WFE.DirectoryWebsite wfe.LegacyKeyIDPrefix = c.WFE.LegacyKeyIDPrefix - logger.Infof("WFE using key policy: %#v", kp) + if c.WFE.ListenAddress == "" { + cmd.Fail("HTTP listen address is not configured") + } logger.Infof("Server running, listening on %s....", c.WFE.ListenAddress) - handler := wfe.Handler(stats) - srv := http.Server{ - ReadTimeout: 30 * time.Second, - WriteTimeout: 120 * time.Second, - IdleTimeout: 120 * time.Second, - Addr: c.WFE.ListenAddress, - ErrorLog: log.New(errorWriter{logger}, "", 0), - Handler: handler, - } + handler := wfe.Handler(stats, c.OpenTelemetryHTTPConfig.Options()...) 
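txnBuilder.NewRefresher(30 * time.Minute) above returns a shutdown function that the deferred cleanup later calls. A generic sketch of that pattern, assuming nothing about boulder's internals: a ticker-driven goroutine plus a closure that stops it.

```go
package main

import (
	"fmt"
	"time"
)

// startRefresher runs refresh() on a fixed interval in a background
// goroutine and returns a shutdown function for deferred cleanup to call.
func startRefresher(interval time.Duration, refresh func()) (shutdown func()) {
	ticker := time.NewTicker(interval)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				refresh()
			case <-done:
				return
			}
		}
	}()
	return func() {
		ticker.Stop()
		close(done)
	}
}

func main() {
	stop := startRefresher(30*time.Minute, func() { fmt.Println("reload overrides") })
	defer stop()
}
```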
+ srv := web.NewServer(c.WFE.ListenAddress, handler, logger) go func() { err := srv.ListenAndServe() if err != nil && err != http.ErrServerClosed { @@ -469,16 +411,10 @@ func main() { } }() - tlsSrv := http.Server{ - ReadTimeout: 30 * time.Second, - WriteTimeout: 120 * time.Second, - IdleTimeout: 120 * time.Second, - Addr: c.WFE.TLSListenAddress, - ErrorLog: log.New(errorWriter{logger}, "", 0), - Handler: handler, - } + tlsSrv := web.NewServer(c.WFE.TLSListenAddress, handler, logger) if tlsSrv.Addr != "" { go func() { + logger.Infof("TLS server listening on %s", tlsSrv.Addr) err := tlsSrv.ListenAndServeTLS(c.WFE.ServerCertificatePath, c.WFE.ServerKeyPath) if err != nil && err != http.ErrServerClosed { cmd.FailOnError(err, "Running TLS server") @@ -486,22 +422,23 @@ func main() { }() } - done := make(chan bool) - go cmd.CatchSignals(logger, func() { + // When main is ready to exit (because it has received a shutdown signal), + // gracefully shutdown the servers. Calling these shutdown functions causes + // ListenAndServe() and ListenAndServeTLS() to immediately return, then waits + // for any lingering connection-handling goroutines to finish their work. + defer func() { ctx, cancel := context.WithTimeout(context.Background(), c.WFE.ShutdownStopTimeout.Duration) defer cancel() + overridesRefresherShutdown() _ = srv.Shutdown(ctx) _ = tlsSrv.Shutdown(ctx) - done <- true - }) - - // https://godoc.org/net/http#Server.Shutdown: - // When Shutdown is called, Serve, ListenAndServe, and ListenAndServeTLS - // immediately return ErrServerClosed. Make sure the program doesn't exit and - // waits instead for Shutdown to return. - <-done + limiterRedis.StopLookups() + oTelShutdown(ctx) + }() + + cmd.WaitForSignal() } func init() { - cmd.RegisterCommand("boulder-wfe2", main) + cmd.RegisterCommand("boulder-wfe2", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/boulder-wfe2/main_test.go b/cmd/boulder-wfe2/main_test.go index d48b96af5f9..a1f79af8de4 100644 --- a/cmd/boulder-wfe2/main_test.go +++ b/cmd/boulder-wfe2/main_test.go @@ -1,263 +1,38 @@ package notmain import ( - "bytes" "crypto/x509" "encoding/pem" - "fmt" - "io/ioutil" - "strings" "testing" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/issuance" "github.com/letsencrypt/boulder/test" ) -func TestLoadChain_Valid(t *testing.T) { - issuer, chainPEM, err := loadChain([]string{ - "../../test/test-ca-cross.pem", - "../../test/test-root2.pem", +func TestLoadChain(t *testing.T) { + // Most of loadChain's logic is implemented in issuance.LoadChain, so this + // test only covers the construction of the PEM bytes. + _, chainPEM, err := loadChain([]string{ + "../../test/hierarchy/int-e1.cert.pem", + "../../test/hierarchy/root-x2-cross.cert.pem", + "../../test/hierarchy/root-x1.cert.pem", }) test.AssertNotError(t, err, "Should load valid chain") - expectedIssuer, err := core.LoadCert("../../test/test-ca-cross.pem") - test.AssertNotError(t, err, "Failed to load test issuer") - - chainIssuerPEM, rest := pem.Decode(chainPEM) - test.AssertNotNil(t, chainIssuerPEM, "Failed to decode chain PEM") - parsedIssuer, err := x509.ParseCertificate(chainIssuerPEM.Bytes) + // Parse the first certificate in the PEM blob. 
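The shutdown rewrite above replaces the CatchSignals/done-channel arrangement: main now blocks in cmd.WaitForSignal, and a deferred block drains the servers. A standard-library sketch of the same flow, with a hypothetical 30-second timeout standing in for c.WFE.ShutdownStopTimeout:

```go
package main

import (
	"context"
	"errors"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080"}

	go func() {
		// Shutdown causes ListenAndServe to return ErrServerClosed, which
		// is expected and must not be treated as a fatal error.
		err := srv.ListenAndServe()
		if err != nil && !errors.Is(err, http.ErrServerClosed) {
			panic(err)
		}
	}()

	// Equivalent of cmd.WaitForSignal: block until SIGTERM/SIGINT.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)
	<-sigs

	// Deferred-style cleanup: give in-flight requests a bounded window.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx)
}
```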
+ certPEM, rest := pem.Decode(chainPEM) + test.AssertNotNil(t, certPEM, "Failed to decode chain PEM") + _, err = x509.ParseCertificate(certPEM.Bytes) test.AssertNotError(t, err, "Failed to parse chain PEM") - // The three versions of the intermediate (the one loaded by us, the one - // returned by loadChain, and the one parsed from the chain) should be equal. - test.AssertByteEquals(t, issuer.Raw, expectedIssuer.Raw) - test.AssertByteEquals(t, parsedIssuer.Raw, expectedIssuer.Raw) + // Parse the second certificate in the PEM blob. + certPEM, rest = pem.Decode(rest) + test.AssertNotNil(t, certPEM, "Failed to decode chain PEM") + _, err = x509.ParseCertificate(certPEM.Bytes) + test.AssertNotError(t, err, "Failed to parse chain PEM") // The chain should contain nothing else. - rootIssuerPEM, _ := pem.Decode(rest) - if rootIssuerPEM != nil { + certPEM, rest = pem.Decode(rest) + if certPEM != nil || len(rest) != 0 { t.Error("Expected chain PEM to contain one cert and nothing else") } } - -func TestLoadChain_TooShort(t *testing.T) { - _, _, err := loadChain([]string{"/path/to/one/cert.pem"}) - test.AssertError(t, err, "Should reject too-short chain") -} - -func TestLoadChain_Unloadable(t *testing.T) { - _, _, err := loadChain([]string{ - "does-not-exist.pem", - "../../test/test-root2.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") - - _, _, err = loadChain([]string{ - "../../test/test-ca-cross.pem", - "does-not-exist.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") - - invalidPEMFile, _ := ioutil.TempFile("", "invalid.pem") - err = ioutil.WriteFile(invalidPEMFile.Name(), []byte(""), 0640) - test.AssertNotError(t, err, "Error writing invalid PEM tmp file") - _, _, err = loadChain([]string{ - invalidPEMFile.Name(), - "../../test/test-root2.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") -} - -func TestLoadChain_InvalidSig(t *testing.T) { - _, _, err := loadChain([]string{ - "../../test/test-root2.pem", - "../../test/test-ca-cross.pem", - }) - test.AssertError(t, err, "Should reject invalid signature") -} - -func TestLoadChain_NoRoot(t *testing.T) { - // TODO(#5251): Implement this when we have a hierarchy which includes two - // CA certs, neither of which is a root. -} - -func TestLoadCertificateChains(t *testing.T) { - // Read some cert bytes to use for expected chain content - certBytesA, err := ioutil.ReadFile("../../test/test-ca.pem") - test.AssertNotError(t, err, "Error reading../../test/test-ca.pem") - certBytesB, err := ioutil.ReadFile("../../test/test-ca2.pem") - test.AssertNotError(t, err, "Error reading../../test/test-ca2.pem") - - // Make a .pem file with invalid contents - invalidPEMFile, _ := ioutil.TempFile("", "invalid.pem") - err = ioutil.WriteFile(invalidPEMFile.Name(), []byte(""), 0640) - test.AssertNotError(t, err, "Error writing invalid PEM tmp file") - - // Make a .pem file with a valid cert but also some leftover bytes - leftoverPEMFile, _ := ioutil.TempFile("", "leftovers.pem") - leftovers := "vegan curry, cold rice, soy milk" - leftoverBytes := append(certBytesA, []byte(leftovers)...) 
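The rewritten TestLoadChain leans on pem.Decode's contract: it returns the first block plus the unconsumed remainder, so repeated calls walk a bundle block by block until nil. For example (chain.pem is a hypothetical bundle):

```go
package main

import (
	"encoding/pem"
	"fmt"
	"os"
)

// countCerts walks a PEM bundle the way the rewritten TestLoadChain does,
// consuming one block per pem.Decode call until nothing is left.
func countCerts(path string) (int, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	n := 0
	for {
		var block *pem.Block
		block, raw = pem.Decode(raw)
		if block == nil {
			break
		}
		if block.Type == "CERTIFICATE" {
			n++
		}
	}
	return n, nil
}

func main() {
	n, err := countCerts("chain.pem")
	fmt.Println(n, err)
}
```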
- err = ioutil.WriteFile(leftoverPEMFile.Name(), leftoverBytes, 0640) - test.AssertNotError(t, err, "Error writing leftover PEM tmp file") - - // Make a .pem file that is test-ca2.pem but with Windows/DOS CRLF line - // endings - crlfPEM, _ := ioutil.TempFile("", "crlf.pem") - crlfPEMBytes := []byte(strings.Replace(string(certBytesB), "\n", "\r\n", -1)) - err = ioutil.WriteFile(crlfPEM.Name(), crlfPEMBytes, 0640) - test.AssertNotError(t, err, "ioutil.WriteFile failed") - - // Make a .pem file that is test-ca.pem but with no trailing newline - abruptPEM, _ := ioutil.TempFile("", "abrupt.pem") - abruptPEMBytes := certBytesA[:len(certBytesA)-1] - err = ioutil.WriteFile(abruptPEM.Name(), abruptPEMBytes, 0640) - test.AssertNotError(t, err, "ioutil.WriteFile failed") - - testCases := []struct { - Name string - Input map[string][]string - ExpectedMap map[issuance.IssuerNameID][]byte - ExpectedError error - AllowEmptyChain bool - }{ - { - Name: "No input", - Input: nil, - }, - { - Name: "AIA Issuer without chain files", - Input: map[string][]string{ - "http://break.the.chain.com": {}, - }, - ExpectedError: fmt.Errorf( - "CertificateChain entry for AIA issuer url \"http://break.the.chain.com\" " + - "has no chain file names configured"), - }, - { - Name: "Missing chain file", - Input: map[string][]string{ - "http://where.is.my.mind": {"/tmp/does.not.exist.pem"}, - }, - ExpectedError: fmt.Errorf("CertificateChain entry for AIA issuer url \"http://where.is.my.mind\" " + - "has an invalid chain file: \"/tmp/does.not.exist.pem\" - error reading " + - "contents: open /tmp/does.not.exist.pem: no such file or directory"), - }, - { - Name: "PEM chain file with Windows CRLF line endings", - Input: map[string][]string{ - "http://windows.sad.zone": {crlfPEM.Name()}, - }, - ExpectedError: fmt.Errorf("CertificateChain entry for AIA issuer url \"http://windows.sad.zone\" "+ - "has an invalid chain file: %q - contents had CRLF line endings", crlfPEM.Name()), - }, - { - Name: "Invalid PEM chain file", - Input: map[string][]string{ - "http://ok.go": {invalidPEMFile.Name()}, - }, - ExpectedError: fmt.Errorf( - "CertificateChain entry for AIA issuer url \"http://ok.go\" has an "+ - "invalid chain file: %q - contents did not decode as PEM", - invalidPEMFile.Name()), - }, - { - Name: "PEM chain file that isn't a cert", - Input: map[string][]string{ - "http://not-a-cert.com": {"../../test/test-root.key"}, - }, - ExpectedError: fmt.Errorf( - "CertificateChain entry for AIA issuer url \"http://not-a-cert.com\" has " + - "an invalid chain file: \"../../test/test-root.key\" - PEM block type " + - "incorrect, found \"PRIVATE KEY\", expected \"CERTIFICATE\""), - }, - { - Name: "PEM chain file with leftover bytes", - Input: map[string][]string{ - "http://tasty.leftovers.com": {leftoverPEMFile.Name()}, - }, - ExpectedError: fmt.Errorf( - "CertificateChain entry for AIA issuer url \"http://tasty.leftovers.com\" "+ - "has an invalid chain file: %q - PEM contents had unused remainder input "+ - "(%d bytes)", - leftoverPEMFile.Name(), - len([]byte(leftovers)), - ), - }, - { - Name: "One PEM file chain", - Input: map[string][]string{ - "http://single-cert-chain.com": {"../../test/test-ca.pem"}, - }, - ExpectedMap: map[issuance.IssuerNameID][]byte{ - issuance.IssuerNameID(37287262753088952): []byte(fmt.Sprintf("\n%s", string(certBytesA))), - }, - }, - { - Name: "Two PEM file chain", - Input: map[string][]string{ - "http://two-cert-chain.com": {"../../test/test-ca.pem", "../../test/test-ca2.pem"}, - }, - ExpectedMap: 
map[issuance.IssuerNameID][]byte{ - issuance.IssuerNameID(37287262753088952): []byte(fmt.Sprintf("\n%s\n%s", string(certBytesA), string(certBytesB))), - }, - }, - { - Name: "One PEM file chain, no trailing newline", - Input: map[string][]string{ - "http://single-cert-chain.nonewline.com": {abruptPEM.Name()}, - }, - ExpectedMap: map[issuance.IssuerNameID][]byte{ - // NOTE(@cpu): There should be a trailing \n added by the WFE that we - // expect in the format specifier below. - issuance.IssuerNameID(37287262753088952): []byte(fmt.Sprintf("\n%s\n", string(abruptPEMBytes))), - }, - }, - { - Name: "Two PEM file chain, don't require at least one chain", - AllowEmptyChain: true, - Input: map[string][]string{ - "http://two-cert-chain.com": {"../../test/test-ca.pem", "../../test/test-ca2.pem"}, - }, - ExpectedMap: map[issuance.IssuerNameID][]byte{ - issuance.IssuerNameID(37287262753088952): []byte(fmt.Sprintf("\n%s\n%s", string(certBytesA), string(certBytesB))), - }, - }, - { - Name: "Empty chain, don't require at least one chain", - AllowEmptyChain: true, - Input: map[string][]string{ - "http://two-cert-chain.com": {}, - }, - ExpectedMap: map[issuance.IssuerNameID][]byte{}, - }, - { - Name: "Empty chain", - Input: map[string][]string{ - "http://two-cert-chain.com": {}, - }, - ExpectedError: fmt.Errorf( - "CertificateChain entry for AIA issuer url %q has no chain "+ - "file names configured", - "http://two-cert-chain.com"), - }, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - resultMap, issuers, err := loadCertificateChains(tc.Input, !tc.AllowEmptyChain) - if tc.ExpectedError == nil && err != nil { - t.Errorf("Expected nil error, got %#v\n", err) - } else if tc.ExpectedError != nil && err == nil { - t.Errorf("Expected non-nil error, got nil err") - } else if tc.ExpectedError != nil { - test.AssertEquals(t, err.Error(), tc.ExpectedError.Error()) - } - test.AssertEquals(t, len(resultMap), len(tc.ExpectedMap)) - test.AssertEquals(t, len(issuers), len(tc.ExpectedMap)) - for nameid, chain := range resultMap { - test.Assert(t, bytes.Equal(chain, tc.ExpectedMap[nameid]), "Chain bytes did not match expected") - } - }) - } -} diff --git a/cmd/boulder/main.go b/cmd/boulder/main.go index 34d45b2c63a..7b386bec34e 100644 --- a/cmd/boulder/main.go +++ b/cmd/boulder/main.go @@ -3,10 +3,8 @@ package main import ( "fmt" "os" - "path" + "strings" - _ "github.com/letsencrypt/boulder/cmd/admin-revoker" - _ "github.com/letsencrypt/boulder/cmd/akamai-purger" _ "github.com/letsencrypt/boulder/cmd/bad-key-revoker" _ "github.com/letsencrypt/boulder/cmd/boulder-ca" _ "github.com/letsencrypt/boulder/cmd/boulder-observer" @@ -15,38 +13,112 @@ import ( _ "github.com/letsencrypt/boulder/cmd/boulder-sa" _ "github.com/letsencrypt/boulder/cmd/boulder-va" _ "github.com/letsencrypt/boulder/cmd/boulder-wfe2" - _ "github.com/letsencrypt/boulder/cmd/caa-log-checker" - _ "github.com/letsencrypt/boulder/cmd/ceremony" _ "github.com/letsencrypt/boulder/cmd/cert-checker" - _ "github.com/letsencrypt/boulder/cmd/contact-auditor" - _ "github.com/letsencrypt/boulder/cmd/expiration-mailer" - _ "github.com/letsencrypt/boulder/cmd/id-exporter" + _ "github.com/letsencrypt/boulder/cmd/crl-checker" + _ "github.com/letsencrypt/boulder/cmd/crl-storer" + _ "github.com/letsencrypt/boulder/cmd/crl-updater" + _ "github.com/letsencrypt/boulder/cmd/email-exporter" _ "github.com/letsencrypt/boulder/cmd/log-validator" _ "github.com/letsencrypt/boulder/cmd/nonce-service" - _ "github.com/letsencrypt/boulder/cmd/notify-mailer" - _ 
"github.com/letsencrypt/boulder/cmd/ocsp-responder" - _ "github.com/letsencrypt/boulder/cmd/ocsp-updater" - _ "github.com/letsencrypt/boulder/cmd/orphan-finder" + _ "github.com/letsencrypt/boulder/cmd/remoteva" _ "github.com/letsencrypt/boulder/cmd/reversed-hostname-checker" - _ "github.com/letsencrypt/boulder/cmd/rocsp-tool" + _ "github.com/letsencrypt/boulder/cmd/sfe" + "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/cmd" ) -func main() { - cmd.LookupCommand(path.Base(os.Args[0]))() +// readAndValidateConfigFile uses the ConfigValidator registered for the given +// command to validate the provided config file. If the command does not have a +// registered ConfigValidator, this function does nothing. +func readAndValidateConfigFile(name, filename string) error { + cv := cmd.LookupConfigValidator(name) + if cv == nil { + return nil + } + file, err := os.Open(filename) + if err != nil { + return err + } + defer file.Close() + if name == "boulder-observer" { + // Only the boulder-observer uses YAML config files. + return cmd.ValidateYAMLConfig(cv, file) + } + return cmd.ValidateJSONConfig(cv, file) } -func init() { - cmd.RegisterCommand("boulder", func() { - if len(os.Args) > 1 && os.Args[1] == "--list" { - for _, c := range cmd.AvailableCommands() { - if c != "boulder" { - fmt.Println(c) - } +// getConfigPath returns the path to the config file if it was provided as a +// command line flag. If the flag was not provided, it returns an empty string. +func getConfigPath() string { + for i := range len(os.Args) { + arg := os.Args[i] + if arg == "--config" || arg == "-config" { + if i+1 < len(os.Args) { + return os.Args[i+1] } - } else { - fmt.Fprintf(os.Stderr, "Call with --list to list available subcommands. Symlink and run as a subcommand to run that subcommand.\n") } - }) + config, ok := strings.CutPrefix(arg, "--config=") + if ok { + return config + } + config, ok = strings.CutPrefix(arg, "-config=") + if ok { + return config + } + } + return "" +} + +var boulderUsage = fmt.Sprintf(`Usage: %s [flags] + + Each boulder component has its own subcommand. Use --list to see + a list of the available components. Use --help to + see the usage for a specific component. +`, + core.Command()) + +func main() { + defer cmd.AuditPanic() + + if len(os.Args) <= 1 { + // No arguments passed. + fmt.Fprint(os.Stderr, boulderUsage) + return + } + + if os.Args[1] == "--help" || os.Args[1] == "-help" { + // Help flag passed. + fmt.Fprint(os.Stderr, boulderUsage) + return + } + + if os.Args[1] == "--list" || os.Args[1] == "-list" { + // List flag passed. + for _, c := range cmd.AvailableCommands() { + fmt.Println(c) + } + return + } + + // Remove the subcommand from the arguments. + command := os.Args[1] + os.Args = os.Args[1:] + + config := getConfigPath() + if config != "" { + // Config flag passed. 
+ err := readAndValidateConfigFile(command, config) + if err != nil { + fmt.Fprintf(os.Stderr, "Error validating config file %q for command %q: %s\n", config, command, err) + os.Exit(1) + } + } + + commandFunc := cmd.LookupCommand(command) + if commandFunc == nil { + fmt.Fprintf(os.Stderr, "Unknown subcommand %q.\n", command) + os.Exit(1) + } + commandFunc() } diff --git a/cmd/boulder/main_test.go b/cmd/boulder/main_test.go new file mode 100644 index 00000000000..1dbcb25b0bc --- /dev/null +++ b/cmd/boulder/main_test.go @@ -0,0 +1,72 @@ +package main + +import ( + "fmt" + "os" + "testing" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/test" +) + +// TestConfigValidation checks that each of the components which register a +// validation tagged Config struct at init time can be used to successfully +// validate their corresponding test configuration files. +func TestConfigValidation(t *testing.T) { + configPath := "../../test/config" + if os.Getenv("BOULDER_CONFIG_DIR") == "test/config-next" { + configPath = "../../test/config-next" + } + + // Each component is a set of `cmd` package name and a list of paths to + // configuration files to validate. + components := make(map[string][]string) + + // For each component, add the paths to the configuration files to validate. + // By default we assume that the configuration file is named after the + // component. However, there are some exceptions to this rule. We've added + // special cases for these components. + for _, cmdName := range cmd.AvailableConfigValidators() { + var fileNames []string + switch cmdName { + case "boulder-ca": + fileNames = []string{"ca.json"} + case "boulder-observer": + fileNames = []string{"observer.yml"} + case "boulder-publisher": + fileNames = []string{"publisher.json"} + case "boulder-ra": + fileNames = []string{"ra.json"} + case "boulder-sa": + fileNames = []string{"sa.json"} + case "boulder-va": + fileNames = []string{"va.json"} + case "remoteva": + fileNames = []string{ + "remoteva-a.json", + "remoteva-b.json", + } + case "boulder-wfe2": + fileNames = []string{"wfe2.json"} + case "sfe": + fileNames = []string{"sfe.json"} + case "nonce-service": + fileNames = []string{ + "nonce-a.json", + "nonce-b.json", + } + default: + fileNames = []string{cmdName + ".json"} + } + components[cmdName] = append(components[cmdName], fileNames...) + } + t.Parallel() + for cmdName, paths := range components { + for _, path := range paths { + t.Run(path, func(t *testing.T) { + err := readAndValidateConfigFile(cmdName, fmt.Sprintf("%s/%s", configPath, path)) + test.AssertNotError(t, err, fmt.Sprintf("Failed to validate config file %q", path)) + }) + } + } +} diff --git a/cmd/caa-log-checker/main.go b/cmd/caa-log-checker/main.go deleted file mode 100644 index 1ab282b0ecb..00000000000 --- a/cmd/caa-log-checker/main.go +++ /dev/null @@ -1,261 +0,0 @@ -package notmain - -import ( - "bufio" - "compress/gzip" - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "os" - "regexp" - "strings" - "time" - - "github.com/letsencrypt/boulder/cmd" - blog "github.com/letsencrypt/boulder/log" -) - -var raIssuanceLineRE = regexp.MustCompile(`Certificate request - successful JSON=(.*)`) - -// TODO: Extract the "Valid for issuance: (true|false)" field too. 
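The deleted caa-log-checker's core move, visible in raIssuanceLineRE above, is anchoring a regex on the human-readable log prefix and JSON-decoding the capture group. Reduced to a sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

var issuanceRE = regexp.MustCompile(`Certificate request - successful JSON=(.*)`)

type issuanceEvent struct {
	SerialNumber string
	Names        []string
}

// parseIssuance matches the issuance marker in a log line, then unmarshals
// the trailing JSON payload captured by the regex.
func parseIssuance(line string) (*issuanceEvent, error) {
	m := issuanceRE.FindStringSubmatch(line)
	if m == nil {
		return nil, nil // not an issuance line
	}
	var ie issuanceEvent
	if err := json.Unmarshal([]byte(m[1]), &ie); err != nil {
		return nil, err
	}
	return &ie, nil
}

func main() {
	ie, err := parseIssuance(`... Certificate request - successful JSON={"SerialNumber":"1","Names":["example.com"]}`)
	fmt.Println(ie, err)
}
```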
-var vaCAALineRE = regexp.MustCompile(`Checked CAA records for ([a-z0-9-.*]+), \[Present: (true|false)`) - -type issuanceEvent struct { - SerialNumber string - Names []string - Requester int64 - - issuanceTime time.Time -} - -func openFile(path string) (*bufio.Scanner, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - var reader io.Reader - reader = f - if strings.HasSuffix(path, ".gz") { - reader, err = gzip.NewReader(f) - if err != nil { - return nil, err - } - } - scanner := bufio.NewScanner(reader) - return scanner, nil -} - -func parseTimestamp(line []byte) (time.Time, error) { - datestamp, err := time.Parse(time.RFC3339, string(line[0:32])) - if err != nil { - return time.Time{}, err - } - return datestamp, nil -} - -// loadIssuanceLog processes a single issuance (RA) log file. It returns a map -// of names to slices of timestamps at which certificates for those names were -// issued. It also returns the earliest and latest timestamps seen, to allow -// CAA log processing to quickly skip irrelevant entries. -func loadIssuanceLog(path string) (map[string][]time.Time, time.Time, time.Time, error) { - scanner, err := openFile(path) - if err != nil { - return nil, time.Time{}, time.Time{}, fmt.Errorf("failed to open %q: %w", path, err) - } - - linesCount := 0 - earliest := time.Time{} - latest := time.Time{} - - issuanceMap := map[string][]time.Time{} - for scanner.Scan() { - line := scanner.Bytes() - linesCount++ - - matches := raIssuanceLineRE.FindSubmatch(line) - if matches == nil { - continue - } - if len(matches) != 2 { - return nil, earliest, latest, fmt.Errorf("line %d: unexpected number of regex matches", linesCount) - } - - var ie issuanceEvent - err := json.Unmarshal(matches[1], &ie) - if err != nil { - return nil, earliest, latest, fmt.Errorf("line %d: failed to unmarshal JSON: %w", linesCount, err) - } - - // Populate the issuance time from the syslog timestamp, rather than the - // ResponseTime member of the JSON. This makes testing a lot simpler because - // of how we mess with time sometimes. Given that these timestamps are - // generated on the same system, they should be tightly coupled anyway. - ie.issuanceTime, err = parseTimestamp(line) - if err != nil { - return nil, earliest, latest, fmt.Errorf("line %d: failed to parse timestamp: %w", linesCount, err) - } - - if earliest.IsZero() || ie.issuanceTime.Before(earliest) { - earliest = ie.issuanceTime - } - if latest.IsZero() || ie.issuanceTime.After(latest) { - latest = ie.issuanceTime - } - for _, name := range ie.Names { - issuanceMap[name] = append(issuanceMap[name], ie.issuanceTime) - } - } - err = scanner.Err() - if err != nil { - return nil, earliest, latest, err - } - - return issuanceMap, earliest, latest, nil -} - -// processCAALog processes a single CAA (VA) log file. It modifies the input map -// (of issuance names to times, as returned by `loadIssuanceLog`) to remove any -// timestamps which are covered by (i.e. less than 8 hours after) a CAA check -// for that name in the log file. It also prunes any names whose slice of -// issuance times becomes empty. 
-func processCAALog(path string, issuances map[string][]time.Time, earliest time.Time, latest time.Time, tolerance time.Duration) error { - scanner, err := openFile(path) - if err != nil { - return fmt.Errorf("failed to open %q: %w", path, err) - } - - linesCount := 0 - - for scanner.Scan() { - line := scanner.Bytes() - linesCount++ - - matches := vaCAALineRE.FindSubmatch(line) - if matches == nil { - continue - } - if len(matches) != 3 { - return fmt.Errorf("line %d: unexpected number of regex matches", linesCount) - } - name := string(matches[1]) - present := string(matches[2]) - - checkTime, err := parseTimestamp(line) - if err != nil { - return fmt.Errorf("line %d: failed to parse timestamp: %w", linesCount, err) - } - - // Don't bother processing rows that definitely fall outside the period we - // care about. - if checkTime.After(latest) || checkTime.Before(earliest.Add(-8*time.Hour)) { - continue - } - - // TODO: Only remove covered issuance timestamps if the CAA check actually - // said that we're allowed to issue (i.e. had "Valid for issuance: true"). - issuances[name] = removeCoveredTimestamps(issuances[name], checkTime, tolerance) - if len(issuances[name]) == 0 { - delete(issuances, name) - } - - // If the CAA check didn't find any CAA records for w.x.y.z, then that means - // that we checked the CAA records for x.y.z, y.z, and z as well, and are - // covered for any issuance for those names. - if present == "false" { - labels := strings.Split(name, ".") - for i := 1; i < len(labels)-1; i++ { - tailName := strings.Join(labels[i:], ".") - issuances[tailName] = removeCoveredTimestamps(issuances[tailName], checkTime, tolerance) - if len(issuances[tailName]) == 0 { - delete(issuances, tailName) - } - } - } - } - - return scanner.Err() -} - -// removeCoveredTimestamps returns a new slice of timestamps which contains all -// timestamps that are *not* within 8 hours after the input timestamp. -func removeCoveredTimestamps(timestamps []time.Time, cover time.Time, tolerance time.Duration) []time.Time { - r := make([]time.Time, 0) - for _, ts := range timestamps { - // Copy the timestamp into the results slice if it is before the covering - // timestamp, or more than 8 hours after the covering timestamp (i.e. if - // it is *not* covered by the covering timestamp). - diff := ts.Sub(cover) - if diff < -tolerance || diff > 8*time.Hour+tolerance { - ts := ts - r = append(r, ts) - } - } - return r -} - -// emitErrors returns nil if the input map is empty. Otherwise, it logs -// a line for each name and issuance time that was not covered by a CAA -// check, and return an error. 
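processCAALog's Present: false branch encodes a subtlety worth spelling out: a CAA lookup for w.x.y.z that found no records means the checker also consulted every parent name down to, but not including, the final label, so all of those names are covered too. The label-walking in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// tailNames lists the parent names that a no-records CAA lookup for name
// also covers, matching the loop in the deleted processCAALog: it climbs
// the label tree but stops before the bare final label.
func tailNames(name string) []string {
	labels := strings.Split(name, ".")
	var tails []string
	for i := 1; i < len(labels)-1; i++ {
		tails = append(tails, strings.Join(labels[i:], "."))
	}
	return tails
}

func main() {
	fmt.Println(tailNames("w.x.y.z.example.com"))
	// [x.y.z.example.com y.z.example.com z.example.com example.com]
}
```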
-func emitErrors(log blog.Logger, remaining map[string][]time.Time) error { - if len(remaining) == 0 { - return nil - } - - for name, timestamps := range remaining { - for _, timestamp := range timestamps { - log.Infof("CAA-checking log event not found for issuance of %s at %s", name, timestamp) - } - } - - return errors.New("Some CAA-checking log events not found") -} - -func main() { - logStdoutLevel := flag.Int("stdout-level", 6, "Minimum severity of messages to send to stdout") - logSyslogLevel := flag.Int("syslog-level", 6, "Minimum severity of messages to send to syslog") - raLog := flag.String("ra-log", "", "Path to a single boulder-ra log file") - vaLogs := flag.String("va-logs", "", "List of paths to boulder-va logs, separated by commas") - timeTolerance := flag.Duration("time-tolerance", 0, "How much slop to allow when comparing timestamps for ordering") - earliestFlag := flag.String("earliest", "", "Deprecated.") - latestFlag := flag.String("latest", "", "Deprecated.") - - flag.Parse() - - logger := cmd.NewLogger(cmd.SyslogConfig{ - StdoutLevel: *logStdoutLevel, - SyslogLevel: *logSyslogLevel, - }) - - if *timeTolerance < 0 { - cmd.Fail("value of -time-tolerance must be non-negative") - } - - if *earliestFlag != "" || *latestFlag != "" { - logger.Info("The -earliest and -latest flags are deprecated and ignored.") - } - - // Build a map from hostnames to times at which those names were issued for. - // Also retrieve the earliest and latest issuance times represented in the - // data, so we can be more efficient when examining entries from the CAA log. - issuanceMap, earliest, latest, err := loadIssuanceLog(*raLog) - cmd.FailOnError(err, "failed to load issuance logs") - - // Try to pare the issuance map down to nothing by removing every entry which - // is covered by a CAA check. - for _, vaLog := range strings.Split(*vaLogs, ",") { - err = processCAALog(vaLog, issuanceMap, earliest, latest, *timeTolerance) - cmd.FailOnError(err, "failed to process CAA checking logs") - } - - err = emitErrors(logger, issuanceMap) - if err != nil { - logger.AuditErrf("%s", err) - os.Exit(1) - } -} - -func init() { - cmd.RegisterCommand("caa-log-checker", main) -} diff --git a/cmd/caa-log-checker/main_test.go b/cmd/caa-log-checker/main_test.go deleted file mode 100644 index 4e3f588a27a..00000000000 --- a/cmd/caa-log-checker/main_test.go +++ /dev/null @@ -1,280 +0,0 @@ -package notmain - -import ( - "compress/gzip" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/letsencrypt/boulder/test" -) - -// A timestamp which matches the format we put in our logs. Note that it has -// sub-second precision only out to microseconds (not nanoseconds), and must -// include the timezone indicator. 
-// 0001-01-01T01:01:01.001001+00:00 -var testTime = time.Time{}.Add(time.Hour + time.Minute + time.Second + time.Millisecond + time.Microsecond).Local() - -func TestOpenFile(t *testing.T) { - tmpPlain, err := ioutil.TempFile(os.TempDir(), "plain") - test.AssertNotError(t, err, "failed to create temporary file") - defer os.Remove(tmpPlain.Name()) - _, err = tmpPlain.Write([]byte("test-1\ntest-2")) - test.AssertNotError(t, err, "failed to write to temp file") - tmpPlain.Close() - - tmpGzip, err := ioutil.TempFile(os.TempDir(), "gzip-*.gz") - test.AssertNotError(t, err, "failed to create temporary file") - defer os.Remove(tmpGzip.Name()) - gzipWriter := gzip.NewWriter(tmpGzip) - _, err = gzipWriter.Write([]byte("test-1\ntest-2")) - test.AssertNotError(t, err, "failed to write to temp file") - gzipWriter.Flush() - gzipWriter.Close() - tmpGzip.Close() - - checkFile := func(path string) { - t.Helper() - scanner, err := openFile(path) - test.AssertNotError(t, err, fmt.Sprintf("failed to open %q", path)) - var lines []string - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - test.AssertNotError(t, scanner.Err(), fmt.Sprintf("failed to read from %q", path)) - test.AssertEquals(t, len(lines), 2) - test.AssertDeepEquals(t, lines, []string{"test-1", "test-2"}) - } - - checkFile(tmpPlain.Name()) - checkFile(tmpGzip.Name()) -} - -func TestLoadIssuanceLog(t *testing.T) { - - for _, tc := range []struct { - name string - loglines string - expMap map[string][]time.Time - expEarliest time.Time - expLatest time.Time - expErrStr string - }{ - { - "empty file", - "", - map[string][]time.Time{}, - time.Time{}, - time.Time{}, - "", - }, - { - "no matches", - "some text\nsome other text", - map[string][]time.Time{}, - time.Time{}, - time.Time{}, - "", - }, - { - "bad json", - "Certificate request - successful JSON=this is not valid json", - map[string][]time.Time{}, - time.Time{}, - time.Time{}, - "failed to unmarshal JSON", - }, - { - "bad timestamp", - "2009-11-10 23:00:00 UTC Certificate request - successful JSON={}", - map[string][]time.Time{}, - time.Time{}, - time.Time{}, - "failed to parse timestamp", - }, - { - "normal behavior", - `header -0001-01-01T01:01:01.001001+00:00 Certificate request - successful JSON={"SerialNumber": "1", "Names":["example.com"], "Requester":0} -0001-01-01T02:01:01.001001+00:00 Certificate request - successful JSON={"SerialNumber": "2", "Names":["2.example.com", "3.example.com"], "Requester":0} -filler -0001-01-01T03:01:01.001001+00:00 Certificate request - successful JSON={"SerialNumber": "3", "Names":["2.example.com"], "Requester":0} -trailer`, - map[string][]time.Time{ - "example.com": {testTime}, - "2.example.com": {testTime.Add(time.Hour), testTime.Add(2 * time.Hour)}, - "3.example.com": {testTime.Add(time.Hour)}, - }, - testTime, - testTime.Add(2 * time.Hour), - "", - }, - } { - t.Run(tc.name, func(t *testing.T) { - tmp, err := ioutil.TempFile(os.TempDir(), "TestLoadIssuanceLog") - test.AssertNotError(t, err, "failed to create temporary log file") - defer os.Remove(tmp.Name()) - _, err = tmp.Write([]byte(tc.loglines)) - test.AssertNotError(t, err, "failed to write temporary log file") - err = tmp.Close() - test.AssertNotError(t, err, "failed to close temporary log file") - - resMap, resEarliest, resLatest, resError := loadIssuanceLog(tmp.Name()) - if tc.expErrStr != "" { - test.AssertError(t, resError, "loadIssuanceLog should have errored") - test.AssertContains(t, resError.Error(), tc.expErrStr) - return - } - test.AssertNotError(t, resError, 
"loadIssuanceLog shouldn't have errored") - test.AssertDeepEquals(t, resMap, tc.expMap) - test.AssertEquals(t, resEarliest, tc.expEarliest) - test.AssertEquals(t, resLatest, tc.expLatest) - }) - } -} - -func TestProcessCAALog(t *testing.T) { - for _, tc := range []struct { - name string - loglines string - issuances map[string][]time.Time - earliest time.Time - latest time.Time - tolerance time.Duration - expMap map[string][]time.Time - expErrStr string - }{ - { - "empty file", - "", - map[string][]time.Time{"example.com": {testTime}}, - time.Time{}, - time.Time{}, - time.Second, - map[string][]time.Time{"example.com": {testTime}}, - "", - }, - { - "no matches", - "", - map[string][]time.Time{"example.com": {testTime}}, - time.Time{}, - time.Time{}, - time.Second, - map[string][]time.Time{"example.com": {testTime}}, - "", - }, - { - "outside 8hr window", - `header -0001-01-01T01:01:01.001001+00:00 Checked CAA records for example.com, [Present: true, ... -filler -0001-01-01T21:01:01.001001+00:00 Checked CAA records for example.com, [Present: true, ... -trailer`, - map[string][]time.Time{"example.com": {testTime.Add(10 * time.Hour)}}, - testTime, - testTime.Add(24 * time.Hour), - time.Second, - map[string][]time.Time{"example.com": {testTime.Add(10 * time.Hour)}}, - "", - }, - { - "outside earliest and latest", - `header -0001-01-01T01:01:01.001001+00:00 Checked CAA records for example.com, [Present: true, ... -filler -0001-01-01T21:01:01.001001+00:00 Checked CAA records for example.com, [Present: true, ... -trailer`, - map[string][]time.Time{"example.com": {testTime.Add(24 * time.Hour)}}, - testTime.Add(10 * time.Hour), - testTime.Add(11 * time.Hour), - time.Second, - map[string][]time.Time{"example.com": {testTime.Add(24 * time.Hour)}}, - "", - }, - { - "present: false", - `header -0001-01-01T01:01:01.001001+00:00 Checked CAA records for a.b.example.com, [Present: false, ... -trailer`, - map[string][]time.Time{ - "a.b.example.com": {testTime.Add(time.Hour)}, - "b.example.com": {testTime.Add(time.Hour)}, - "example.com": {testTime.Add(time.Hour)}, - "other.com": {testTime.Add(time.Hour)}, - }, - testTime, - testTime.Add(2 * time.Hour), - time.Second, - map[string][]time.Time{"other.com": {testTime.Add(time.Hour)}}, - "", - }, - } { - t.Run(tc.name, func(t *testing.T) { - fmt.Println(tc.name) - tmp, err := ioutil.TempFile(os.TempDir(), "TestProcessCAALog") - test.AssertNotError(t, err, "failed to create temporary log file") - defer os.Remove(tmp.Name()) - _, err = tmp.Write([]byte(tc.loglines)) - test.AssertNotError(t, err, "failed to write temporary log file") - err = tmp.Close() - test.AssertNotError(t, err, "failed to close temporary log file") - - resError := processCAALog(tmp.Name(), tc.issuances, tc.earliest, tc.latest, tc.tolerance) - if tc.expErrStr != "" { - test.AssertError(t, resError, "processCAALog should have errored") - test.AssertContains(t, resError.Error(), tc.expErrStr) - return - } - // Because processCAALog modifies its input map, we have to compare the - // testcase's input against the testcase's expectation. 
- test.AssertDeepEquals(t, tc.issuances, tc.expMap) - }) - } -} - -func TestRemoveCoveredTimestamps(t *testing.T) { - for _, tc := range []struct { - name string - timestamps []time.Time - cover time.Time - tolerance time.Duration - expected []time.Time - }{ - { - "empty input", - []time.Time{}, - testTime, - time.Second, - []time.Time{}, - }, - { - "normal functioning", - []time.Time{testTime.Add(-1 * time.Hour), testTime.Add(5 * time.Hour), testTime.Add(10 * time.Hour)}, - testTime, - time.Second, - []time.Time{testTime.Add(-1 * time.Hour), testTime.Add(10 * time.Hour)}, - }, - { - "tolerance", - []time.Time{testTime.Add(-1 * time.Second), testTime.Add(8*time.Hour + 1*time.Second)}, - testTime, - time.Second, - []time.Time{}, - }, - { - "intolerance", - []time.Time{testTime.Add(-2 * time.Second), testTime.Add(8*time.Hour + 2*time.Second)}, - testTime, - time.Second, - []time.Time{testTime.Add(-2 * time.Second), testTime.Add(8*time.Hour + 2*time.Second)}, - }, - } { - t.Run(tc.name, func(t *testing.T) { - result := removeCoveredTimestamps(tc.timestamps, tc.cover, tc.tolerance) - test.AssertDeepEquals(t, result, tc.expected) - }) - } -} diff --git a/cmd/ceremony/README.md b/cmd/ceremony/README.md index 247477b17c1..4edc2940872 100644 --- a/cmd/ceremony/README.md +++ b/cmd/ceremony/README.md @@ -1,21 +1,19 @@ # `ceremony` -``` +```sh ceremony --config path/to/config.yml ``` `ceremony` is a tool designed for Certificate Authority specific key and certificate ceremonies. The main design principle is that unlike most ceremony tooling there is a single user input, a configuration file, which is required to complete a root, intermediate, or key ceremony. The goal is to make ceremonies as simple as possible and allow for simple verification of a single file, instead of verification of a large number of independent commands. `ceremony` has these modes: -* `root` - generates a signing key on HSM and creates a self-signed root certificate that uses the generated key, outputting a PEM public key, and a PEM certificate -* `intermediate` - creates a intermediate certificate and signs it using a signing key already on a HSM, outputting a PEM certificate -* `cross-csr` - creates a CSR for signing by a third party, outputting a PEM CSR. -* `cross-certificate` - issues a certificate for one root, signed by another root. This is distinct from an intermediate because there is no path length constraint and there are no EKUs. -* `ocsp-signer` - creates a delegated OCSP signing certificate and signs it using a signing key already on a HSM, outputting a PEM certificate -* `crl-signer` - creates a delegated CRL signing certificate and signs it using a signing key already on a HSM, outputting a PEM certificate -* `key` - generates a signing key on HSM, outputting a PEM public key -* `ocsp-response` - creates a OCSP response for the provided certificate and signs it using a signing key already on a HSM, outputting a base64 encoded response -* `crl` - creates a CRL from the provided profile and signs it using a signing key already on a HSM, outputting a PEM CRL + +- `root`: generates a signing key on HSM and creates a self-signed root certificate that uses the generated key, outputting a PEM public key, and a PEM certificate. After generating such a root for public trust purposes, it should be submitted to [as many root programs as is possible/practical](https://github.com/daknob/root-programs). 
+- `intermediate`: creates a intermediate certificate and signs it using a signing key already on a HSM, outputting a PEM certificate +- `cross-csr`: creates a CSR for signing by a third party, outputting a PEM CSR. +- `cross-certificate`: issues a certificate for one root, signed by another root. This is distinct from an intermediate because there is no path length constraint and there are no EKUs. +- `key`: generates a signing key on HSM, outputting a PEM public key +- `crl`: creates a CRL with the IDP extension and `onlyContainsCACerts = true` from the provided profile and signs it using a signing key already on a HSM, outputting a PEM CRL These modes are set in the `ceremony-type` field of the configuration file. @@ -29,24 +27,30 @@ This tool always generates key pairs such that the public and private key are bo - `ceremony-type`: string describing the ceremony type, `root`. - `pkcs11`: object containing PKCS#11 related fields. + | Field | Description | | --- | --- | | `module` | Path to the PKCS#11 module to use to communicate with a HSM. | | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | | `store-key-in-slot` | Specifies which HSM object slot the generated signing key should be stored in. | | `store-key-with-label` | Specifies the HSM object label for the generated signing key. Both public and private key objects are stored with this label. | + - `key`: object containing key generation related fields. + | Field | Description | | --- | --- | | `type` | Specifies the type of key to be generated, either `rsa` or `ecdsa`. If `rsa` the generated key will have an exponent of 65537 and a modulus length specified by `rsa-mod-length`. If `ecdsa` the curve is specified by `ecdsa-curve`. | - | `ecdsa-curve` | Specifies the ECDSA curve to use when generating key, either `P-224`, `P-256`, `P-384`, or `P-521`. | - | `rsa-mod-length` | Specifies the length of the RSA modulus, either `2048` or `4096`. + | `ecdsa-curve` | Specifies the ECDSA curve to use when generating key, either `P-256`, `P-384`, or `P-521`. | + | `rsa-mod-length` | Specifies the length of the RSA modulus, either `2048` or `4096`. | + - `outputs`: object containing paths to write outputs. + | Field | Description | | --- | --- | | `public-key-path` | Path to store generated PEM public key. | | `certificate-path` | Path to store signed PEM certificate. | -- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#Certificate-profile-format). + +- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#certificate-profile-format). Example: @@ -76,26 +80,32 @@ certificate-profile: This config generates a ECDSA P-384 key in the HSM with the object label `root signing key` and uses this key to sign a self-signed certificate. The public key for the key generated is written to `/home/user/root-signing-pub.pem` and the certificate is written to `/home/user/root-cert.pem`. -### Intermediate or Cross-Certificate ceremony +### Intermediate ceremony -- `ceremony-type`: string describing the ceremony type, `intermediate` or `cross-certificate`. +- `ceremony-type`: string describing the ceremony type, `intermediate`. - `pkcs11`: object containing PKCS#11 related fields. + | Field | Description | | --- | --- | | `module` | Path to the PKCS#11 module to use to communicate with a HSM. 
| | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | | `signing-key-slot` | Specifies which HSM object slot the signing key is in. | | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. | + - `inputs`: object containing paths for inputs + | Field | Description | | --- | --- | - | `public-key-path` | Path to PEM subject public key for certificate. | | `issuer-certificate-path` | Path to PEM issuer certificate. | + | `public-key-path` | Path to PEM subject public key for certificate. | + - `outputs`: object containing paths to write outputs. + | Field | Description | | --- | --- | | `certificate-path` | Path to store signed PEM certificate. | -- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#Certificate-profile-format). + +- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#certificate-profile-format). Example: @@ -106,8 +116,8 @@ pkcs11: signing-key-slot: 0 signing-key-label: root signing key inputs: - public-key-path: /home/user/intermediate-signing-pub.pem issuer-certificate-path: /home/user/root-cert.pem + public-key-path: /home/user/intermediate-signing-pub.pem outputs: certificate-path: /home/user/intermediate-cert.pem certificate-profile: @@ -117,13 +127,11 @@ certificate-profile: country: US not-before: 2020-01-01 12:00:00 not-after: 2040-01-01 12:00:00 - ocsp-url: http://good-guys.com/ocsp crl-url: http://good-guys.com/crl issuer-url: http://good-guys.com/root policies: - oid: 1.2.3 - oid: 4.5.6 - cps-uri: "http://example.com/cps" key-usages: - Digital Signature - Cert Sign @@ -132,161 +140,138 @@ certificate-profile: This config generates an intermediate certificate signed by a key in the HSM, identified by the object label `root signing key` and the object ID `ffff`. The subject key used is taken from `/home/user/intermediate-signing-pub.pem` and the issuer is `/home/user/root-cert.pem`, the resulting certificate is written to `/home/user/intermediate-cert.pem`. -Note: Intermediate certificates always include the extended key usages id-kp-serverAuth as required by 7.1.2.2.g of the CABF Baseline Requirements. Since we also include id-kp-clientAuth in end-entity certificates in boulder we also include it in intermediates, if this changes we may remove this inclusion. +Note: Intermediate certificates always include the extended key usages id-kp-serverAuth as required by 7.1.2.2.g of the CABF Baseline Requirements. -### Cross-CSR ceremony +### Cross-Certificate ceremony -- `ceremony-type`: string describing the ceremony type, `cross-csr`. +- `ceremony-type`: string describing the ceremony type, `cross-certificate`. - `pkcs11`: object containing PKCS#11 related fields. - | Field | Description | - | --- | --- | - | `module` | Path to the PKCS#11 module to use to communicate with a HSM. | - | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | - | `signing-key-slot` | Specifies which HSM object slot the signing key is in. | - | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. | -- `inputs`: object containing paths for inputs - | Field | Description | - | --- | --- | - | `public-key-path` | Path to PEM subject public key for certificate. | -- `outputs`: object containing paths to write outputs. 
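The intermediate-ceremony example above is plain YAML over a small config surface. A sketch of just enough Go structure to round-trip it, assuming gopkg.in/yaml.v3; the real tool defines richer types with validation.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// ceremonyConfig models only the fields shown in the example above.
type ceremonyConfig struct {
	CeremonyType string `yaml:"ceremony-type"`
	PKCS11       struct {
		Module          string `yaml:"module"`
		SigningKeySlot  uint   `yaml:"signing-key-slot"`
		SigningKeyLabel string `yaml:"signing-key-label"`
	} `yaml:"pkcs11"`
	Inputs struct {
		IssuerCertificatePath string `yaml:"issuer-certificate-path"`
		PublicKeyPath         string `yaml:"public-key-path"`
	} `yaml:"inputs"`
	Outputs struct {
		CertificatePath string `yaml:"certificate-path"`
	} `yaml:"outputs"`
}

func main() {
	doc := []byte(`
ceremony-type: intermediate
pkcs11:
  module: /usr/lib/opensc-pkcs11.so
  signing-key-slot: 0
  signing-key-label: root signing key
inputs:
  issuer-certificate-path: /home/user/root-cert.pem
  public-key-path: /home/user/intermediate-signing-pub.pem
outputs:
  certificate-path: /home/user/intermediate-cert.pem
`)
	var c ceremonyConfig
	err := yaml.Unmarshal(doc, &c)
	fmt.Printf("%+v %v\n", c, err)
}
```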
- | Field | Description | - | --- | --- | - | `csr-path` | Path to store PEM CSR for cross-signing, optional. | -- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#Certificate-profile-format). Should only include Subject related fields `common-name`, `organization`, `country`. -Example: - -```yaml -ceremony-type: cross-csr -pkcs11: - module: /usr/lib/opensc-pkcs11.so - signing-key-slot: 0 - signing-key-label: intermediate signing key -inputs: - public-key-path: /home/user/intermediate-signing-pub.pem -outputs: - csr-path: /home/user/csr.pem -certificate-profile: - common-name: CA root - organization: good guys - country: US -``` - -This config generates a CSR signed by a key in the HSM, identified by the object label `intermediate signing key`, and writes it to `/home/user/csr.pem`. - -### OCSP Signing Certificate ceremony - -- `ceremony-type`: string describing the ceremony type, `ocsp-signer`. -- `pkcs11`: object containing PKCS#11 related fields. | Field | Description | | --- | --- | | `module` | Path to the PKCS#11 module to use to communicate with a HSM. | | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | | `signing-key-slot` | Specifies which HSM object slot the signing key is in. | | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. | + - `inputs`: object containing paths for inputs + | Field | Description | | --- | --- | - | `public-key-path` | Path to PEM subject public key for certificate. | | `issuer-certificate-path` | Path to PEM issuer certificate. | + | `public-key-path` | Path to PEM subject public key for certificate. | + | `certificate-to-cross-sign-path` | Path to PEM self-signed certificate that this ceremony is a cross-sign of. | + - `outputs`: object containing paths to write outputs. + | Field | Description | | --- | --- | | `certificate-path` | Path to store signed PEM certificate. | -- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#Certificate-profile-format). The key-usages, ocsp-url, and crl-url fields must not be set. -When generating an OCSP signing certificate the key usages field will be set to just Digital Signature and an EKU extension will be included with the id-kp-OCSPSigning usage. Additionally an id-pkix-ocsp-nocheck extension will be included in the certificate. +- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#certificate-profile-format). 
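+The essential behavior of this ceremony is that identity-critical fields of the new certificate are copied from the certificate being cross-signed, while the issuer certificate supplies the Issuer and Authority Key Identifier. A minimal stdlib-only sketch of that copying, not the ceremony's actual code path (the `loadPEMCert` helper and the paths are illustrative):
+
+```go
+package main
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+)
+
+// loadPEMCert is a hypothetical helper for this sketch: it reads a single
+// PEM-encoded certificate from disk.
+func loadPEMCert(path string) (*x509.Certificate, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	block, _ := pem.Decode(b)
+	if block == nil || block.Type != "CERTIFICATE" {
+		return nil, fmt.Errorf("no CERTIFICATE block in %s", path)
+	}
+	return x509.ParseCertificate(block.Bytes)
+}
+
+func main() {
+	// The certificate being cross-signed (certificate-to-cross-sign-path).
+	tbcs, err := loadPEMCert("/home/user/root-cert-2.pem")
+	if err != nil {
+		panic(err)
+	}
+	template := &x509.Certificate{
+		// These fields mirror the certificate being cross-signed, so both
+		// versions of the CA look the same to relying parties.
+		ExtKeyUsage:    tbcs.ExtKeyUsage,
+		MaxPathLenZero: tbcs.MaxPathLenZero,
+		SubjectKeyId:   tbcs.SubjectKeyId,
+	}
+	fmt.Printf("copied %d EKU(s), SKID %x\n", len(template.ExtKeyUsage), template.SubjectKeyId)
+}
+```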
Example:

```yaml
-ceremony-type: ocsp-signer
+ceremony-type: cross-certificate
 pkcs11:
   module: /usr/lib/opensc-pkcs11.so
   signing-key-slot: 0
-  signing-key-label: intermediate signing key
+  signing-key-label: root signing key
 inputs:
-  public-key-path: /home/user/ocsp-signer-signing-pub.pem
-  issuer-certificate-path: /home/user/intermediate-cert.pem
+  issuer-certificate-path: /home/user/root-cert.pem
+  public-key-path: /home/user/root-signing-pub-2.pem
+  certificate-to-cross-sign-path: /home/user/root-cert-2.pem
 outputs:
-  certificate-path: /home/user/ocsp-signer-cert.pem
+  certificate-path: /home/user/root-cert-2-cross.pem
 certificate-profile:
   signature-algorithm: ECDSAWithSHA384
-  common-name: CA OCSP signer
+  common-name: CA root 2
   organization: good guys
   country: US
   not-before: 2020-01-01 12:00:00
   not-after: 2040-01-01 12:00:00
+  crl-url: http://good-guys.com/crl
   issuer-url: http://good-guys.com/root
+  policies:
+    - oid: 2.23.140.1.2.1
+  key-usages:
+    - Digital Signature
+    - Cert Sign
+    - CRL Sign
```

-This config generates a delegated OCSP signing certificate signed by a key in the HSM, identified by the object label `intermediate signing key` and the object ID `ffff`. The subject key used is taken from `/home/user/ocsp-signer-signing-pub.pem` and the issuer is `/home/user/intermediate-cert.pem`, the resulting certificate is written to `/home/user/ocsp-signer-cert.pem`.
+This config generates a cross-sign of the already-created "CA root 2", issued from the likewise already-created "CA root". The subject key used is taken from `/home/user/root-signing-pub-2.pem`. The EKUs and Subject Key Identifier are taken from `/home/user/root-cert-2.pem`, the certificate being cross-signed. The issuer is `/home/user/root-cert.pem`, and the Issuer and Authority Key Identifier fields are taken from that cert. The resulting certificate is written to `/home/user/root-cert-2-cross.pem`.

-### CRL Signing Certificate ceremony
+### Cross-CSR ceremony

-- `ceremony-type`: string describing the ceremony type, `crl-signer`.
+- `ceremony-type`: string describing the ceremony type, `cross-csr`.
- `pkcs11`: object containing PKCS#11 related fields.
+
  | Field | Description |
  | --- | --- |
  | `module` | Path to the PKCS#11 module to use to communicate with a HSM. |
  | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. |
  | `signing-key-slot` | Specifies which HSM object slot the signing key is in. |
  | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. |
+
- `inputs`: object containing paths for inputs
+
  | Field | Description |
  | --- | --- |
  | `public-key-path` | Path to PEM subject public key for certificate. |
- | `issuer-certificate-path` | Path to PEM issuer certificate. |
+
- `outputs`: object containing paths to write outputs.
+
  | Field | Description |
  | --- | --- |
- | `certificate-path` | Path to store signed PEM certificate. |
+ | `csr-path` | Path to store PEM CSR for cross-signing, optional. |
-- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#Certificate-profile-format). The key-usages, ocsp-url, and crl-url fields must not be set.

-When generating a CRL signing certificate the key usages field will be set to just CRL Sign.
+- `certificate-profile`: object containing profile for certificate to generate. Fields are documented [below](#certificate-profile-format). Should only include Subject related fields `common-name`, `organization`, `country`.
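+Once the ceremony has produced the CSR (see the example below), it can be sanity-checked offline with the standard library alone; a minimal sketch, assuming the output path from that example:
+
+```go
+package main
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+)
+
+func main() {
+	// Path assumed from the cross-csr example below.
+	b, err := os.ReadFile("/home/user/csr.pem")
+	if err != nil {
+		panic(err)
+	}
+	block, _ := pem.Decode(b)
+	if block == nil {
+		panic("no PEM block found")
+	}
+	csr, err := x509.ParseCertificateRequest(block.Bytes)
+	if err != nil {
+		panic(err)
+	}
+	// CheckSignature verifies the CSR's self-signature, demonstrating
+	// possession of the private key held in the HSM.
+	if err := csr.CheckSignature(); err != nil {
+		panic(err)
+	}
+	fmt.Println("CSR verifies; subject:", csr.Subject.String())
+}
+```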
Example: ```yaml -ceremony-type: crl-signer +ceremony-type: cross-csr pkcs11: module: /usr/lib/opensc-pkcs11.so signing-key-slot: 0 signing-key-label: intermediate signing key inputs: - public-key-path: /home/user/crl-signer-signing-pub.pem - issuer-certificate-path: /home/user/intermediate-cert.pem + public-key-path: /home/user/intermediate-signing-pub.pem outputs: - certificate-path: /home/user/crl-signer-cert.pem + csr-path: /home/user/csr.pem certificate-profile: - signature-algorithm: ECDSAWithSHA384 - common-name: CA CRL signer + common-name: CA root organization: good guys country: US - not-before: 2020-01-01 12:00:00 - not-after: 2040-01-01 12:00:00 - issuer-url: http://good-guys.com/root ``` -This config generates a delegated CRL signing certificate signed by a key in the HSM, identified by the object label `intermediate signing key` and the object ID `ffff`. The subject key used is taken from `/home/user/crl-signer-signing-pub.pem` and the issuer is `/home/user/intermediate-cert.pem`, the resulting certificate is written to `/home/user/crl-signer-cert.pem`. +This config generates a CSR signed by a key in the HSM, identified by the object label `intermediate signing key`, and writes it to `/home/user/csr.pem`. ### Key ceremony - `ceremony-type`: string describing the ceremony type, `key`. - `pkcs11`: object containing PKCS#11 related fields. + | Field | Description | | --- | --- | | `module` | Path to the PKCS#11 module to use to communicate with a HSM. | | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | | `store-key-in-slot` | Specifies which HSM object slot the generated signing key should be stored in. | | `store-key-with-label` | Specifies the HSM object label for the generated signing key. Both public and private key objects are stored with this label. | + - `key`: object containing key generation related fields. + | Field | Description | | --- | --- | | `type` | Specifies the type of key to be generated, either `rsa` or `ecdsa`. If `rsa` the generated key will have an exponent of 65537 and a modulus length specified by `rsa-mod-length`. If `ecdsa` the curve is specified by `ecdsa-curve`. | - | `ecdsa-curve` | Specifies the ECDSA curve to use when generating key, either `P-224`, `P-256`, `P-384`, or `P-521`. | - | `rsa-mod-length` | Specifies the length of the RSA modulus, either `2048` or `4096`. + | `ecdsa-curve` | Specifies the ECDSA curve to use when generating key, either `P-256`, `P-384`, or `P-521`. | + | `rsa-mod-length` | Specifies the length of the RSA modulus, either `2048` or `4096`. | + - `outputs`: object containing paths to write outputs. + | Field | Description | | --- | --- | | `public-key-path` | Path to store generated PEM public key. | @@ -308,73 +293,32 @@ outputs: This config generates an ECDSA P-384 key in the HSM with the object label `intermediate signing key`. The public key is written to `/home/user/intermediate-signing-pub.pem`. -### OCSP Response ceremony - -- `ceremony-type`: string describing the ceremony type, `ocsp-response`. -- `pkcs11`: object containing PKCS#11 related fields. - | Field | Description | - | --- | --- | - | `module` | Path to the PKCS#11 module to use to communicate with a HSM. | - | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. | - | `signing-key-slot` | Specifies which HSM object slot the signing key is in. 
- | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. |
-- `inputs`: object containing paths for inputs
- | Field | Description |
- | --- | --- |
- | `certificate-path` | Path to PEM certificate to create a response for. |
- | `issuer-certificate-path` | Path to PEM issuer certificate. |
- | `delegated-issuer-certificate-path` | Path to PEM delegated issuer certificate, if one is being used. |
-- `outputs`: object containing paths to write outputs.
- | Field | Description |
- | --- | --- |
- | `response-path` | Path to store signed base64 encoded response. |
-- `ocsp-profile`: object containing profile for the OCSP response.
- | Field | Description |
- | --- | --- |
- | `this-update` | Specifies the OCSP response thisUpdate date, in the format `2006-01-02 15:04:05`. The time will be interpreted as UTC. |
- | `next-update` | Specifies the OCSP response nextUpdate date, in the format `2006-01-02 15:04:05`. The time will be interpreted as UTC. |
- | `status` | Specifies the OCSP response status, either `good` or `revoked`. |
-
-Example:
-
-```yaml
-ceremony-type: ocsp-response
-pkcs11:
-  module: /usr/lib/opensc-pkcs11.so
-  signing-key-slot: 0
-  signing-key-label: root signing key
-inputs:
-  certificate-path: /home/user/certificate.pem
-  issuer-certificate-path: /home/user/root-cert.pem
-outputs:
-  response-path: /home/user/ocsp-resp.b64
-ocsp-profile:
-  this-update: 2020-01-01 12:00:00
-  next-update: 2021-01-01 12:00:00
-  status: good
-```
-
-This config generates a OCSP response signed by a key in the HSM, identified by the object label `root signing key` and object ID `ffff`. The response will be for the certificate in `/home/user/certificate.pem`, and will be written to `/home/user/ocsp-resp.b64`.
-
### CRL ceremony

- `ceremony-type`: string describing the ceremony type, `crl`.
- `pkcs11`: object containing PKCS#11 related fields.
+
  | Field | Description |
  | --- | --- |
  | `module` | Path to the PKCS#11 module to use to communicate with a HSM. |
  | `pin` | Specifies the login PIN, should only be provided if the HSM device requires one to interact with the slot. |
  | `signing-key-slot` | Specifies which HSM object slot the signing key is in. |
  | `signing-key-label` | Specifies the HSM object label for the signing keypair's public key. |
+
- `inputs`: object containing paths for inputs
+
  | Field | Description |
  | --- | --- |
  | `issuer-certificate-path` | Path to PEM issuer certificate. |
+
- `outputs`: object containing paths to write outputs.
+
  | Field | Description |
  | --- | --- |
  | `crl-path` | Path to store signed PEM CRL. |
+
- `crl-profile`: object containing profile for the CRL.
+
  | Field | Description |
  | --- | --- |
  | `this-update` | Specifies the CRL thisUpdate date, in the format `2006-01-02 15:04:05`. The time will be interpreted as UTC. |
@@ -403,7 +347,7 @@ crl-profile:
     revocation-date: 2019-12-31 12:00:00
```

-This config generates a CRL signed by a key in the HSM, identified by the object label `root signing key` and object ID `ffff`. The CRL will have the number `80` and will contain revocation information for the certificate `/home/user/revoked-cert.pem`
+This config generates a CRL, which may contain only subordinate CA certificates, signed by a key in the HSM identified by the object label `root signing key` and object ID `ffff`. The CRL will have the number `80` and will contain revocation information for the certificate `/home/user/revoked-cert.pem`.

Each of the revoked certificates provided is checked to ensure it has the `IsCA` flag set to `true`.
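+The resulting CRL can be checked out-of-band with the standard library; a minimal sketch, assuming illustrative paths for the CRL (`crl-path` output) and its issuer certificate:
+
+```go
+package main
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+)
+
+// mustDecodePEM is a small helper for this sketch only.
+func mustDecodePEM(path string) []byte {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		panic(err)
+	}
+	block, _ := pem.Decode(b)
+	if block == nil {
+		panic("no PEM block in " + path)
+	}
+	return block.Bytes
+}
+
+func main() {
+	// Paths are illustrative; substitute the ceremony's actual outputs.
+	crl, err := x509.ParseRevocationList(mustDecodePEM("/home/user/crl.pem"))
+	if err != nil {
+		panic(err)
+	}
+	issuer, err := x509.ParseCertificate(mustDecodePEM("/home/user/root-cert.pem"))
+	if err != nil {
+		panic(err)
+	}
+	// CheckSignatureFrom confirms the CRL was signed by the issuer's key.
+	if err := crl.CheckSignatureFrom(issuer); err != nil {
+		panic(err)
+	}
+	fmt.Printf("CRL number %s with %d entries\n", crl.Number, len(crl.RevokedCertificateEntries))
+}
+```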
### Certificate profile format

@@ -417,8 +361,7 @@ The certificate profile defines a restricted set of fields that are used to gene
| `country` | Specifies the subject country |
| `not-before` | Specifies the certificate notBefore date, in the format `2006-01-02 15:04:05`. The time will be interpreted as UTC. |
| `not-after` | Specifies the certificate notAfter date, in the format `2006-01-02 15:04:05`. The time will be interpreted as UTC. |
-| `ocsp-url` | Specifies the AIA OCSP responder URL |
| `crl-url` | Specifies the cRLDistributionPoints URL |
| `issuer-url` | Specifies the AIA caIssuer URL |
-| `policies` | Specifies contents of a certificatePolicies extension. Should contain a list of policies with the fields `oid`, indicating the policy OID, and a `cps-uri` field, containing the CPS URI to use, if the policy should contain a id-qt-cps qualifier. Only single CPS values are supported. |
+| `policies` | Specifies contents of a certificatePolicies extension. Should contain a list of policies with the field `oid`, indicating the policy OID. |
| `key-usages` | Specifies the list of key usage bits to set; the list can contain `Digital Signature`, `CRL Sign`, and `Cert Sign` |

diff --git a/cmd/ceremony/cert.go b/cmd/ceremony/cert.go
index d244c7d1088..f2dfd734f3a 100644
--- a/cmd/ceremony/cert.go
+++ b/cmd/ceremony/cert.go
@@ -1,8 +1,8 @@
-package notmain
+package main

import (
	"crypto"
-	"crypto/sha1"
+	"crypto/sha256"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
@@ -13,13 +13,10 @@ import (
	"strconv"
	"strings"
	"time"
-
-	"github.com/letsencrypt/boulder/policyasn1"
)

type policyInfoConfig struct {
-	OID    string
-	CPSURI string `yaml:"cps-uri"`
+	OID string
}

// certProfile contains the information required to generate a certificate
@@ -44,9 +41,6 @@ type certProfile struct {
	// always be UTC.
	NotAfter string `yaml:"not-after"`

-	// OCSPURL should contain the URL at which a OCSP responder that
-	// can respond to OCSP requests for this certificate operates
-	OCSPURL string `yaml:"ocsp-url"`
	// CRLURL should contain the URL at which CRLs for this certificate
	// can be found
	CRLURL string `yaml:"crl-url"`
@@ -55,10 +49,9 @@ type certProfile struct {
	// certificate
	IssuerURL string `yaml:"issuer-url"`

-	// PolicyOIDs should contain any OIDs to be inserted in a certificate
-	// policies extension. If the CPSURI field of a policyInfoConfig element
-	// is set it will result in a PolicyInformation structure containing a
-	// single id-qt-cps type qualifier indicating the CPS URI.
+	// Policies should contain any OIDs to be inserted in a certificate
+	// policies extension. It should be empty for Root certs, and contain the
+	// BRs "domain-validated" Reserved Policy Identifier for Intermediates.
Policies []policyInfoConfig `yaml:"policies"` // KeyUsages should contain the set of key usage bits to set @@ -80,8 +73,6 @@ type certType int const ( rootCert certType = iota intermediateCert - ocspCert - crlCert crossCert requestCert ) @@ -106,9 +97,6 @@ func (profile *certProfile) verifyProfile(ct certType) error { if profile.SignatureAlgorithm != "" { return errors.New("signature-algorithm cannot be set for a CSR") } - if profile.OCSPURL != "" { - return errors.New("ocsp-url cannot be set for a CSR") - } if profile.CRLURL != "" { return errors.New("crl-url cannot be set for a CSR") } @@ -142,36 +130,40 @@ func (profile *certProfile) verifyProfile(ct certType) error { return errors.New("country is required") } - if ct == intermediateCert { - if profile.CRLURL == "" { - return errors.New("crl-url is required for intermediates") - } - if profile.IssuerURL == "" { - return errors.New("issuer-url is required for intermediates") + if ct == rootCert { + if len(profile.Policies) != 0 { + return errors.New("policies should not be set on root certs") } } - if ct == ocspCert || ct == crlCert { - if len(profile.KeyUsages) != 0 { - return errors.New("key-usages cannot be set for a delegated signer") + if ct == intermediateCert || ct == crossCert { + if profile.CRLURL == "" { + return errors.New("crl-url is required for subordinate CAs") } - if profile.CRLURL != "" { - return errors.New("crl-url cannot be set for a delegated signer") + if profile.IssuerURL == "" { + return errors.New("issuer-url is required for subordinate CAs") } - if profile.OCSPURL != "" { - return errors.New("ocsp-url cannot be set for a delegated signer") + + // BR 7.1.2.10.5 CA Certificate Certificate Policies + // OID 2.23.140.1.2.1 is CABF BRs Domain Validated + if len(profile.Policies) != 1 || profile.Policies[0].OID != "2.23.140.1.2.1" { + return errors.New("policy should be exactly BRs domain-validated for subordinate CAs") } } + return nil } func parseOID(oidStr string) (asn1.ObjectIdentifier, error) { var oid asn1.ObjectIdentifier - for _, a := range strings.Split(oidStr, ".") { + for a := range strings.SplitSeq(oidStr, ".") { i, err := strconv.Atoi(a) if err != nil { return nil, err } + if i <= 0 { + return nil, errors.New("OID components must be >= 1") + } oid = append(oid, i) } return oid, nil @@ -183,34 +175,6 @@ var stringToKeyUsage = map[string]x509.KeyUsage{ "Cert Sign": x509.KeyUsageCertSign, } -var ( - oidExtensionCertificatePolicies = asn1.ObjectIdentifier{2, 5, 29, 32} - - oidOCSPNoCheck = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5} -) - -func buildPolicies(policies []policyInfoConfig) (pkix.Extension, error) { - policyExt := pkix.Extension{Id: oidExtensionCertificatePolicies} - var policyInfo []policyasn1.PolicyInformation - for _, p := range policies { - oid, err := parseOID(p.OID) - if err != nil { - return pkix.Extension{}, err - } - pi := policyasn1.PolicyInformation{Policy: oid} - if p.CPSURI != "" { - pi.Qualifiers = []policyasn1.PolicyQualifier{{OID: policyasn1.CPSQualifierOID, Value: p.CPSURI}} - } - policyInfo = append(policyInfo, pi) - } - v, err := asn1.Marshal(policyInfo) - if err != nil { - return pkix.Extension{}, err - } - policyExt.Value = v - return policyExt, nil -} - func generateSKID(pk []byte) ([]byte, error) { var pkixPublicKey struct { Algo pkix.AlgorithmIdentifier @@ -219,16 +183,22 @@ func generateSKID(pk []byte) ([]byte, error) { if _, err := asn1.Unmarshal(pk, &pkixPublicKey); err != nil { return nil, err } - skid := sha1.Sum(pkixPublicKey.BitString.Bytes) - return 
skid[:], nil + + // RFC 7093 Section 2 Additional Methods for Generating Key Identifiers: The + // keyIdentifier [may be] composed of the leftmost 160-bits of the SHA-256 + // hash of the value of the BIT STRING subjectPublicKey (excluding the tag, + // length, and number of unused bits). + skid := sha256.Sum256(pkixPublicKey.BitString.Bytes) + return skid[0:20:20], nil } // makeTemplate generates the certificate template for use in x509.CreateCertificate -func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, ct certType) (*x509.Certificate, error) { - var ocspServer []string - if profile.OCSPURL != "" { - ocspServer = []string{profile.OCSPURL} +func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, tbcs *x509.Certificate, ct certType) (*x509.Certificate, error) { + // Handle "unrestricted" vs "restricted" subordinate CA profile specifics. + if ct == crossCert && tbcs == nil { + return nil, fmt.Errorf("toBeCrossSigned cert field was nil, but was required to gather EKUs for the lint cert") } + var crlDistributionPoints []string if profile.CRLURL != "" { crlDistributionPoints = []string{profile.CRLURL} @@ -257,11 +227,6 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, ct } ku |= kuBit } - if ct == ocspCert { - ku = x509.KeyUsageDigitalSignature - } else if ct == crlCert { - ku = x509.KeyUsageCRLSign - } if ku == 0 { return nil, errors.New("at least one key usage must be set") } @@ -271,7 +236,6 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, ct BasicConstraintsValid: true, IsCA: true, Subject: profile.Subject(), - OCSPServer: ocspServer, CRLDistributionPoints: crlDistributionPoints, IssuingCertificateURL: issuingCertificateURL, KeyUsage: ku, @@ -284,44 +248,55 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, ct return nil, fmt.Errorf("unsupported signature algorithm %q", profile.SignatureAlgorithm) } cert.SignatureAlgorithm = sigAlg - notBefore, err := time.Parse(configDateLayout, profile.NotBefore) + notBefore, err := time.Parse(time.DateTime, profile.NotBefore) if err != nil { return nil, err } - cert.NotBefore = notBefore - notAfter, err := time.Parse(configDateLayout, profile.NotAfter) + notAfter, err := time.Parse(time.DateTime, profile.NotAfter) if err != nil { return nil, err } + validity := notAfter.Add(time.Second).Sub(notBefore) + if ct == rootCert && validity >= 9132*24*time.Hour { + // The value 9132 comes directly from the BRs, where it is described + // as "approximately 25 years". It's equal to 365 * 25 + 7, to allow + // for some leap years. + return nil, fmt.Errorf("root cert validity too large: %s >= 25 years", validity) + } else if (ct == intermediateCert || ct == crossCert) && validity >= 8*365*24*time.Hour { + // Our CP/CPS states "at most 8 years", so we calculate that number + // in the most conservative way (i.e. not accounting for leap years) + // to give ourselves a buffer. 
+ return nil, fmt.Errorf("subordinate CA cert validity too large: %s >= 8 years", validity) + } + cert.NotBefore = notBefore cert.NotAfter = notAfter } switch ct { - // rootCert and crossCert do not get EKU or MaxPathZero - case ocspCert: - cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning} - // ASN.1 NULL is 0x05, 0x00 - ocspNoCheckExt := pkix.Extension{Id: oidOCSPNoCheck, Value: []byte{5, 0}} - cert.ExtraExtensions = append(cert.ExtraExtensions, ocspNoCheckExt) - cert.IsCA = false - case crlCert: - cert.IsCA = false + // rootCert does not get EKU or MaxPathZero. + // BR 7.1.2.1.2 Root CA Extensions + // Extension Presence Critical Description + // extKeyUsage MUST NOT N - case requestCert, intermediateCert: - // id-kp-serverAuth and id-kp-clientAuth are included in intermediate - // certificates in order to technically constrain them. id-kp-serverAuth - // is required by 7.1.2.2.g of the CABF Baseline Requirements, but - // id-kp-clientAuth isn't. We include id-kp-clientAuth as we also include - // it in our end-entity certificates. - cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} + // id-kp-serverAuth is included in intermediate certificates, as required by + // Section 7.1.2.10.6 of the CA/BF Baseline Requirements. + // id-kp-clientAuth is excluded, as required by section 3.2.1 of the Chrome + // Root Program Requirements. + cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} cert.MaxPathLenZero = true + case crossCert: + cert.ExtKeyUsage = tbcs.ExtKeyUsage + cert.MaxPathLenZero = tbcs.MaxPathLenZero + // The SKID needs to match the previous SKID, no matter how it was computed. + cert.SubjectKeyId = tbcs.SubjectKeyId } - if len(profile.Policies) > 0 { - policyExt, err := buildPolicies(profile.Policies) + for _, policyConfig := range profile.Policies { + x509OID, err := x509.ParseOID(policyConfig.OID) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to parse %s as OID: %w", policyConfig.OID, err) } - cert.ExtraExtensions = append(cert.ExtraExtensions, policyExt) + cert.Policies = append(cert.Policies, x509OID) } return cert, nil @@ -336,7 +311,7 @@ func makeTemplate(randReader io.Reader, profile *certProfile, pubKey []byte, ct type failReader struct{} func (fr *failReader) Read([]byte) (int, error) { - return 0, errors.New("Empty reader used by x509.CreateCertificate") + return 0, errors.New("empty reader used by x509.CreateCertificate") } func generateCSR(profile *certProfile, signer crypto.Signer) ([]byte, error) { diff --git a/cmd/ceremony/cert_test.go b/cmd/ceremony/cert_test.go index bd124f7d4a6..2cdf78f8c67 100644 --- a/cmd/ceremony/cert_test.go +++ b/cmd/ceremony/cert_test.go @@ -1,20 +1,24 @@ -package notmain +package main import ( - "bytes" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" - "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "encoding/hex" "errors" "fmt" + "io/fs" + "math/big" "testing" + "time" + + "github.com/miekg/pkcs11" "github.com/letsencrypt/boulder/pkcs11helpers" "github.com/letsencrypt/boulder/test" - "github.com/miekg/pkcs11" ) // samplePubkey returns a slice of bytes containing an encoded @@ -38,6 +42,8 @@ func TestParseOID(t *testing.T) { test.AssertError(t, err, "parseOID accepted an empty OID") _, err = parseOID("a.b.c") test.AssertError(t, err, "parseOID accepted an OID containing non-ints") + _, err = parseOID("1.0.2") + test.AssertError(t, err, "parseOID accepted an OID containing zero") oid, err := parseOID("1.2.3") test.AssertNotError(t, 
err, "parseOID failed with a valid OID") test.Assert(t, oid.Equal(asn1.ObjectIdentifier{1, 2, 3}), "parseOID returned incorrect OID") @@ -57,7 +63,7 @@ func TestMakeSubject(t *testing.T) { test.AssertDeepEquals(t, profile.Subject(), expectedSubject) } -func TestMakeTemplate(t *testing.T) { +func TestMakeTemplateRoot(t *testing.T) { s, ctx := pkcs11helpers.NewSessionWithMock() profile := &certProfile{} randReader := newRandReader(s) @@ -65,74 +71,71 @@ func TestMakeTemplate(t *testing.T) { ctx.GenerateRandomFunc = realRand profile.NotBefore = "1234" - _, err := makeTemplate(randReader, profile, pubKey, rootCert) + _, err := makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail with invalid not before") profile.NotBefore = "2018-05-18 11:31:00" profile.NotAfter = "1234" - _, err = makeTemplate(randReader, profile, pubKey, rootCert) + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail with invalid not after") profile.NotAfter = "2018-05-18 11:31:00" profile.SignatureAlgorithm = "nope" - _, err = makeTemplate(randReader, profile, pubKey, rootCert) + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail with invalid signature algorithm") profile.SignatureAlgorithm = "SHA256WithRSA" ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) { return nil, errors.New("bad") } - _, err = makeTemplate(randReader, profile, pubKey, rootCert) + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail when GenerateRandom failed") ctx.GenerateRandomFunc = realRand - _, err = makeTemplate(randReader, profile, pubKey, rootCert) + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail with empty key usages") profile.KeyUsages = []string{"asd"} - _, err = makeTemplate(randReader, profile, pubKey, rootCert) + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertError(t, err, "makeTemplate didn't fail with invalid key usages") profile.KeyUsages = []string{"Digital Signature", "CRL Sign"} profile.Policies = []policyInfoConfig{{}} - _, err = makeTemplate(randReader, profile, pubKey, rootCert) - test.AssertError(t, err, "makeTemplate didn't fail with invalid policy OID") + _, err = makeTemplate(randReader, profile, pubKey, nil, rootCert) + test.AssertError(t, err, "makeTemplate didn't fail with invalid (empty) policy OID") - profile.Policies = []policyInfoConfig{{OID: "1.2.3"}, {OID: "1.2.3.4", CPSURI: "hello"}} + profile.Policies = []policyInfoConfig{{OID: "1.2.3"}, {OID: "1.2.3.4"}} profile.CommonName = "common name" profile.Organization = "organization" profile.Country = "country" - profile.OCSPURL = "ocsp" profile.CRLURL = "crl" profile.IssuerURL = "issuer" - cert, err := makeTemplate(randReader, profile, pubKey, rootCert) + cert, err := makeTemplate(randReader, profile, pubKey, nil, rootCert) test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected") test.AssertEquals(t, cert.Subject.CommonName, profile.CommonName) test.AssertEquals(t, len(cert.Subject.Organization), 1) test.AssertEquals(t, cert.Subject.Organization[0], profile.Organization) test.AssertEquals(t, len(cert.Subject.Country), 1) test.AssertEquals(t, cert.Subject.Country[0], profile.Country) - test.AssertEquals(t, len(cert.OCSPServer), 1) - test.AssertEquals(t, cert.OCSPServer[0], 
profile.OCSPURL) test.AssertEquals(t, len(cert.CRLDistributionPoints), 1) test.AssertEquals(t, cert.CRLDistributionPoints[0], profile.CRLURL) test.AssertEquals(t, len(cert.IssuingCertificateURL), 1) test.AssertEquals(t, cert.IssuingCertificateURL[0], profile.IssuerURL) test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature|x509.KeyUsageCRLSign) - test.AssertEquals(t, len(cert.ExtraExtensions), 1) + test.AssertEquals(t, len(cert.Policies), 2) test.AssertEquals(t, len(cert.ExtKeyUsage), 0) - cert, err = makeTemplate(randReader, profile, pubKey, intermediateCert) + cert, err = makeTemplate(randReader, profile, pubKey, nil, intermediateCert) test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected") test.Assert(t, cert.MaxPathLenZero, "MaxPathLenZero not set in intermediate template") - test.AssertEquals(t, len(cert.ExtKeyUsage), 2) - test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageClientAuth) - test.AssertEquals(t, cert.ExtKeyUsage[1], x509.ExtKeyUsageServerAuth) + test.AssertEquals(t, len(cert.ExtKeyUsage), 1) + test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageServerAuth) } -func TestMakeTemplateCrossCertificate(t *testing.T) { +func TestMakeTemplateRestrictedCrossCertificate(t *testing.T) { s, ctx := pkcs11helpers.NewSessionWithMock() + ctx.GenerateRandomFunc = realRand randReader := newRandReader(s) pubKey := samplePubkey() profile := &certProfile{ @@ -141,104 +144,47 @@ func TestMakeTemplateCrossCertificate(t *testing.T) { Organization: "organization", Country: "country", KeyUsages: []string{"Digital Signature", "CRL Sign"}, - OCSPURL: "ocsp", CRLURL: "crl", IssuerURL: "issuer", - NotAfter: "2018-05-18 11:31:00", - NotBefore: "2018-05-18 11:31:00", + NotAfter: "2020-10-10 11:31:00", + NotBefore: "2020-10-10 11:31:00", } - ctx.GenerateRandomFunc = realRand + tbcsCert := x509.Certificate{ + SerialNumber: big.NewInt(666), + Subject: pkix.Name{ + Organization: []string{"While Eek Ayote"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } - cert, err := makeTemplate(randReader, profile, pubKey, crossCert) + cert, err := makeTemplate(randReader, profile, pubKey, &tbcsCert, crossCert) test.AssertNotError(t, err, "makeTemplate failed when everything worked as expected") test.Assert(t, !cert.MaxPathLenZero, "MaxPathLenZero was set in cross-sign") - test.AssertEquals(t, len(cert.ExtKeyUsage), 0) -} - -func TestMakeTemplateOCSP(t *testing.T) { - s, ctx := pkcs11helpers.NewSessionWithMock() - ctx.GenerateRandomFunc = realRand - randReader := newRandReader(s) - profile := &certProfile{ - SignatureAlgorithm: "SHA256WithRSA", - CommonName: "common name", - Organization: "organization", - Country: "country", - OCSPURL: "ocsp", - CRLURL: "crl", - IssuerURL: "issuer", - NotAfter: "2018-05-18 11:31:00", - NotBefore: "2018-05-18 11:31:00", - } - pubKey := samplePubkey() - - cert, err := makeTemplate(randReader, profile, pubKey, ocspCert) - test.AssertNotError(t, err, "makeTemplate failed") - - test.Assert(t, !cert.IsCA, "IsCA is set") - // Check KU is only KeyUsageDigitalSignature - test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature) - // Check there is a single EKU with id-kp-OCSPSigning test.AssertEquals(t, len(cert.ExtKeyUsage), 1) - test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageOCSPSigning) - // Check ExtraExtensions contains a single 
id-pkix-ocsp-nocheck - hasExt := false - asnNULL := []byte{5, 0} - for _, ext := range cert.ExtraExtensions { - if ext.Id.Equal(oidOCSPNoCheck) { - if hasExt { - t.Error("template contains multiple id-pkix-ocsp-nocheck extensions") - } - hasExt = true - if !bytes.Equal(ext.Value, asnNULL) { - t.Errorf("id-pkix-ocsp-nocheck has unexpected content: want %x, got %x", asnNULL, ext.Value) - } - } - } - test.Assert(t, hasExt, "template doesn't contain id-pkix-ocsp-nocheck extensions") -} - -func TestMakeTemplateCRL(t *testing.T) { - s, ctx := pkcs11helpers.NewSessionWithMock() - ctx.GenerateRandomFunc = realRand - randReader := newRandReader(s) - profile := &certProfile{ - SignatureAlgorithm: "SHA256WithRSA", - CommonName: "common name", - Organization: "organization", - Country: "country", - OCSPURL: "ocsp", - CRLURL: "crl", - IssuerURL: "issuer", - NotAfter: "2018-05-18 11:31:00", - NotBefore: "2018-05-18 11:31:00", - } - pubKey := samplePubkey() - - cert, err := makeTemplate(randReader, profile, pubKey, crlCert) - test.AssertNotError(t, err, "makeTemplate failed") - - test.Assert(t, !cert.IsCA, "IsCA is set") - test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageCRLSign) + test.AssertEquals(t, cert.ExtKeyUsage[0], x509.ExtKeyUsageServerAuth) } func TestVerifyProfile(t *testing.T) { for _, tc := range []struct { profile certProfile - certType certType + certType []certType expectedErr string }{ { profile: certProfile{}, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "not-before is required", }, { profile: certProfile{ NotBefore: "a", }, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "not-after is required", }, { @@ -246,7 +192,7 @@ func TestVerifyProfile(t *testing.T) { NotBefore: "a", NotAfter: "b", }, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "signature-algorithm is required", }, { @@ -255,7 +201,7 @@ func TestVerifyProfile(t *testing.T) { NotAfter: "b", SignatureAlgorithm: "c", }, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "common-name is required", }, { @@ -265,7 +211,7 @@ func TestVerifyProfile(t *testing.T) { SignatureAlgorithm: "c", CommonName: "d", }, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "organization is required", }, { @@ -276,7 +222,7 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", }, - certType: intermediateCert, + certType: []certType{intermediateCert, crossCert}, expectedErr: "country is required", }, { @@ -287,10 +233,9 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", - OCSPURL: "g", }, - certType: intermediateCert, - expectedErr: "crl-url is required for intermediates", + certType: []certType{intermediateCert, crossCert}, + expectedErr: "crl-url is required for subordinate CAs", }, { profile: certProfile{ @@ -300,11 +245,10 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", - OCSPURL: "g", CRLURL: "h", }, - certType: intermediateCert, - expectedErr: "issuer-url is required for intermediates", + certType: []certType{intermediateCert, crossCert}, + expectedErr: "issuer-url is required for subordinate CAs", }, { profile: certProfile{ @@ -314,90 +258,11 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", + CRLURL: "h", + IssuerURL: "i", }, - certType: rootCert, - }, - { - 
profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - KeyUsages: []string{"j"}, - }, - certType: ocspCert, - expectedErr: "key-usages cannot be set for a delegated signer", - }, - { - profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - CRLURL: "i", - }, - certType: ocspCert, - expectedErr: "crl-url cannot be set for a delegated signer", - }, - { - profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - OCSPURL: "h", - }, - certType: ocspCert, - expectedErr: "ocsp-url cannot be set for a delegated signer", - }, - { - profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - }, - certType: ocspCert, - }, - { - profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - KeyUsages: []string{"j"}, - }, - certType: crlCert, - expectedErr: "key-usages cannot be set for a delegated signer", - }, - { - profile: certProfile{ - NotBefore: "a", - NotAfter: "b", - SignatureAlgorithm: "c", - CommonName: "d", - Organization: "e", - Country: "f", - IssuerURL: "g", - CRLURL: "i", - }, - certType: crlCert, - expectedErr: "crl-url cannot be set for a delegated signer", + certType: []certType{intermediateCert, crossCert}, + expectedErr: "policy should be exactly BRs domain-validated for subordinate CAs", }, { profile: certProfile{ @@ -407,11 +272,12 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", - IssuerURL: "g", - OCSPURL: "h", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{{OID: "1.2.3"}, {OID: "4.5.6"}}, }, - certType: crlCert, - expectedErr: "ocsp-url cannot be set for a delegated signer", + certType: []certType{intermediateCert, crossCert}, + expectedErr: "policy should be exactly BRs domain-validated for subordinate CAs", }, { profile: certProfile{ @@ -421,75 +287,68 @@ func TestVerifyProfile(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", - IssuerURL: "g", }, - certType: crlCert, + certType: []certType{rootCert}, }, { profile: certProfile{ NotBefore: "a", }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "not-before cannot be set for a CSR", }, { profile: certProfile{ NotAfter: "a", }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "not-after cannot be set for a CSR", }, { profile: certProfile{ SignatureAlgorithm: "a", }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "signature-algorithm cannot be set for a CSR", }, - { - profile: certProfile{ - OCSPURL: "a", - }, - certType: requestCert, - expectedErr: "ocsp-url cannot be set for a CSR", - }, { profile: certProfile{ CRLURL: "a", }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "crl-url cannot be set for a CSR", }, { profile: certProfile{ IssuerURL: "a", }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "issuer-url cannot be set for a CSR", }, { profile: certProfile{ - Policies: []policyInfoConfig{ - {OID: "1.2.3"}, {OID: "1.2.3.4", CPSURI: "hello"}}, + Policies: []policyInfoConfig{{OID: "1.2.3"}}, }, - certType: 
requestCert, + certType: []certType{requestCert}, expectedErr: "policies cannot be set for a CSR", }, { profile: certProfile{ KeyUsages: []string{"a"}, }, - certType: requestCert, + certType: []certType{requestCert}, expectedErr: "key-usages cannot be set for a CSR", }, } { - err := tc.profile.verifyProfile(tc.certType) - if err != nil { - if tc.expectedErr != err.Error() { - t.Fatalf("Expected %q, got %q", tc.expectedErr, err.Error()) + for _, ct := range tc.certType { + err := tc.profile.verifyProfile(ct) + if err != nil { + if tc.expectedErr != err.Error() { + t.Fatalf("Expected %q, got %q", tc.expectedErr, err.Error()) + } + } else if tc.expectedErr != "" { + t.Fatalf("verifyProfile didn't fail, expected %q", tc.expectedErr) } - } else if tc.expectedErr != "" { - t.Fatalf("verifyProfile didn't fail, expected %q", tc.expectedErr) } } } @@ -501,7 +360,7 @@ func TestGenerateCSR(t *testing.T) { Country: "country", } - signer, err := rsa.GenerateKey(rand.Reader, 1024) + signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) test.AssertNotError(t, err, "failed to generate test key") csrBytes, err := generateCSR(profile, &wrappedSigner{signer}) @@ -515,3 +374,22 @@ func TestGenerateCSR(t *testing.T) { test.AssertEquals(t, csr.Subject.String(), fmt.Sprintf("CN=%s,O=%s,C=%s", profile.CommonName, profile.Organization, profile.Country)) } + +func TestLoadCert(t *testing.T) { + _, err := loadCert("../../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "should not have errored") + + _, err = loadCert("/path/that/will/not/ever/exist/ever") + test.AssertError(t, err, "should have failed opening certificate at non-existent path") + test.AssertErrorIs(t, err, fs.ErrNotExist) + + _, err = loadCert("../../test/hierarchy/int-e1.key.pem") + test.AssertError(t, err, "should have failed when trying to parse a private key") +} + +func TestGenerateSKID(t *testing.T) { + sha256skid, err := generateSKID(samplePubkey()) + test.AssertNotError(t, err, "Error generating SKID") + test.AssertEquals(t, len(sha256skid), 20) + test.AssertEquals(t, cap(sha256skid), 20) +} diff --git a/cmd/ceremony/crl.go b/cmd/ceremony/crl.go index fec17b3fd01..cde31023dbb 100644 --- a/cmd/ceremony/crl.go +++ b/cmd/ceremony/crl.go @@ -1,21 +1,24 @@ -package notmain +package main import ( "crypto" "crypto/x509" - "crypto/x509/pkix" "encoding/pem" "errors" + "fmt" "math/big" "time" + + "github.com/letsencrypt/boulder/crl/idp" + "github.com/letsencrypt/boulder/linter" ) -func generateCRL(signer crypto.Signer, issuer *x509.Certificate, thisUpdate, nextUpdate time.Time, number int64, revokedCertificates []pkix.RevokedCertificate) ([]byte, error) { +func generateCRL(signer crypto.Signer, issuer *x509.Certificate, thisUpdate, nextUpdate time.Time, number int64, revokedCertificates []x509.RevocationListEntry, skipLints []string) ([]byte, error) { template := &x509.RevocationList{ - RevokedCertificates: revokedCertificates, - Number: big.NewInt(number), - ThisUpdate: thisUpdate, - NextUpdate: nextUpdate, + RevokedCertificateEntries: revokedCertificates, + Number: big.NewInt(number), + ThisUpdate: thisUpdate, + NextUpdate: nextUpdate, } if nextUpdate.Before(thisUpdate) { @@ -32,6 +35,17 @@ func generateCRL(signer crypto.Signer, issuer *x509.Certificate, thisUpdate, nex if nextUpdate.Sub(thisUpdate) > time.Hour*24*365 { return nil, errors.New("nextUpdate must be less than 12 months after thisUpdate") } + // Add the Issuing Distribution Point extension. 
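+	// An Issuing Distribution Point (RFC 5280, Section 5.2.5) with
+	// onlyContainsCACerts set marks this CRL as covering CA certificates
+	// only, matching the CA-only scope of this ceremony.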
+ idp, err := idp.MakeCACertsExt() + if err != nil { + return nil, fmt.Errorf("creating IDP extension: %w", err) + } + template.ExtraExtensions = append(template.ExtraExtensions, *idp) + + err = linter.CheckCRL(template, issuer, signer, skipLints) + if err != nil { + return nil, fmt.Errorf("crl failed pre-issuance lint: %w", err) + } // x509.CreateRevocationList uses an io.Reader here for signing methods that require // a source of randomness. Since PKCS#11 based signing generates needed randomness diff --git a/cmd/ceremony/crl_test.go b/cmd/ceremony/crl_test.go index 9da7a7d52ba..60f951af1ea 100644 --- a/cmd/ceremony/crl_test.go +++ b/cmd/ceremony/crl_test.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto" @@ -18,30 +18,28 @@ import ( ) func TestGenerateCRLTimeBounds(t *testing.T) { - _, err := generateCRL(nil, nil, time.Time{}.Add(time.Hour), time.Time{}, 1, nil) + _, err := generateCRL(nil, nil, time.Now().Add(time.Hour), time.Now(), 1, nil, []string{}) test.AssertError(t, err, "generateCRL did not fail") test.AssertEquals(t, err.Error(), "thisUpdate must be before nextUpdate") _, err = generateCRL(nil, &x509.Certificate{ - NotBefore: time.Time{}.Add(time.Hour), - NotAfter: time.Time{}, - }, time.Time{}, time.Time{}, 1, nil) + NotBefore: time.Now().Add(time.Hour), + NotAfter: time.Now(), + }, time.Now(), time.Now(), 1, nil, []string{}) test.AssertError(t, err, "generateCRL did not fail") test.AssertEquals(t, err.Error(), "thisUpdate is before issuing certificate's notBefore") _, err = generateCRL(nil, &x509.Certificate{ - NotBefore: time.Time{}, - NotAfter: time.Time{}.Add(time.Hour * 2), - }, time.Time{}.Add(time.Hour), time.Time{}.Add(time.Hour*3), 1, nil) + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 2), + }, time.Now().Add(time.Hour), time.Now().Add(time.Hour*3), 1, nil, []string{}) test.AssertError(t, err, "generateCRL did not fail") test.AssertEquals(t, err.Error(), "nextUpdate is after issuing certificate's notAfter") -} -func TestGenerateCRLLength(t *testing.T) { - _, err := generateCRL(nil, &x509.Certificate{ - NotBefore: time.Time{}, - NotAfter: time.Time{}.Add(time.Hour * 24 * 366), - }, time.Time{}, time.Time{}.Add(time.Hour*24*366), 1, nil) + _, err = generateCRL(nil, &x509.Certificate{ + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 370), + }, time.Now(), time.Now().Add(time.Hour*24*366), 1, nil, []string{}) test.AssertError(t, err, "generateCRL did not fail") test.AssertEquals(t, err.Error(), "nextUpdate must be less than 12 months after thisUpdate") } @@ -62,36 +60,79 @@ func (p wrappedSigner) Public() crypto.PublicKey { return p.k.Public() } -func TestGenerateCRL(t *testing.T) { +func TestGenerateCRLLints(t *testing.T) { k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) test.AssertNotError(t, err, "failed to generate test key") - template := &x509.Certificate{ + cert := &x509.Certificate{ Subject: pkix.Name{CommonName: "asd"}, SerialNumber: big.NewInt(7), - NotBefore: time.Time{}, - NotAfter: time.Time{}.Add(time.Hour * 3), + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + IsCA: true, KeyUsage: x509.KeyUsageCRLSign, SubjectKeyId: []byte{1, 2, 3}, } + certBytes, err := x509.CreateCertificate(rand.Reader, cert, cert, k.Public(), k) + test.AssertNotError(t, err, "failed to generate test cert") + cert, err = x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse test cert") + + // This CRL should fail the "e_crl_next_update_invalid" lint because the + // 
validity interval is more than 10 days, and this lint can't tell the + // difference between end-entity and CA CRLs. + _, err = generateCRL(&wrappedSigner{k}, cert, time.Now().Add(time.Hour), time.Now().Add(100*24*time.Hour), 1, []x509.RevocationListEntry{ + { + SerialNumber: big.NewInt(12345), + RevocationTime: time.Now().Add(time.Hour), + }, + }, []string{}) + test.AssertError(t, err, "generateCRL did not fail") + test.AssertContains(t, err.Error(), "e_crl_next_update_invalid") + + // But we can tell it to ignore that lint, too. + _, err = generateCRL(&wrappedSigner{k}, cert, time.Now().Add(time.Hour), time.Now().Add(100*24*time.Hour), 1, []x509.RevocationListEntry{ + { + SerialNumber: big.NewInt(12345), + RevocationTime: time.Now().Add(time.Hour), + }, + }, []string{"e_crl_next_update_invalid"}) + test.AssertNotError(t, err, "generateCRL should have ignored the failing lint") +} + +func TestGenerateCRL(t *testing.T) { + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + + template := &x509.Certificate{ + Subject: pkix.Name{CommonName: "asd"}, + SerialNumber: big.NewInt(7), + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + IsCA: true, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCRLSign, + SubjectKeyId: []byte{1, 2, 3}, + } + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, k.Public(), k) test.AssertNotError(t, err, "failed to generate test cert") cert, err := x509.ParseCertificate(certBytes) test.AssertNotError(t, err, "failed to parse test cert") - crlPEM, err := generateCRL(&wrappedSigner{k}, cert, time.Time{}.Add(time.Hour), time.Time{}.Add(time.Hour*2), 1, nil) + crlPEM, err := generateCRL(&wrappedSigner{k}, cert, time.Now().Add(time.Hour), time.Now().Add(time.Hour*2), 1, nil, []string{}) test.AssertNotError(t, err, "generateCRL failed with valid profile") pemBlock, _ := pem.Decode(crlPEM) crlDER := pemBlock.Bytes // use crypto/x509 to check signature is valid and list is empty - goCRL, err := x509.ParseCRL(crlDER) + goCRL, err := x509.ParseRevocationList(crlDER) test.AssertNotError(t, err, "failed to parse CRL") - err = cert.CheckCRLSignature(goCRL) + err = goCRL.CheckSignatureFrom(cert) test.AssertNotError(t, err, "CRL signature check failed") - test.AssertEquals(t, len(goCRL.TBSCertList.RevokedCertificates), 0) + test.AssertEquals(t, len(goCRL.RevokedCertificateEntries), 0) // fully parse the CRL to check that the version is correct, and that // it contains the CRL number extension containing the number we expect @@ -99,8 +140,9 @@ func TestGenerateCRL(t *testing.T) { _, err = asn1.Unmarshal(crlDER, &crl) test.AssertNotError(t, err, "failed to parse CRL") test.AssertEquals(t, crl.TBS.Version, 1) // x509v2 == 1 - test.AssertEquals(t, len(crl.TBS.Extensions), 2) // AKID, CRL number + test.AssertEquals(t, len(crl.TBS.Extensions), 3) // AKID, CRL number, IssuingDistributionPoint test.Assert(t, crl.TBS.Extensions[1].Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 20}), "unexpected OID in extension") + test.Assert(t, crl.TBS.Extensions[2].Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 28}), "unexpected OID in extension") var number int _, err = asn1.Unmarshal(crl.TBS.Extensions[1].Value, &number) test.AssertNotError(t, err, "failed to parse CRL number extension") diff --git a/cmd/ceremony/ecdsa.go b/cmd/ceremony/ecdsa.go index 6b3c8f8fc36..e6d8700940e 100644 --- a/cmd/ceremony/ecdsa.go +++ b/cmd/ceremony/ecdsa.go @@ -1,4 +1,4 @@ -package notmain +package main import ( 
"crypto/ecdsa" @@ -7,12 +7,12 @@ import ( "fmt" "log" - "github.com/letsencrypt/boulder/pkcs11helpers" "github.com/miekg/pkcs11" + + "github.com/letsencrypt/boulder/pkcs11helpers" ) var stringToCurve = map[string]elliptic.Curve{ - elliptic.P224().Params().Name: elliptic.P224(), elliptic.P256().Params().Name: elliptic.P256(), elliptic.P384().Params().Name: elliptic.P384(), elliptic.P521().Params().Name: elliptic.P521(), @@ -20,7 +20,6 @@ var stringToCurve = map[string]elliptic.Curve{ // curveToOIDDER maps the name of the curves to their DER encoded OIDs var curveToOIDDER = map[string][]byte{ - elliptic.P224().Params().Name: {6, 5, 43, 129, 4, 0, 33}, elliptic.P256().Params().Name: {6, 8, 42, 134, 72, 206, 61, 3, 1, 7}, elliptic.P384().Params().Name: {6, 5, 43, 129, 4, 0, 34}, elliptic.P521().Params().Name: {6, 5, 43, 129, 4, 0, 35}, @@ -70,7 +69,7 @@ func ecPub( return nil, err } if pubKey.Curve != expectedCurve { - return nil, errors.New("Returned EC parameters doesn't match expected curve") + return nil, errors.New("returned EC parameters doesn't match expected curve") } log.Printf("\tX: %X\n", pubKey.X.Bytes()) log.Printf("\tY: %X\n", pubKey.Y.Bytes()) diff --git a/cmd/ceremony/ecdsa_test.go b/cmd/ceremony/ecdsa_test.go index 6d3e3cc6925..8bd34867581 100644 --- a/cmd/ceremony/ecdsa_test.go +++ b/cmd/ceremony/ecdsa_test.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto/ecdsa" diff --git a/cmd/ceremony/file.go b/cmd/ceremony/file.go index ea901b08dcc..752d7b7465e 100644 --- a/cmd/ceremony/file.go +++ b/cmd/ceremony/file.go @@ -1,4 +1,4 @@ -package notmain +package main import "os" diff --git a/cmd/ceremony/file_test.go b/cmd/ceremony/file_test.go index 92c7d106118..e46be891340 100644 --- a/cmd/ceremony/file_test.go +++ b/cmd/ceremony/file_test.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "testing" diff --git a/cmd/ceremony/key.go b/cmd/ceremony/key.go index a63287cfaad..2315a2081a3 100644 --- a/cmd/ceremony/key.go +++ b/cmd/ceremony/key.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto" @@ -7,8 +7,9 @@ import ( "fmt" "log" - "github.com/letsencrypt/boulder/pkcs11helpers" "github.com/miekg/pkcs11" + + "github.com/letsencrypt/boulder/pkcs11helpers" ) type hsmRandReader struct { @@ -34,10 +35,6 @@ type generateArgs struct { publicAttrs []*pkcs11.Attribute } -const ( - rsaExp = 65537 -) - // keyInfo is a struct used to pass around information about the public key // associated with the generated private key. der contains the DER encoding // of the SubjectPublicKeyInfo structure for the public key. id contains the @@ -53,35 +50,36 @@ func generateKey(session *pkcs11helpers.Session, label string, outputPath string {Type: pkcs11.CKA_LABEL, Value: []byte(label)}, }) if err != pkcs11helpers.ErrNoObject { - return nil, fmt.Errorf("expected no preexisting objects with label %q in slot for key storage. got error: %s", label, err) + return nil, fmt.Errorf("expected no preexisting objects with label %q in slot for key storage. 
got error: %w", label, err) } var pubKey crypto.PublicKey var keyID []byte switch config.Type { case "rsa": - pubKey, keyID, err = rsaGenerate(session, label, config.RSAModLength, rsaExp) + pubKey, keyID, err = rsaGenerate(session, label, config.RSAModLength) if err != nil { - return nil, fmt.Errorf("failed to generate RSA key pair: %s", err) + return nil, fmt.Errorf("failed to generate RSA key pair: %w", err) } case "ecdsa": pubKey, keyID, err = ecGenerate(session, label, config.ECDSACurve) if err != nil { - return nil, fmt.Errorf("failed to generate ECDSA key pair: %s", err) + return nil, fmt.Errorf("failed to generate ECDSA key pair: %w", err) } } der, err := x509.MarshalPKIXPublicKey(pubKey) if err != nil { - return nil, fmt.Errorf("Failed to marshal public key: %s", err) + return nil, fmt.Errorf("failed to marshal public key: %w", err) } pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der}) log.Printf("Public key PEM:\n%s\n", pemBytes) err = writeFile(outputPath, pemBytes) if err != nil { - return nil, fmt.Errorf("Failed to write public key to %q: %s", outputPath, err) + return nil, fmt.Errorf("failed to write public key to %q: %w", outputPath, err) } log.Printf("Public key written to %q\n", outputPath) + return &keyInfo{key: pubKey, der: der, id: keyID}, nil } diff --git a/cmd/ceremony/key_test.go b/cmd/ceremony/key_test.go index f92c6e11a76..5a1768c491d 100644 --- a/cmd/ceremony/key_test.go +++ b/cmd/ceremony/key_test.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto" @@ -8,8 +8,8 @@ import ( "crypto/rsa" "crypto/x509" "encoding/pem" - "io/ioutil" "math/big" + "os" "path" "strings" "testing" @@ -65,7 +65,7 @@ func TestGenerateKeyRSA(t *testing.T) { RSAModLength: 1024, }) test.AssertNotError(t, err, "Failed to generate RSA key") - diskKeyBytes, err := ioutil.ReadFile(keyPath) + diskKeyBytes, err := os.ReadFile(keyPath) test.AssertNotError(t, err, "Failed to load key from disk") block, _ := pem.Decode(diskKeyBytes) diskKey, err := x509.ParsePKIXPublicKey(block.Bytes) @@ -101,7 +101,7 @@ func TestGenerateKeyEC(t *testing.T) { ECDSACurve: "P-256", }) test.AssertNotError(t, err, "Failed to generate ECDSA key") - diskKeyBytes, err := ioutil.ReadFile(keyPath) + diskKeyBytes, err := os.ReadFile(keyPath) test.AssertNotError(t, err, "Failed to load key from disk") block, _ := pem.Decode(diskKeyBytes) diskKey, err := x509.ParsePKIXPublicKey(block.Bytes) diff --git a/cmd/ceremony/main.go b/cmd/ceremony/main.go index 6c69d0f824e..97c94eb1d30 100644 --- a/cmd/ceremony/main.go +++ b/cmd/ceremony/main.go @@ -1,8 +1,11 @@ -package notmain +package main import ( "bytes" + "context" "crypto" + "crypto/ecdsa" + "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" @@ -10,28 +13,94 @@ import ( "errors" "flag" "fmt" - "io/ioutil" "log" "os" + "slices" "time" - "github.com/letsencrypt/boulder/cmd" + "gopkg.in/yaml.v3" + + zlintx509 "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3" + + "github.com/letsencrypt/boulder/goodkey" "github.com/letsencrypt/boulder/linter" "github.com/letsencrypt/boulder/pkcs11helpers" - "golang.org/x/crypto/ocsp" - "gopkg.in/yaml.v2" + "github.com/letsencrypt/boulder/revocation" + "github.com/letsencrypt/boulder/strictyaml" ) -const configDateLayout = "2006-01-02 15:04:05" +var kp goodkey.KeyPolicy + +func init() { + var err error + kp, err = goodkey.NewPolicy(nil, nil) + if err != nil { + log.Fatal("Could not create goodkey.KeyPolicy") + } +} + +type lintCert *x509.Certificate + +// issueLintCertAndPerformLinting issues a 
linting certificate from a given +// template certificate signed by a given issuer and returns a *lintCert or an +// error. The lint certificate is linted prior to being returned. The public key +// from the just issued lint certificate is checked by the GoodKey package. +func issueLintCertAndPerformLinting(tbs, issuer *x509.Certificate, subjectPubKey crypto.PublicKey, signer crypto.Signer, skipLints []string) (lintCert, error) { + bytes, err := linter.Check(tbs, subjectPubKey, issuer, signer, skipLints) + if err != nil { + return nil, fmt.Errorf("certificate failed pre-issuance lint: %w", err) + } + lc, err := x509.ParseCertificate(bytes) + if err != nil { + return nil, err + } + err = kp.GoodKey(context.Background(), lc.PublicKey) + if err != nil { + return nil, err + } + + return lc, nil +} + +// postIssuanceLinting performs post-issuance linting on the raw bytes of a +// given certificate with the same set of lints as +// issueLintCertAndPerformLinting. The public key is also checked by the GoodKey +// package. +func postIssuanceLinting(fc *x509.Certificate, skipLints []string) error { + if fc == nil { + return fmt.Errorf("certificate was not provided") + } + parsed, err := zlintx509.ParseCertificate(fc.Raw) + if err != nil { + // If zlintx509.ParseCertificate fails, the certificate is too broken to + // lint. This should be treated as ZLint rejecting the certificate + return fmt.Errorf("unable to parse certificate: %s", err) + } + registry, err := linter.NewRegistry(skipLints) + if err != nil { + return fmt.Errorf("unable to create zlint registry: %s", err) + } + lintRes := zlint.LintCertificateEx(parsed, registry) + err = linter.ProcessResultSet(lintRes) + if err != nil { + return err + } + err = kp.GoodKey(context.Background(), fc.PublicKey) + if err != nil { + return err + } + + return nil +} type keyGenConfig struct { Type string `yaml:"type"` - RSAModLength uint `yaml:"rsa-mod-length"` + RSAModLength int `yaml:"rsa-mod-length"` ECDSACurve string `yaml:"ecdsa-curve"` } var allowedCurves = map[string]bool{ - "P-224": true, "P-256": true, "P-384": true, "P-521": true, @@ -51,7 +120,7 @@ func (kgc keyGenConfig) validate() error { return errors.New("if key.type = 'rsa' then key.ecdsa-curve is not used") } if kgc.Type == "ecdsa" && !allowedCurves[kgc.ECDSACurve] { - return errors.New("key.ecdsa-curve can only be 'P-224', 'P-256', 'P-384', or 'P-521'") + return errors.New("key.ecdsa-curve can only be 'P-256', 'P-384', or 'P-521'") } if kgc.Type == "ecdsa" && kgc.RSAModLength != 0 { return errors.New("if key.type = 'ecdsa' then key.rsa-mod-length is not used") @@ -90,6 +159,7 @@ func checkOutputFile(filename, fieldname string) error { return fmt.Errorf("outputs.%s is %q, which already exists", fieldname, filename) } + return nil } @@ -168,7 +238,7 @@ type intermediateConfig struct { SkipLints []string `yaml:"skip-lints"` } -func (ic intermediateConfig) validate(ct certType) error { +func (ic intermediateConfig) validate() error { err := ic.PKCS11.validate() if err != nil { return err @@ -189,7 +259,48 @@ func (ic intermediateConfig) validate(ct certType) error { } // Certificate profile - err = ic.CertProfile.verifyProfile(ct) + err = ic.CertProfile.verifyProfile(intermediateCert) + if err != nil { + return err + } + + return nil +} + +type crossCertConfig struct { + CeremonyType string `yaml:"ceremony-type"` + PKCS11 PKCS11SigningConfig `yaml:"pkcs11"` + Inputs struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + 
CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` + } `yaml:"inputs"` + Outputs struct { + CertificatePath string `yaml:"certificate-path"` + } `yaml:"outputs"` + CertProfile certProfile `yaml:"certificate-profile"` + SkipLints []string `yaml:"skip-lints"` +} + +func (csc crossCertConfig) validate() error { + err := csc.PKCS11.validate() + if err != nil { + return err + } + if csc.Inputs.PublicKeyPath == "" { + return errors.New("inputs.public-key-path is required") + } + if csc.Inputs.IssuerCertificatePath == "" { + return errors.New("inputs.issuer-certificate is required") + } + if csc.Inputs.CertificateToCrossSignPath == "" { + return errors.New("inputs.certificate-to-cross-sign-path is required") + } + err = checkOutputFile(csc.Outputs.CertificatePath, "certificate-path") + if err != nil { + return err + } + err = csc.CertProfile.verifyProfile(crossCert) if err != nil { return err } @@ -240,7 +351,8 @@ type keyConfig struct { PKCS11 PKCS11KeyGenConfig `yaml:"pkcs11"` Key keyGenConfig `yaml:"key"` Outputs struct { - PublicKeyPath string `yaml:"public-key-path"` + PublicKeyPath string `yaml:"public-key-path"` + PKCS11ConfigPath string `yaml:"pkcs11-config-path"` } `yaml:"outputs"` } @@ -265,59 +377,6 @@ func (kc keyConfig) validate() error { return nil } -type ocspRespConfig struct { - CeremonyType string `yaml:"ceremony-type"` - PKCS11 PKCS11SigningConfig `yaml:"pkcs11"` - Inputs struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` - } `yaml:"inputs"` - Outputs struct { - ResponsePath string `yaml:"response-path"` - } `yaml:"outputs"` - OCSPProfile struct { - ThisUpdate string `yaml:"this-update"` - NextUpdate string `yaml:"next-update"` - Status string `yaml:"status"` - } `yaml:"ocsp-profile"` -} - -func (orc ocspRespConfig) validate() error { - err := orc.PKCS11.validate() - if err != nil { - return err - } - - // Input fields - if orc.Inputs.CertificatePath == "" { - return errors.New("inputs.certificate-path is required") - } - if orc.Inputs.IssuerCertificatePath == "" { - return errors.New("inputs.issuer-certificate-path is required") - } - // DelegatedIssuerCertificatePath may be omitted - - // Output fields - err = checkOutputFile(orc.Outputs.ResponsePath, "response-path") - if err != nil { - return err - } - - // OCSP fields - if orc.OCSPProfile.ThisUpdate == "" { - return errors.New("ocsp-profile.this-update is required") - } - if orc.OCSPProfile.NextUpdate == "" { - return errors.New("ocsp-profile.next-update is required") - } - if orc.OCSPProfile.Status != "good" && orc.OCSPProfile.Status != "revoked" { - return errors.New("ocsp-profile.status must be either \"good\" or \"revoked\"") - } - - return nil -} - type crlConfig struct { CeremonyType string `yaml:"ceremony-type"` PKCS11 PKCS11SigningConfig `yaml:"pkcs11"` @@ -334,9 +393,10 @@ type crlConfig struct { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` } `yaml:"crl-profile"` + SkipLints []string `yaml:"skip-lints"` } func (cc crlConfig) validate() error { @@ -373,7 +433,7 @@ func (cc crlConfig) validate() error { if rc.RevocationDate == "" { return errors.New("crl-profile.revoked-certificates.revocation-date is required") } - if 
rc.RevocationReason == 0 { + if rc.RevocationReason == "" { return errors.New("crl-profile.revoked-certificates.revocation-reason is required") } } @@ -381,30 +441,40 @@ return nil } -// loadCert loads a PEM certificate specified by filename or returns an error -func loadCert(filename string) (cert *x509.Certificate, err error) { - certPEM, err := ioutil.ReadFile(filename) +// loadCert loads a PEM certificate specified by filename or returns an error. +// The public key from the loaded certificate is checked by the GoodKey package. +func loadCert(filename string) (*x509.Certificate, error) { + certPEM, err := os.ReadFile(filename) if err != nil { - return + return nil, err } + log.Printf("Loaded certificate from %s\n", filename) block, _ := pem.Decode(certPEM) if block == nil { - return nil, fmt.Errorf("No data in cert PEM file %s", filename) + return nil, fmt.Errorf("no data in cert PEM file %q", filename) } - cert, err = x509.ParseCertificate(block.Bytes) - return -} - -func equalPubKeys(a, b interface{}) bool { - aBytes, err := x509.MarshalPKIXPublicKey(a) + cert, err := x509.ParseCertificate(block.Bytes) if err != nil { - return false + return nil, err } - bBytes, err := x509.MarshalPKIXPublicKey(b) - if err != nil { - return false + goodkeyErr := kp.GoodKey(context.Background(), cert.PublicKey) + if goodkeyErr != nil { + return nil, goodkeyErr + } + + return cert, nil +} + +// publicKeysEqual determines whether two public keys are identical. +func publicKeysEqual(a, b crypto.PublicKey) (bool, error) { + switch ak := a.(type) { + case *rsa.PublicKey: + return ak.Equal(b), nil + case *ecdsa.PublicKey: + return ak.Equal(b), nil + default: + return false, fmt.Errorf("unsupported public key type %T", ak) } - return bytes.Equal(aBytes, bBytes) } func openSigner(cfg PKCS11SigningConfig, pubKey crypto.PublicKey) (crypto.Signer, *hsmRandReader, error) { @@ -418,17 +488,17 @@ func openSigner(cfg PKCS11SigningConfig, pubKey crypto.PublicKey) (crypto.Signer if err != nil { return nil, nil, fmt.Errorf("failed to retrieve private key handle: %s", err) } - if !equalPubKeys(signer.Public(), pubKey) { - return nil, nil, fmt.Errorf("signer pubkey did not match issuer pubkey") + ok, err := publicKeysEqual(signer.Public(), pubKey) + if err != nil { + return nil, nil, err + } + if !ok { + return nil, nil, errors.New("signer pubkey did not match issuer pubkey") } - log.Println("Retrieved private key handle") + return signer, newRandReader(session), nil } -func signAndWriteCert(tbs, issuer *x509.Certificate, subjectPubKey crypto.PublicKey, signer crypto.Signer, certPath string, skipLints []string) error { - err := linter.Check(tbs, subjectPubKey, issuer, signer, skipLints) - if err != nil { - return fmt.Errorf("certificate failed pre-issuance lint: %w", err) +func signAndWriteCert(tbs, issuer *x509.Certificate, lintCert lintCert, subjectPubKey crypto.PublicKey, signer crypto.Signer, certPath string) (*x509.Certificate, error) { + if lintCert == nil { + return nil, fmt.Errorf("linting was not performed prior to issuance") } // x509.CreateCertificate uses an io.Reader here for signing methods that require // a source of randomness. Since PKCS#11 based signing generates needed randomness @@ -437,13 +507,13 @@ func signAndWriteCert(tbs, issuer *x509.Certificate, subjectPubKey crypto.Public // changes.
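// Editor's sketch: failReader is defined elsewhere in this package and is not
// shown in this diff. A minimal fail-closed reader consistent with the comment
// above could look like the following (an illustrative assumption, not the
// package's actual implementation):
//
//	type failReader struct{}
//
//	func (failReader) Read(p []byte) (int, error) {
//		// Fail closed: PKCS#11 signing draws randomness from the HSM, so
//		// any local read here indicates a behavior change in crypto/x509.
//		return 0, errors.New("unexpected read from failReader")
//	}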
certBytes, err := x509.CreateCertificate(&failReader{}, tbs, issuer, subjectPubKey, signer) if err != nil { - return fmt.Errorf("failed to create certificate: %s", err) + return nil, fmt.Errorf("failed to create certificate: %s", err) } pemBytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes}) log.Printf("Signed certificate PEM:\n%s", pemBytes) cert, err := x509.ParseCertificate(certBytes) if err != nil { - return fmt.Errorf("failed to parse signed certificate: %s", err) + return nil, fmt.Errorf("failed to parse signed certificate: %s", err) } if tbs == issuer { // If cert is self-signed we need to populate the issuer subject key to @@ -451,25 +521,52 @@ func signAndWriteCert(tbs, issuer *x509.Certificate, subjectPubKey crypto.Public issuer.PublicKey = cert.PublicKey issuer.PublicKeyAlgorithm = cert.PublicKeyAlgorithm } - err = cert.CheckSignatureFrom(issuer) if err != nil { - return fmt.Errorf("failed to verify certificate signature: %s", err) + return nil, fmt.Errorf("failed to verify certificate signature: %s", err) } err = writeFile(certPath, pemBytes) if err != nil { - return fmt.Errorf("failed to write certificate to %q: %s", certPath, err) + return nil, fmt.Errorf("failed to write certificate to %q: %s", certPath, err) } log.Printf("Certificate written to %q\n", certPath) - return nil + + return cert, nil +} + +// loadPubKey loads a PEM public key specified by filename. It returns a +// crypto.PublicKey, the PEM bytes of the public key, and an error. If an error +// exists, no public key or bytes are returned. The public key is checked by the +// GoodKey package. +func loadPubKey(filename string) (crypto.PublicKey, []byte, error) { + keyPEM, err := os.ReadFile(filename) + if err != nil { + return nil, nil, err + } + log.Printf("Loaded public key from %s\n", filename) + block, _ := pem.Decode(keyPEM) + if block == nil { + return nil, nil, fmt.Errorf("no data in cert PEM file %q", filename) + } + key, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, nil, err + } + err = kp.GoodKey(context.Background(), key) + if err != nil { + return nil, nil, err + } + + return key, block.Bytes, nil } func rootCeremony(configBytes []byte) error { var config rootConfig - err := yaml.UnmarshalStrict(configBytes, &config) + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { return fmt.Errorf("failed to parse config: %s", err) } + log.Printf("Preparing root ceremony for %s\n", config.Outputs.CertificatePath) err = config.validate() if err != nil { return fmt.Errorf("failed to validate config: %s", err) @@ -487,69 +584,181 @@ func rootCeremony(configBytes []byte) error { if err != nil { return fmt.Errorf("failed to retrieve signer: %s", err) } - template, err := makeTemplate(newRandReader(session), &config.CertProfile, keyInfo.der, rootCert) + template, err := makeTemplate(newRandReader(session), &config.CertProfile, keyInfo.der, nil, rootCert) if err != nil { return fmt.Errorf("failed to create certificate profile: %s", err) } - - err = signAndWriteCert(template, template, keyInfo.key, signer, config.Outputs.CertificatePath, config.SkipLints) + lintCert, err := issueLintCertAndPerformLinting(template, template, keyInfo.key, signer, config.SkipLints) + if err != nil { + return err + } + finalCert, err := signAndWriteCert(template, template, lintCert, keyInfo.key, signer, config.Outputs.CertificatePath) if err != nil { return err } + err = postIssuanceLinting(finalCert, config.SkipLints) + if err != nil { + return err + } + log.Printf("Post 
issuance linting completed for %s\n", config.Outputs.CertificatePath) return nil } -func intermediateCeremony(configBytes []byte, ct certType) error { +func intermediateCeremony(configBytes []byte) error { var config intermediateConfig - err := yaml.UnmarshalStrict(configBytes, &config) + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { return fmt.Errorf("failed to parse config: %s", err) } - err = config.validate(ct) + log.Printf("Preparing intermediate ceremony for %s\n", config.Outputs.CertificatePath) + err = config.validate() if err != nil { return fmt.Errorf("failed to validate config: %s", err) } + pub, pubBytes, err := loadPubKey(config.Inputs.PublicKeyPath) + if err != nil { + return err + } + issuer, err := loadCert(config.Inputs.IssuerCertificatePath) + if err != nil { + return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err) + } + signer, randReader, err := openSigner(config.PKCS11, issuer.PublicKey) + if err != nil { + return err + } + template, err := makeTemplate(randReader, &config.CertProfile, pubBytes, nil, intermediateCert) + if err != nil { + return fmt.Errorf("failed to create certificate profile: %s", err) + } + template.AuthorityKeyId = issuer.SubjectKeyId + lintCert, err := issueLintCertAndPerformLinting(template, issuer, pub, signer, config.SkipLints) + if err != nil { + return err + } + finalCert, err := signAndWriteCert(template, issuer, lintCert, pub, signer, config.Outputs.CertificatePath) + if err != nil { + return err + } + // Verify that x509.CreateCertificate is deterministic and produced + // identical DER bytes between the lintCert and finalCert signing + // operations. If this fails it's misissuance, but it's better to know + // about the problem sooner than later.
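// Editor's note (added reasoning): the comparison below uses RawTBSCertificate
// rather than Raw because the lint certificate and the final certificate are
// produced by separate signing operations, so their signature bytes are not
// expected to match (ECDSA signatures are randomized, for one); only the
// to-be-signed portion should be byte-for-byte identical.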
+ if !bytes.Equal(lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) { + return fmt.Errorf("mismatch between lintCert and finalCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) + } + err = postIssuanceLinting(finalCert, config.SkipLints) + if err != nil { + return err + } + log.Printf("Post issuance linting completed for %s\n", config.Outputs.CertificatePath) + + return nil +} - pubPEMBytes, err := ioutil.ReadFile(config.Inputs.PublicKeyPath) +func crossCertCeremony(configBytes []byte) error { + var config crossCertConfig + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { - return fmt.Errorf("failed to read public key %q: %s", config.Inputs.PublicKeyPath, err) + return fmt.Errorf("failed to parse config: %s", err) } - pubPEM, _ := pem.Decode(pubPEMBytes) - if pubPEM == nil { - return fmt.Errorf("failed to parse public key") + log.Printf("Preparing cross-certificate ceremony for %s\n", config.Outputs.CertificatePath) + err = config.validate() + if err != nil { + return fmt.Errorf("failed to validate config: %s", err) } - pub, err := x509.ParsePKIXPublicKey(pubPEM.Bytes) + pub, pubBytes, err := loadPubKey(config.Inputs.PublicKeyPath) if err != nil { - return fmt.Errorf("failed to parse public key: %s", err) + return err } issuer, err := loadCert(config.Inputs.IssuerCertificatePath) if err != nil { return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err) } - + toBeCrossSigned, err := loadCert(config.Inputs.CertificateToCrossSignPath) + if err != nil { + return fmt.Errorf("failed to load toBeCrossSigned certificate %q: %s", config.Inputs.CertificateToCrossSignPath, err) + } signer, randReader, err := openSigner(config.PKCS11, issuer.PublicKey) if err != nil { return err } - - template, err := makeTemplate(randReader, &config.CertProfile, pubPEM.Bytes, ct) + template, err := makeTemplate(randReader, &config.CertProfile, pubBytes, toBeCrossSigned, crossCert) if err != nil { return fmt.Errorf("failed to create certificate profile: %s", err) } template.AuthorityKeyId = issuer.SubjectKeyId - - err = signAndWriteCert(template, issuer, pub, signer, config.Outputs.CertificatePath, config.SkipLints) + lintCert, err := issueLintCertAndPerformLinting(template, issuer, pub, signer, config.SkipLints) + if err != nil { + return err + } + // Ensure that we've configured the correct certificate to cross-sign compared to the profile. + // + // Example of a misconfiguration below: + // ... + // inputs: + // certificate-to-cross-sign-path: int-e6.cert.pem + // certificate-profile: + // common-name: (FAKE) E5 + // organization: (FAKE) Let's Encrypt + // ... + // + if !bytes.Equal(toBeCrossSigned.RawSubject, lintCert.RawSubject) { + return fmt.Errorf("mismatch between toBeCrossSigned and lintCert RawSubject DER bytes: \"%x\" != \"%x\"", toBeCrossSigned.RawSubject, lintCert.RawSubject) + } + // BR 7.1.2.2.1 Cross-Certified Subordinate CA Validity + // The earlier of one day prior to the time of signing or the earliest + // notBefore date of the existing CA Certificate(s). + if lintCert.NotBefore.Before(toBeCrossSigned.NotBefore) { + return fmt.Errorf("cross-signed subordinate CA's NotBefore predates the existing CA's NotBefore") + } + // BR 7.1.2.2.3 Cross-Certified Subordinate CA Extensions + // We want the Extended Key Usages of our cross-signs to be identical to those + // in the cert being cross-signed, for the sake of consistency. 
However, our + // Root CA Certificates do not contain any EKUs, as required by BR 7.1.2.1.2. + // Therefore, cross-signs of our roots count as "unrestricted" cross-signs per + // the definition in BR 7.1.2.2.3, and are subject to the requirement that + // the cross-sign's Issuer and Subject fields must either: + // - have identical organizationNames; or + // - have organizationNames which are affiliates of each other. + // Therefore, we enforce that cross-signs with empty EKUs have identical + // Subject Organization Name fields... or allow one special case where the + // issuer is "Internet Security Research Group" and the subject is "ISRG" to + // allow us to migrate from the longer string to the shorter one. + if !slices.Equal(lintCert.ExtKeyUsage, toBeCrossSigned.ExtKeyUsage) { + return fmt.Errorf("lint cert and toBeCrossSigned cert EKUs differ") + } + if len(lintCert.ExtKeyUsage) == 0 { + if !slices.Equal(lintCert.Subject.Organization, issuer.Subject.Organization) && + !(slices.Equal(issuer.Subject.Organization, []string{"Internet Security Research Group"}) && slices.Equal(lintCert.Subject.Organization, []string{"ISRG"})) { + return fmt.Errorf("attempted unrestricted cross-sign of certificate operated by a different organization") + } + } + // Issue the cross-signed certificate. + finalCert, err := signAndWriteCert(template, issuer, lintCert, pub, signer, config.Outputs.CertificatePath) + if err != nil { + return err + } + // Verify that x509.CreateCertificate is deterministic and produced + // identical DER bytes between the lintCert and finalCert signing + // operations. If this fails it's misissuance, but it's better to know + // about the problem sooner than later. + if !bytes.Equal(lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) { + return fmt.Errorf("mismatch between lintCert and finalCert RawTBSCertificate DER bytes: \"%x\" != \"%x\"", lintCert.RawTBSCertificate, finalCert.RawTBSCertificate) + } + err = postIssuanceLinting(finalCert, config.SkipLints) if err != nil { return err } + log.Printf("Post issuance linting completed for %s\n", config.Outputs.CertificatePath) return nil } func csrCeremony(configBytes []byte) error { var config csrConfig - err := yaml.UnmarshalStrict(configBytes, &config) + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { return fmt.Errorf("failed to parse config: %s", err) } @@ -558,17 +767,9 @@ func csrCeremony(configBytes []byte) error { return fmt.Errorf("failed to validate config: %s", err) } - pubPEMBytes, err := ioutil.ReadFile(config.Inputs.PublicKeyPath) + pub, _, err := loadPubKey(config.Inputs.PublicKeyPath) if err != nil { - return fmt.Errorf("failed to read public key %q: %s", config.Inputs.PublicKeyPath, err) - } - pubPEM, _ := pem.Decode(pubPEMBytes) - if pubPEM == nil { - return fmt.Errorf("failed to parse public key") - } - pub, err := x509.ParsePKIXPublicKey(pubPEM.Bytes) - if err != nil { - return fmt.Errorf("failed to parse public key: %s", err) + return err } signer, _, err := openSigner(config.PKCS11, pub) @@ -592,7 +793,7 @@ func csrCeremony(configBytes []byte) error { func keyCeremony(configBytes []byte) error { var config keyConfig - err := yaml.UnmarshalStrict(configBytes, &config) + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { return fmt.Errorf("failed to parse config: %s", err) } @@ -609,82 +810,23 @@ func keyCeremony(configBytes []byte) error { return err } - return nil -} - -func ocspRespCeremony(configBytes []byte) error { - var config ocspRespConfig - err :=
yaml.UnmarshalStrict(configBytes, &config) - if err != nil { - return fmt.Errorf("failed to parse config: %s", err) - } - err = config.validate() - if err != nil { - return fmt.Errorf("failed to validate config: %s", err) - } - - cert, err := loadCert(config.Inputs.CertificatePath) - if err != nil { - return fmt.Errorf("failed to load certificate %q: %s", config.Inputs.CertificatePath, err) - } - issuer, err := loadCert(config.Inputs.IssuerCertificatePath) - if err != nil { - return fmt.Errorf("failed to load issuer certificate %q: %s", config.Inputs.IssuerCertificatePath, err) - } - var signer crypto.Signer - var delegatedIssuer *x509.Certificate - if config.Inputs.DelegatedIssuerCertificatePath != "" { - delegatedIssuer, err = loadCert(config.Inputs.DelegatedIssuerCertificatePath) - if err != nil { - return fmt.Errorf("failed to load delegated issuer certificate %q: %s", config.Inputs.DelegatedIssuerCertificatePath, err) - } - - signer, _, err = openSigner(config.PKCS11, delegatedIssuer.PublicKey) - if err != nil { - return err - } - } else { - signer, _, err = openSigner(config.PKCS11, issuer.PublicKey) + if config.Outputs.PKCS11ConfigPath != "" { + contents := fmt.Sprintf( + `{"module": %q, "tokenLabel": %q, "pin": %q}`, + config.PKCS11.Module, config.PKCS11.StoreLabel, config.PKCS11.PIN, + ) + err = writeFile(config.Outputs.PKCS11ConfigPath, []byte(contents)) if err != nil { return err } } - thisUpdate, err := time.Parse(configDateLayout, config.OCSPProfile.ThisUpdate) - if err != nil { - return fmt.Errorf("unable to parse ocsp-profile.this-update: %s", err) - } - nextUpdate, err := time.Parse(configDateLayout, config.OCSPProfile.NextUpdate) - if err != nil { - return fmt.Errorf("unable to parse ocsp-profile.next-update: %s", err) - } - var status int - switch config.OCSPProfile.Status { - case "good": - status = int(ocsp.Good) - case "revoked": - status = int(ocsp.Revoked) - default: - // this shouldn't happen if the config is validated - return fmt.Errorf("unexpected ocsp-profile.stats: %s", config.OCSPProfile.Status) - } - - resp, err := generateOCSPResponse(signer, issuer, delegatedIssuer, cert, thisUpdate, nextUpdate, status) - if err != nil { - return err - } - - err = writeFile(config.Outputs.ResponsePath, resp) - if err != nil { - return fmt.Errorf("failed to write OCSP response to %q: %s", config.Outputs.ResponsePath, err) - } - return nil } func crlCeremony(configBytes []byte) error { var config crlConfig - err := yaml.UnmarshalStrict(configBytes, &config) + err := strictyaml.Unmarshal(configBytes, &config) if err != nil { return fmt.Errorf("failed to parse config: %s", err) } @@ -702,32 +844,39 @@ func crlCeremony(configBytes []byte) error { return err } - thisUpdate, err := time.Parse(configDateLayout, config.CRLProfile.ThisUpdate) + thisUpdate, err := time.Parse(time.DateTime, config.CRLProfile.ThisUpdate) if err != nil { return fmt.Errorf("unable to parse crl-profile.this-update: %s", err) } - nextUpdate, err := time.Parse(configDateLayout, config.CRLProfile.NextUpdate) + nextUpdate, err := time.Parse(time.DateTime, config.CRLProfile.NextUpdate) if err != nil { return fmt.Errorf("unable to parse crl-profile.next-update: %s", err) } - var revokedCertificates []pkix.RevokedCertificate + var revokedCertificates []x509.RevocationListEntry for _, rc := range config.CRLProfile.RevokedCertificates { cert, err := loadCert(rc.CertificatePath) if err != nil { return fmt.Errorf("failed to load revoked certificate %q: %s", rc.CertificatePath, err) } - revokedAt, err := 
time.Parse(configDateLayout, rc.RevocationDate) + if !cert.IsCA { + return fmt.Errorf("certificate with serial %d is not a CA certificate", cert.SerialNumber) + } + revokedAt, err := time.Parse(time.DateTime, rc.RevocationDate) if err != nil { return fmt.Errorf("unable to parse crl-profile.revoked-certificates.revocation-date") } - revokedCert := pkix.RevokedCertificate{ + revokedCert := x509.RevocationListEntry{ SerialNumber: cert.SerialNumber, RevocationTime: revokedAt, } - encReason, err := asn1.Marshal(rc.RevocationReason) + reasonCode, err := revocation.StringToReason(rc.RevocationReason) + if err != nil { + return fmt.Errorf("looking up revocation reason: %w", err) + } + encReason, err := asn1.Marshal(reasonCode) if err != nil { - return fmt.Errorf("failed to marshal revocation reason %q: %s", rc.RevocationReason, err) + return fmt.Errorf("failed to marshal revocation reason %d (%q): %s", reasonCode, rc.RevocationReason, err) } revokedCert.Extensions = []pkix.Extension{{ Id: asn1.ObjectIdentifier{2, 5, 29, 21}, // id-ce-reasonCode @@ -736,7 +885,7 @@ func crlCeremony(configBytes []byte) error { revokedCertificates = append(revokedCertificates, revokedCert) } - crlBytes, err := generateCRL(signer, issuer, thisUpdate, nextUpdate, config.CRLProfile.Number, revokedCertificates) + crlBytes, err := generateCRL(signer, issuer, thisUpdate, nextUpdate, config.CRLProfile.Number, revokedCertificates, config.SkipLints) if err != nil { return err } @@ -758,13 +907,18 @@ func main() { if *configPath == "" { log.Fatal("--config is required") } - configBytes, err := ioutil.ReadFile(*configPath) + configBytes, err := os.ReadFile(*configPath) if err != nil { log.Fatalf("Failed to read config file: %s", err) } var ct struct { CeremonyType string `yaml:"ceremony-type"` } + + // We are intentionally using non-strict unmarshaling to read the top level + // tags to populate the "ct" struct for use in the switch statement below. + // Further strict processing of each yaml node is done on a case by case basis + // inside the switch statement. 
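// Editor's illustration: a hypothetical config showing the two-pass parse. The
// yaml keys are taken from the structs above; the paths and label are made up.
// Only ceremony-type is consumed by this loose pass; every other node is then
// checked by the strict per-ceremony Unmarshal in the matching case below, so
// unknown or misspelled keys still fail:
//
//	ceremony-type: key
//	pkcs11:
//	  module: /usr/lib/softhsm/libsofthsm2.so
//	  store-key-with-label: intermediate-signing-key
//	key:
//	  type: ecdsa
//	  ecdsa-curve: P-384
//	outputs:
//	  public-key-path: /tmp/intermediate-signing-pub.pem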
err = yaml.Unmarshal(configBytes, &ct) if err != nil { log.Fatalf("Failed to parse config: %s", err) @@ -777,12 +931,12 @@ func main() { log.Fatalf("root ceremony failed: %s", err) } case "cross-certificate": - err = intermediateCeremony(configBytes, crossCert) + err = crossCertCeremony(configBytes) if err != nil { log.Fatalf("cross-certificate ceremony failed: %s", err) } case "intermediate": - err = intermediateCeremony(configBytes, intermediateCert) + err = intermediateCeremony(configBytes) if err != nil { log.Fatalf("intermediate ceremony failed: %s", err) } @@ -791,36 +945,17 @@ func main() { if err != nil { log.Fatalf("cross-csr ceremony failed: %s", err) } - case "ocsp-signer": - err = intermediateCeremony(configBytes, ocspCert) - if err != nil { - log.Fatalf("ocsp signer ceremony failed: %s", err) - } case "key": err = keyCeremony(configBytes) if err != nil { log.Fatalf("key ceremony failed: %s", err) } - case "ocsp-response": - err = ocspRespCeremony(configBytes) - if err != nil { - log.Fatalf("ocsp response ceremony failed: %s", err) - } case "crl": err = crlCeremony(configBytes) if err != nil { log.Fatalf("crl ceremony failed: %s", err) } - case "crl-signer": - err = intermediateCeremony(configBytes, crlCert) - if err != nil { - log.Fatalf("crl signer ceremony failed: %s", err) - } default: - log.Fatalf("unknown ceremony-type, must be one of: root, intermediate, ocsp-signer, crl-signer, key, ocsp-response") + log.Fatalf("unknown ceremony-type, must be one of: root, cross-certificate, intermediate, cross-csr, key, crl") } } - -func init() { - cmd.RegisterCommand("ceremony", main) -} diff --git a/cmd/ceremony/main_test.go b/cmd/ceremony/main_test.go index 2c6215be93b..a275f0c6c95 100644 --- a/cmd/ceremony/main_test.go +++ b/cmd/ceremony/main_test.go @@ -1,10 +1,47 @@ -package notmain +package main import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "io/fs" + "math/big" + "os" + "path" "strings" "testing" + "time" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/test" ) +func TestLoadPubKey(t *testing.T) { + tmp := t.TempDir() + key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + + _, _, err := loadPubKey(path.Join(tmp, "does", "not", "exist")) + test.AssertError(t, err, "should fail on non-existent file") + test.AssertErrorIs(t, err, fs.ErrNotExist) + + _, _, err = loadPubKey("../../test/hierarchy/README.md") + test.AssertError(t, err, "should fail on non-PEM file") + + priv, _ := x509.MarshalPKCS8PrivateKey(key) + _ = os.WriteFile(path.Join(tmp, "priv.pem"), pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: priv}), 0644) + _, _, err = loadPubKey(path.Join(tmp, "priv.pem")) + test.AssertError(t, err, "should fail on non-pubkey PEM") + + pub, _ := x509.MarshalPKIXPublicKey(key.Public()) + _ = os.WriteFile(path.Join(tmp, "pub.pem"), pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pub}), 0644) + _, _, err = loadPubKey(path.Join(tmp, "pub.pem")) + test.AssertNotError(t, err, "should not have errored") +} + func TestCheckOutputFileSucceeds(t *testing.T) { dir := t.TempDir() err := checkOutputFile(dir+"/example", "foo") @@ -80,7 +117,7 @@ func TestKeyGenConfigValidate(t *testing.T) { Type: "ecdsa", ECDSACurve: "bad", }, - expectedError: "key.ecdsa-curve can only be 'P-224', 'P-256', 'P-384', or 'P-521'", + expectedError: "key.ecdsa-curve can only be 'P-256', 'P-384', or 'P-521'", }, { name: "key.type is ecdsa but key.rsa-mod-length is present", @@ -340,6 +377,74 @@ func 
TestIntermediateConfigValidate(t *testing.T) { }, expectedError: "not-before is required", }, + { + name: "too many policy OIDs", + config: intermediateConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", + }, + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + }, + Outputs: struct { + CertificatePath string `yaml:"certificate-path"` + }{ + CertificatePath: "path", + }, + CertProfile: certProfile{ + NotBefore: "a", + NotAfter: "b", + SignatureAlgorithm: "c", + CommonName: "d", + Organization: "e", + Country: "f", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{{OID: "2.23.140.1.2.1"}, {OID: "6.6.6"}}, + }, + SkipLints: []string{}, + }, + expectedError: "policy should be exactly BRs domain-validated for subordinate CAs", + }, + { + name: "too few policy OIDs", + config: intermediateConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", + }, + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + }, + Outputs: struct { + CertificatePath string `yaml:"certificate-path"` + }{ + CertificatePath: "path", + }, + CertProfile: certProfile{ + NotBefore: "a", + NotAfter: "b", + SignatureAlgorithm: "c", + CommonName: "d", + Organization: "e", + Country: "f", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{}, + }, + SkipLints: []string{}, + }, + expectedError: "policy should be exactly BRs domain-validated for subordinate CAs", + }, { name: "good config", config: intermediateConfig{ @@ -366,9 +471,9 @@ func TestIntermediateConfigValidate(t *testing.T) { CommonName: "d", Organization: "e", Country: "f", - OCSPURL: "g", CRLURL: "h", IssuerURL: "i", + Policies: []policyInfoConfig{{OID: "2.23.140.1.2.1"}}, }, SkipLints: []string{}, }, @@ -376,7 +481,7 @@ func TestIntermediateConfigValidate(t *testing.T) { } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - err := tc.config.validate(intermediateCert) + err := tc.config.validate() if err != nil && err.Error() != tc.expectedError { t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err) } else if err == nil && tc.expectedError != "" { @@ -386,20 +491,20 @@ func TestIntermediateConfigValidate(t *testing.T) { } } -func TestCSRConfigValidate(t *testing.T) { +func TestCrossCertConfigValidate(t *testing.T) { cases := []struct { name string - config csrConfig + config crossCertConfig expectedError string }{ { name: "no pkcs11.module", - config: csrConfig{}, + config: crossCertConfig{}, expectedError: "pkcs11.module is required", }, { name: "no pkcs11.signing-key-label", - config: csrConfig{ + config: crossCertConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", }, @@ -408,7 +513,7 @@ func TestCSRConfigValidate(t *testing.T) { }, { name: "no inputs.public-key-path", - config: csrConfig{ + config: crossCertConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", @@ -417,137 +522,189 @@ func TestCSRConfigValidate(t *testing.T) { expectedError: "inputs.public-key-path is required", }, { - name: "no outputs.csr-path", - config: csrConfig{ + name: "no inputs.issuer-certificate-path", + config: crossCertConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - PublicKeyPath string `yaml:"public-key-path"` + 
PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` }{ - PublicKeyPath: "path", + PublicKeyPath: "path", + CertificateToCrossSignPath: "path", }, }, - expectedError: "outputs.csr-path is required", + expectedError: "inputs.issuer-certificate is required", + }, + { + name: "no inputs.certificate-to-cross-sign-path", + config: crossCertConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", + }, + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + }, + }, + expectedError: "inputs.certificate-to-cross-sign-path is required", + }, + { + name: "no outputs.certificate-path", + config: crossCertConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", + }, + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + CertificateToCrossSignPath: "path", + }, + }, + expectedError: "outputs.certificate-path is required", }, { name: "bad certificate-profile", - config: csrConfig{ + config: crossCertConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - PublicKeyPath string `yaml:"public-key-path"` + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` }{ - PublicKeyPath: "path", + PublicKeyPath: "path", + IssuerCertificatePath: "path", + CertificateToCrossSignPath: "path", }, Outputs: struct { - CSRPath string `yaml:"csr-path"` + CertificatePath string `yaml:"certificate-path"` }{ - CSRPath: "path", + CertificatePath: "path", }, }, - expectedError: "common-name is required", + expectedError: "not-before is required", }, { - name: "good config", - config: csrConfig{ + name: "too many policy OIDs", + config: crossCertConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - PublicKeyPath string `yaml:"public-key-path"` + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` }{ - PublicKeyPath: "path", + PublicKeyPath: "path", + IssuerCertificatePath: "path", + CertificateToCrossSignPath: "path", }, Outputs: struct { - CSRPath string `yaml:"csr-path"` + CertificatePath string `yaml:"certificate-path"` }{ - CSRPath: "path", + CertificatePath: "path", }, CertProfile: certProfile{ - CommonName: "d", - Organization: "e", - Country: "f", + NotBefore: "a", + NotAfter: "b", + SignatureAlgorithm: "c", + CommonName: "d", + Organization: "e", + Country: "f", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{{OID: "2.23.140.1.2.1"}, {OID: "6.6.6"}}, }, + SkipLints: []string{}, }, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - err := tc.config.validate() - if err != nil && err.Error() != tc.expectedError { - t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err) - } else if err == nil && 
tc.expectedError != "" { - t.Fatalf("validate didn't fail, wanted: %q", err) - } - }) - } -} - -func TestKeyConfigValidate(t *testing.T) { - cases := []struct { - name string - config keyConfig - expectedError string - }{ - { - name: "no pkcs11.module", - config: keyConfig{}, - expectedError: "pkcs11.module is required", + expectedError: "policy should be exactly BRs domain-validated for subordinate CAs", }, { - name: "no pkcs11.store-key-with-label", - config: keyConfig{ - PKCS11: PKCS11KeyGenConfig{ - Module: "module", + name: "too few policy OIDs", + config: crossCertConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", }, - }, - expectedError: "pkcs11.store-key-with-label is required", - }, - { - name: "bad key fields", - config: keyConfig{ - PKCS11: PKCS11KeyGenConfig{ - Module: "module", - StoreLabel: "label", + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + CertificateToCrossSignPath: "path", }, - }, - expectedError: "key.type is required", - }, - { - name: "no outputs.public-key-path", - config: keyConfig{ - PKCS11: PKCS11KeyGenConfig{ - Module: "module", - StoreLabel: "label", + Outputs: struct { + CertificatePath string `yaml:"certificate-path"` + }{ + CertificatePath: "path", }, - Key: keyGenConfig{ - Type: "rsa", - RSAModLength: 2048, + CertProfile: certProfile{ + NotBefore: "a", + NotAfter: "b", + SignatureAlgorithm: "c", + CommonName: "d", + Organization: "e", + Country: "f", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{}, }, + SkipLints: []string{}, }, - expectedError: "outputs.public-key-path is required", + expectedError: "policy should be exactly BRs domain-validated for subordinate CAs", }, { name: "good config", - config: keyConfig{ - PKCS11: PKCS11KeyGenConfig{ - Module: "module", - StoreLabel: "label", + config: crossCertConfig{ + PKCS11: PKCS11SigningConfig{ + Module: "module", + SigningLabel: "label", }, - Key: keyGenConfig{ - Type: "rsa", - RSAModLength: 2048, + Inputs: struct { + PublicKeyPath string `yaml:"public-key-path"` + IssuerCertificatePath string `yaml:"issuer-certificate-path"` + CertificateToCrossSignPath string `yaml:"certificate-to-cross-sign-path"` + }{ + PublicKeyPath: "path", + IssuerCertificatePath: "path", + CertificateToCrossSignPath: "path", }, Outputs: struct { - PublicKeyPath string `yaml:"public-key-path"` + CertificatePath string `yaml:"certificate-path"` }{ - PublicKeyPath: "path", + CertificatePath: "path", }, + CertProfile: certProfile{ + NotBefore: "a", + NotAfter: "b", + SignatureAlgorithm: "c", + CommonName: "d", + Organization: "e", + Country: "f", + CRLURL: "h", + IssuerURL: "i", + Policies: []policyInfoConfig{{OID: "2.23.140.1.2.1"}}, + }, + SkipLints: []string{}, }, }, } @@ -563,20 +720,20 @@ func TestKeyConfigValidate(t *testing.T) { } } -func TestOCSPRespConfig(t *testing.T) { +func TestCSRConfigValidate(t *testing.T) { cases := []struct { name string - config ocspRespConfig + config csrConfig expectedError string }{ { name: "no pkcs11.module", - config: ocspRespConfig{}, + config: csrConfig{}, expectedError: "pkcs11.module is required", }, { name: "no pkcs11.signing-key-label", - config: ocspRespConfig{ + config: csrConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", }, @@ -584,162 +741,148 @@ func TestOCSPRespConfig(t *testing.T) { expectedError: 
"pkcs11.signing-key-label is required", }, { - name: "no inputs.certificate-path", - config: ocspRespConfig{ - PKCS11: PKCS11SigningConfig{ - Module: "module", - SigningLabel: "label", - }, - }, - expectedError: "inputs.certificate-path is required", - }, - { - name: "no inputs.issuer-certificate-path", - config: ocspRespConfig{ + name: "no inputs.public-key-path", + config: csrConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, - Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` - }{ - CertificatePath: "path", - }, }, - expectedError: "inputs.issuer-certificate-path is required", + expectedError: "inputs.public-key-path is required", }, { - name: "no outputs.response-path", - config: ocspRespConfig{ + name: "no outputs.csr-path", + config: csrConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` + PublicKeyPath string `yaml:"public-key-path"` }{ - CertificatePath: "path", - IssuerCertificatePath: "path", + PublicKeyPath: "path", }, }, - expectedError: "outputs.response-path is required", + expectedError: "outputs.csr-path is required", }, { - name: "no ocsp-profile.this-update", - config: ocspRespConfig{ + name: "bad certificate-profile", + config: csrConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` + PublicKeyPath string `yaml:"public-key-path"` }{ - CertificatePath: "path", - IssuerCertificatePath: "path", + PublicKeyPath: "path", }, Outputs: struct { - ResponsePath string `yaml:"response-path"` + CSRPath string `yaml:"csr-path"` }{ - ResponsePath: "path", + CSRPath: "path", }, }, - expectedError: "ocsp-profile.this-update is required", + expectedError: "common-name is required", }, { - name: "no ocsp-profile.next-update", - config: ocspRespConfig{ + name: "good config", + config: csrConfig{ PKCS11: PKCS11SigningConfig{ Module: "module", SigningLabel: "label", }, Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` + PublicKeyPath string `yaml:"public-key-path"` }{ - CertificatePath: "path", - IssuerCertificatePath: "path", + PublicKeyPath: "path", }, Outputs: struct { - ResponsePath string `yaml:"response-path"` + CSRPath string `yaml:"csr-path"` }{ - ResponsePath: "path", + CSRPath: "path", }, - OCSPProfile: struct { - ThisUpdate string `yaml:"this-update"` - NextUpdate string `yaml:"next-update"` - Status string `yaml:"status"` - }{ - ThisUpdate: "this-update", + CertProfile: certProfile{ + CommonName: "d", + Organization: "e", + Country: "f", }, }, - expectedError: "ocsp-profile.next-update is required", }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := tc.config.validate() + if err != nil && err.Error() != tc.expectedError { + t.Fatalf("Unexpected error, wanted: %q, got: %q", tc.expectedError, err) + } else if err == nil 
&& tc.expectedError != "" { + t.Fatalf("validate didn't fail, wanted: %q", err) + } + }) + } +} + +func TestKeyConfigValidate(t *testing.T) { + cases := []struct { + name string + config keyConfig + expectedError string + }{ { - name: "no ocsp-profile.status", - config: ocspRespConfig{ - PKCS11: PKCS11SigningConfig{ - Module: "module", - SigningLabel: "label", + name: "no pkcs11.module", + config: keyConfig{}, + expectedError: "pkcs11.module is required", + }, + { + name: "no pkcs11.store-key-with-label", + config: keyConfig{ + PKCS11: PKCS11KeyGenConfig{ + Module: "module", }, - Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` - }{ - CertificatePath: "path", - IssuerCertificatePath: "path", + }, + expectedError: "pkcs11.store-key-with-label is required", + }, + { + name: "bad key fields", + config: keyConfig{ + PKCS11: PKCS11KeyGenConfig{ + Module: "module", + StoreLabel: "label", }, - Outputs: struct { - ResponsePath string `yaml:"response-path"` - }{ - ResponsePath: "path", + }, + expectedError: "key.type is required", + }, + { + name: "no outputs.public-key-path", + config: keyConfig{ + PKCS11: PKCS11KeyGenConfig{ + Module: "module", + StoreLabel: "label", }, - OCSPProfile: struct { - ThisUpdate string `yaml:"this-update"` - NextUpdate string `yaml:"next-update"` - Status string `yaml:"status"` - }{ - ThisUpdate: "this-update", - NextUpdate: "next-update", + Key: keyGenConfig{ + Type: "rsa", + RSAModLength: 2048, }, }, - expectedError: "ocsp-profile.status must be either \"good\" or \"revoked\"", + expectedError: "outputs.public-key-path is required", }, { name: "good config", - config: ocspRespConfig{ - PKCS11: PKCS11SigningConfig{ - Module: "module", - SigningLabel: "label", + config: keyConfig{ + PKCS11: PKCS11KeyGenConfig{ + Module: "module", + StoreLabel: "label", }, - Inputs: struct { - CertificatePath string `yaml:"certificate-path"` - IssuerCertificatePath string `yaml:"issuer-certificate-path"` - DelegatedIssuerCertificatePath string `yaml:"delegated-issuer-certificate-path"` - }{ - CertificatePath: "path", - IssuerCertificatePath: "path", + Key: keyGenConfig{ + Type: "rsa", + RSAModLength: 2048, }, Outputs: struct { - ResponsePath string `yaml:"response-path"` - }{ - ResponsePath: "path", - }, - OCSPProfile: struct { - ThisUpdate string `yaml:"this-update"` - NextUpdate string `yaml:"next-update"` - Status string `yaml:"status"` + PublicKeyPath string `yaml:"public-key-path"` + PKCS11ConfigPath string `yaml:"pkcs11-config-path"` }{ - ThisUpdate: "this-update", - NextUpdate: "next-update", - Status: "good", + PublicKeyPath: "path", + PKCS11ConfigPath: "path.json", }, }, }, @@ -845,7 +988,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -877,7 +1020,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -910,7 +1053,7 @@ func TestCRLConfig(t *testing.T) { 
RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -919,7 +1062,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates: []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` }{{}}, }, }, @@ -949,7 +1092,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -958,7 +1101,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates: []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` }{{ CertificatePath: "path", }}, @@ -990,7 +1133,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -999,7 +1142,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates: []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` }{{ CertificatePath: "path", RevocationDate: "date", @@ -1032,7 +1175,7 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` } `yaml:"revoked-certificates"` }{ ThisUpdate: "this-update", @@ -1041,11 +1184,11 @@ func TestCRLConfig(t *testing.T) { RevokedCertificates: []struct { CertificatePath string `yaml:"certificate-path"` RevocationDate string `yaml:"revocation-date"` - RevocationReason int `yaml:"revocation-reason"` + RevocationReason string `yaml:"revocation-reason"` }{{ CertificatePath: "path", RevocationDate: "date", - RevocationReason: 1, + RevocationReason: "keyCompromise", }}, }, }, @@ -1062,3 +1205,29 @@ func TestCRLConfig(t *testing.T) { }) } } + +func TestSignAndWriteNoLintCert(t *testing.T) { + _, err := signAndWriteCert(nil, nil, nil, nil, nil, "") + test.AssertError(t, err, "should have failed because no lintCert was provided") + test.AssertDeepEquals(t, err, fmt.Errorf("linting was not performed prior to issuance")) +} + +func TestPostIssuanceLinting(t *testing.T) { + clk := clock.New() + err := postIssuanceLinting(nil, nil) + test.AssertError(t, err, "should have failed because no certificate was provided") + + testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "unable to generate ECDSA private key") + template := &x509.Certificate{ + NotAfter: clk.Now().Add(1 * time.Hour), + DNSNames: []string{"example.com"}, + SerialNumber: big.NewInt(1), + } + certDer, err 
:= x509.CreateCertificate(rand.Reader, template, template, &testKey.PublicKey, testKey) + test.AssertNotError(t, err, "unable to create certificate") + parsedCert, err := x509.ParseCertificate(certDer) + test.AssertNotError(t, err, "unable to parse DER bytes") + err = postIssuanceLinting(parsedCert, nil) + test.AssertNotError(t, err, "should not have errored") +} diff --git a/cmd/ceremony/ocsp.go b/cmd/ceremony/ocsp.go deleted file mode 100644 index f1f62dbbda1..00000000000 --- a/cmd/ceremony/ocsp.go +++ /dev/null @@ -1,69 +0,0 @@ -package notmain - -import ( - "crypto" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "time" - - "golang.org/x/crypto/ocsp" -) - -func generateOCSPResponse(signer crypto.Signer, issuer, delegatedIssuer, cert *x509.Certificate, thisUpdate, nextUpdate time.Time, status int) ([]byte, error) { - err := cert.CheckSignatureFrom(issuer) - if err != nil { - return nil, fmt.Errorf("invalid signature on certificate from issuer: %s", err) - } - - signingCert := issuer - if delegatedIssuer != nil { - signingCert = delegatedIssuer - err := delegatedIssuer.CheckSignatureFrom(issuer) - if err != nil { - return nil, fmt.Errorf("invalid signature on delegated issuer from issuer: %s", err) - } - - gotOCSPEKU := false - for _, eku := range delegatedIssuer.ExtKeyUsage { - if eku == x509.ExtKeyUsageOCSPSigning { - gotOCSPEKU = true - break - } - } - if !gotOCSPEKU { - return nil, errors.New("delegated issuer certificate doesn't contain OCSPSigning extended key usage") - } - } - - if nextUpdate.Before(thisUpdate) { - return nil, errors.New("thisUpdate must be before nextUpdate") - } - if thisUpdate.Before(signingCert.NotBefore) { - return nil, errors.New("thisUpdate is before signing certificate's notBefore") - } else if nextUpdate.After(signingCert.NotAfter) { - return nil, errors.New("nextUpdate is after signing certificate's notAfter") - } - - template := ocsp.Response{ - SerialNumber: cert.SerialNumber, - ThisUpdate: thisUpdate, - NextUpdate: nextUpdate, - Status: status, - } - if delegatedIssuer != nil { - template.Certificate = delegatedIssuer - } - - resp, err := ocsp.CreateResponse(issuer, signingCert, template, signer) - if err != nil { - return nil, fmt.Errorf("failed to create response: %s", err) - } - - encodedResp := make([]byte, base64.StdEncoding.EncodedLen(len(resp))+1) - base64.StdEncoding.Encode(encodedResp, resp) - encodedResp[len(encodedResp)-1] = '\n' - - return encodedResp, nil -} diff --git a/cmd/ceremony/ocsp_test.go b/cmd/ceremony/ocsp_test.go deleted file mode 100644 index 1bcbb6de253..00000000000 --- a/cmd/ceremony/ocsp_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package notmain - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "math/big" - "testing" - "time" - - "github.com/letsencrypt/boulder/test" -) - -func TestGenerateOCSPResponse(t *testing.T) { - kA, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - kB, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - kC, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - - template := &x509.Certificate{ - SerialNumber: big.NewInt(9), - Subject: pkix.Name{ - CommonName: "cn", - }, - KeyUsage: x509.KeyUsageCertSign, - BasicConstraintsValid: true, - IsCA: true, - NotBefore: time.Time{}.Add(time.Hour * 10), - NotAfter: time.Time{}.Add(time.Hour * 20), - } - 
issuerBytes, err := x509.CreateCertificate(rand.Reader, template, template, kA.Public(), kA) - test.AssertNotError(t, err, "failed to create test issuer") - issuer, err := x509.ParseCertificate(issuerBytes) - test.AssertNotError(t, err, "failed to parse test issuer") - delegatedIssuerBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, kB.Public(), kA) - test.AssertNotError(t, err, "failed to create test delegated issuer") - badDelegatedIssuer, err := x509.ParseCertificate(delegatedIssuerBytes) - test.AssertNotError(t, err, "failed to parse test delegated issuer") - template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning} - delegatedIssuerBytes, err = x509.CreateCertificate(rand.Reader, template, issuer, kB.Public(), kA) - test.AssertNotError(t, err, "failed to create test delegated issuer") - goodDelegatedIssuer, err := x509.ParseCertificate(delegatedIssuerBytes) - test.AssertNotError(t, err, "failed to parse test delegated issuer") - template.BasicConstraintsValid, template.IsCA = false, false - certBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, kC.Public(), kA) - test.AssertNotError(t, err, "failed to create test cert") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse test cert") - - cases := []struct { - name string - issuer *x509.Certificate - delegatedIssuer *x509.Certificate - cert *x509.Certificate - thisUpdate time.Time - nextUpdate time.Time - expectedError string - }{ - { - name: "invalid signature from issuer on certificate", - issuer: &x509.Certificate{}, - cert: &x509.Certificate{}, - expectedError: "invalid signature on certificate from issuer: x509: cannot verify signature: algorithm unimplemented", - }, - { - name: "nextUpdate before thisUpdate", - issuer: issuer, - cert: cert, - thisUpdate: time.Time{}.Add(time.Hour), - nextUpdate: time.Time{}, - expectedError: "thisUpdate must be before nextUpdate", - }, - { - name: "thisUpdate before signer notBefore", - issuer: issuer, - cert: cert, - thisUpdate: time.Time{}, - nextUpdate: time.Time{}.Add(time.Hour), - expectedError: "thisUpdate is before signing certificate's notBefore", - }, - { - name: "nextUpdate after signer notAfter", - issuer: issuer, - cert: cert, - thisUpdate: time.Time{}.Add(time.Hour * 11), - nextUpdate: time.Time{}.Add(time.Hour * 21), - expectedError: "nextUpdate is after signing certificate's notAfter", - }, - { - name: "bad delegated issuer signature", - issuer: issuer, - cert: cert, - delegatedIssuer: &x509.Certificate{}, - expectedError: "invalid signature on delegated issuer from issuer: x509: cannot verify signature: algorithm unimplemented", - }, - { - name: "good", - issuer: issuer, - cert: cert, - thisUpdate: time.Time{}.Add(time.Hour * 11), - nextUpdate: time.Time{}.Add(time.Hour * 12), - }, - { - name: "bad delegated issuer without EKU", - issuer: issuer, - cert: cert, - delegatedIssuer: badDelegatedIssuer, - expectedError: "delegated issuer certificate doesn't contain OCSPSigning extended key usage", - }, - { - name: "good delegated issuer", - issuer: issuer, - cert: cert, - delegatedIssuer: goodDelegatedIssuer, - thisUpdate: time.Time{}.Add(time.Hour * 11), - nextUpdate: time.Time{}.Add(time.Hour * 12), - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - _, err := generateOCSPResponse(kA, tc.issuer, tc.delegatedIssuer, tc.cert, tc.thisUpdate, tc.nextUpdate, 0) - if err != nil { - if tc.expectedError != "" && tc.expectedError != err.Error() { - t.Errorf("unexpected error: 
got %q, want %q", err.Error(), tc.expectedError) - } else if tc.expectedError == "" { - t.Errorf("unexpected error: %s", err) - } - } else if tc.expectedError != "" { - t.Errorf("expected error: %s", tc.expectedError) - } - }) - } -} diff --git a/cmd/ceremony/rsa.go b/cmd/ceremony/rsa.go index 6b39f6b554a..7d0eb4b30c5 100644 --- a/cmd/ceremony/rsa.go +++ b/cmd/ceremony/rsa.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto/rsa" @@ -6,18 +6,23 @@ import ( "log" "math/big" - "github.com/letsencrypt/boulder/pkcs11helpers" "github.com/miekg/pkcs11" + + "github.com/letsencrypt/boulder/pkcs11helpers" +) + +const ( + rsaExp = 65537 ) // rsaArgs constructs the private and public key template attributes sent to the // device and specifies which mechanism should be used. modulusLen specifies the // length of the modulus to be generated on the device in bits and exponent // specifies the public exponent that should be used. -func rsaArgs(label string, modulusLen, exponent uint, keyID []byte) generateArgs { +func rsaArgs(label string, modulusLen int, keyID []byte) generateArgs { // Encode as unpadded big endian encoded byte slice - expSlice := big.NewInt(int64(exponent)).Bytes() - log.Printf("\tEncoded public exponent (%d) as: %0X\n", exponent, expSlice) + expSlice := big.NewInt(rsaExp).Bytes() + log.Printf("\tEncoded public exponent (%d) as: %0X\n", rsaExp, expSlice) return generateArgs{ mechanism: []*pkcs11.Mechanism{ pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil), @@ -51,15 +56,15 @@ func rsaArgs(label string, modulusLen, exponent uint, keyID []byte) generateArgs // handle, and constructs a rsa.PublicKey. It also checks that the key has the // correct length modulus and that the public exponent is what was requested in // the public key template. -func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusLen, exponent uint) (*rsa.PublicKey, error) { +func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusLen int) (*rsa.PublicKey, error) { pubKey, err := session.GetRSAPublicKey(object) if err != nil { return nil, err } - if pubKey.E != int(exponent) { + if pubKey.E != rsaExp { return nil, errors.New("returned CKA_PUBLIC_EXPONENT doesn't match expected exponent") } - if pubKey.N.BitLen() != int(modulusLen) { + if pubKey.N.BitLen() != modulusLen { return nil, errors.New("returned CKA_MODULUS isn't of the expected bit length") } log.Printf("\tPublic exponent: %d\n", pubKey.E) @@ -68,24 +73,24 @@ func rsaPub(session *pkcs11helpers.Session, object pkcs11.ObjectHandle, modulusL } // rsaGenerate is used to generate and verify a RSA key pair of the size -// specified by modulusLen and with the exponent specified by pubExponent. +// specified by modulusLen and with the exponent 65537. // It returns the public part of the generated key pair as a rsa.PublicKey // and the random key ID that the HSM uses to identify the key pair. 
-func rsaGenerate(session *pkcs11helpers.Session, label string, modulusLen, pubExponent uint) (*rsa.PublicKey, []byte, error) { +func rsaGenerate(session *pkcs11helpers.Session, label string, modulusLen int) (*rsa.PublicKey, []byte, error) { keyID := make([]byte, 4) _, err := newRandReader(session).Read(keyID) if err != nil { return nil, nil, err } - log.Printf("Generating RSA key with %d bit modulus and public exponent %d and ID %x\n", modulusLen, pubExponent, keyID) - args := rsaArgs(label, modulusLen, pubExponent, keyID) + log.Printf("Generating RSA key with %d bit modulus and public exponent %d and ID %x\n", modulusLen, rsaExp, keyID) + args := rsaArgs(label, modulusLen, keyID) pub, _, err := session.GenerateKeyPair(args.mechanism, args.publicAttrs, args.privateAttrs) if err != nil { return nil, nil, err } log.Println("Key generated") log.Println("Extracting public key") - pk, err := rsaPub(session, pub, modulusLen, pubExponent) + pk, err := rsaPub(session, pub, modulusLen) if err != nil { return nil, nil, err } diff --git a/cmd/ceremony/rsa_test.go b/cmd/ceremony/rsa_test.go index 06929cf9d09..40eb9d5df90 100644 --- a/cmd/ceremony/rsa_test.go +++ b/cmd/ceremony/rsa_test.go @@ -1,4 +1,4 @@ -package notmain +package main import ( "crypto" @@ -8,24 +8,15 @@ import ( "math/big" "testing" + "github.com/miekg/pkcs11" + "github.com/letsencrypt/boulder/pkcs11helpers" "github.com/letsencrypt/boulder/test" - "github.com/miekg/pkcs11" ) func TestRSAPub(t *testing.T) { s, ctx := pkcs11helpers.NewSessionWithMock() - // test we fail to construct key with non-matching exp - ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) { - return []*pkcs11.Attribute{ - pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}), - pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}), - }, nil - } - _, err := rsaPub(s, 0, 0, 255) - test.AssertError(t, err, "rsaPub didn't fail with non-matching exp") - // test we fail to construct key with non-matching modulus ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) { return []*pkcs11.Attribute{ @@ -33,7 +24,7 @@ func TestRSAPub(t *testing.T) { pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}), }, nil } - _, err = rsaPub(s, 0, 16, 65537) + _, err := rsaPub(s, 0, 16) test.AssertError(t, err, "rsaPub didn't fail with non-matching modulus size") // test we don't fail with the correct attributes @@ -43,7 +34,7 @@ func TestRSAPub(t *testing.T) { pkcs11.NewAttribute(pkcs11.CKA_MODULUS, []byte{255}), }, nil } - _, err = rsaPub(s, 0, 8, 65537) + _, err = rsaPub(s, 0, 8) test.AssertNotError(t, err, "rsaPub failed with valid attributes") } @@ -60,7 +51,7 @@ func TestRSAGenerate(t *testing.T) { ctx.GenerateKeyPairFunc = func(pkcs11.SessionHandle, []*pkcs11.Mechanism, []*pkcs11.Attribute, []*pkcs11.Attribute) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) { return 0, 0, errors.New("bad") } - _, _, err = rsaGenerate(s, "", 1024, 65537) + _, _, err = rsaGenerate(s, "", 1024) test.AssertError(t, err, "rsaGenerate didn't fail on GenerateKeyPair error") // Test rsaGenerate fails when rsaPub fails @@ -70,7 +61,7 @@ func TestRSAGenerate(t *testing.T) { ctx.GetAttributeValueFunc = func(pkcs11.SessionHandle, pkcs11.ObjectHandle, []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) { return nil, errors.New("bad") } - _, _, err = rsaGenerate(s, "", 1024, 65537) + _, _, err = rsaGenerate(s, "", 1024) test.AssertError(t, err, "rsaGenerate 
didn't fail on rsaPub error") // Test rsaGenerate fails when rsaVerify fails @@ -83,7 +74,7 @@ func TestRSAGenerate(t *testing.T) { ctx.GenerateRandomFunc = func(pkcs11.SessionHandle, int) ([]byte, error) { return nil, errors.New("yup") } - _, _, err = rsaGenerate(s, "", 1024, 65537) + _, _, err = rsaGenerate(s, "", 1024) test.AssertError(t, err, "rsaGenerate didn't fail on rsaVerify error") // Test rsaGenerate doesn't fail when everything works @@ -97,6 +88,6 @@ func TestRSAGenerate(t *testing.T) { // Chop off the hash identifier and feed back into rsa.SignPKCS1v15 return rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, msg[19:]) } - _, _, err = rsaGenerate(s, "", 1024, 65537) + _, _, err = rsaGenerate(s, "", 1024) test.AssertNotError(t, err, "rsaGenerate didn't succeed when everything worked as expected") } diff --git a/cmd/cert-checker/main.go b/cmd/cert-checker/main.go index 35ba1973cae..c5da249842b 100644 --- a/cmd/cert-checker/main.go +++ b/cmd/cert-checker/main.go @@ -4,35 +4,43 @@ import ( "bytes" "context" "crypto/x509" + "database/sql" "encoding/json" "flag" "fmt" - "log/syslog" + "net/netip" "os" - "reflect" "regexp" + "slices" "sync" "sync/atomic" "time" "github.com/jmhodges/clock" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" zX509 "github.com/zmap/zcrypto/x509" "github.com/zmap/zlint/v3" "github.com/zmap/zlint/v3/lint" "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" + corepb "github.com/letsencrypt/boulder/core/proto" + "github.com/letsencrypt/boulder/ctpolicy/loglist" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/goodkey/sagoodkey" "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/linter" blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/policy" + "github.com/letsencrypt/boulder/precert" "github.com/letsencrypt/boulder/sa" ) -// For defense-in-depth in addition to using the PA & its hostnamePolicy to -// check domain names we also perform a check against the regex's from the +// For defense-in-depth, in addition to using the PA & its identPolicy to check +// domain names, we also perform a check against the regexes from the // forbiddenDomains array var forbiddenDomainPatterns = []*regexp.Regexp{ regexp.MustCompile(`^\s*$`), @@ -57,6 +65,7 @@ type report struct { end time.Time GoodCerts int64 `json:"good-certs"` BadCerts int64 `json:"bad-certs"` + DbErrs int64 `json:"db-errs"` Entries map[string]reportEntry `json:"entries"` } @@ -71,57 +80,135 @@ func (r *report) dump() error { type reportEntry struct { Valid bool `json:"valid"` - DNSNames []string `json:"dnsNames"` + SANs []string `json:"sans"` Problems []string `json:"problems,omitempty"` } -// certDB is an interface collecting the gorp.saDbMap functions that the various +// certDB is an interface collecting the borp.DbMap functions that the various // parts of cert-checker rely on. Using this adapter shim allows tests to swap // out the saDbMap implementation.
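The defense-in-depth regex check above is small enough to sketch end-to-end. This is a standalone approximation with an abbreviated, hypothetical pattern list; the helper's shape matches the isForbiddenDomain calls seen later in this file:

```go
package main

import (
	"fmt"
	"regexp"
)

// Abbreviated stand-in for forbiddenDomainPatterns; the real list is longer.
var patterns = []*regexp.Regexp{
	regexp.MustCompile(`^\s*$`),
	regexp.MustCompile(`^localhost$`),
}

// isForbiddenDomain reports whether name matches any forbidden pattern,
// returning the offending pattern for inclusion in the problem message.
func isForbiddenDomain(name string) (bool, string) {
	for _, re := range patterns {
		if re.MatchString(name) {
			return true, re.String()
		}
	}
	return false, ""
}

func main() {
	fmt.Println(isForbiddenDomain("localhost"))   // true ^localhost$
	fmt.Println(isForbiddenDomain("example.com")) // false
}
```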
type certDB interface { - Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) - SelectInt(query string, args ...interface{}) (int64, error) + Select(ctx context.Context, i any, query string, args ...any) ([]any, error) + SelectOne(ctx context.Context, i any, query string, args ...any) error + SelectNullInt(ctx context.Context, query string, args ...any) (sql.NullInt64, error) } +// A function that looks up a precertificate by serial and returns its DER bytes. Used for +// mocking in tests. +type precertGetter func(context.Context, string) ([]byte, error) + type certChecker struct { pa core.PolicyAuthority kp goodkey.KeyPolicy dbMap certDB - certs chan core.Certificate + getPrecert precertGetter + certs chan *corepb.Certificate clock clock.Clock rMu *sync.Mutex issuedReport report checkPeriod time.Duration acceptableValidityDurations map[time.Duration]bool + lints lint.Registry + logger blog.Logger } -func newChecker(saDbMap certDB, clk clock.Clock, pa core.PolicyAuthority, kp goodkey.KeyPolicy, period time.Duration, avd map[time.Duration]bool) certChecker { +func newChecker(saDbMap certDB, + clk clock.Clock, + pa core.PolicyAuthority, + kp goodkey.KeyPolicy, + period time.Duration, + avd map[time.Duration]bool, + lints lint.Registry, + logger blog.Logger, +) certChecker { + precertGetter := func(ctx context.Context, serial string) ([]byte, error) { + precertPb, err := sa.SelectPrecertificate(ctx, saDbMap, serial) + if err != nil { + return nil, err + } + return precertPb.Der, nil + } return certChecker{ pa: pa, kp: kp, dbMap: saDbMap, - certs: make(chan core.Certificate, batchSize), + getPrecert: precertGetter, + certs: make(chan *corepb.Certificate, batchSize), rMu: new(sync.Mutex), clock: clk, issuedReport: report{Entries: make(map[string]reportEntry)}, checkPeriod: period, acceptableValidityDurations: avd, + lints: lints, + logger: logger, } } -func (c *certChecker) getCerts(unexpiredOnly bool) error { - c.issuedReport.end = c.clock.Now() - c.issuedReport.begin = c.issuedReport.end.Add(-c.checkPeriod) +// findStartingID returns the lowest `id` in the certificates table within the +// time window specified. The time window is a half-open interval [begin, end). +func (c *certChecker) findStartingID(ctx context.Context, begin, end time.Time) (int64, error) { + var output sql.NullInt64 + var err error + var retries int + + // Rather than querying `MIN(id)` across that whole window, we query it across the first + // hour of the window. This allows the query planner to use the index on `issued` more + // effectively. For a busy, actively issuing CA, that will always return results in the + // first query. For a less busy CA, or during integration tests, there may only exist + // certificates towards the end of the window, so we try querying later hourly chunks until + // we find a certificate or hit the end of the window. We also retry transient errors. 
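The retry path in the body below sleeps according to core.RetryBackoff(retries, time.Second, time.Minute, 2). As a rough, deterministic stand-in (the real helper presumably also randomizes the delay; treat that and the exact growth curve as assumptions), capped exponential backoff looks like this:

```go
package main

import (
	"fmt"
	"time"
)

// retryBackoff is a deterministic stand-in for core.RetryBackoff as called
// below: the delay grows by `factor` per retry from `base` and is clamped
// at `max`.
func retryBackoff(retries int, base, max time.Duration, factor float64) time.Duration {
	if retries <= 0 {
		return 0
	}
	backoff := float64(base)
	for i := 1; i < retries; i++ {
		backoff *= factor
	}
	if backoff > float64(max) {
		backoff = float64(max)
	}
	return time.Duration(backoff)
}

func main() {
	for retries := 1; retries <= 8; retries++ {
		fmt.Printf("retry %d: sleep %s\n", retries, retryBackoff(retries, time.Second, time.Minute, 2))
	}
	// retries 1..8 sleep 1s, 2s, 4s, 8s, 16s, 32s, 1m0s, 1m0s
}
```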
+ queryBegin := begin + queryEnd := begin.Add(time.Hour) + + for queryBegin.Compare(end) < 0 { + output, err = c.dbMap.SelectNullInt( + ctx, + `SELECT MIN(id) FROM certificates + WHERE issued >= :begin AND + issued < :end`, + map[string]any{ + "begin": queryBegin, + "end": queryEnd, + }, + ) + if err != nil { + c.logger.AuditErr("finding starting certificate", err, map[string]any{ + "begin": queryBegin.Format(time.RFC3339), + "end": queryEnd.Format(time.RFC3339), + "attempt": retries + 1, + }) + retries++ + time.Sleep(core.RetryBackoff(retries, time.Second, time.Minute, 2)) + continue + } + // https://mariadb.com/kb/en/min/ + // MIN() returns NULL if there were no matching rows + // https://pkg.go.dev/database/sql#NullInt64 + // Valid is true if Int64 is not NULL + if !output.Valid { + // No matching rows, try the next hour + queryBegin = queryBegin.Add(time.Hour) + queryEnd = queryEnd.Add(time.Hour) + if queryEnd.Compare(end) > 0 { + queryEnd = end + } + continue + } - args := map[string]interface{}{"issued": c.issuedReport.begin, "now": 0} - if unexpiredOnly { - args["now"] = c.clock.Now() + return output.Int64, nil } - initialID, err := c.dbMap.SelectInt( - "SELECT MIN(id) FROM certificates WHERE issued >= :issued AND expires >= :now", - args, - ) + // Fell through the loop without finding a valid ID + return 0, fmt.Errorf("no rows found for certificates issued between %s and %s", begin, end) +} + +func (c *certChecker) getCerts(ctx context.Context) error { + // The end of the report is the current time, rounded up to the nearest second. + c.issuedReport.end = c.clock.Now().Truncate(time.Second).Add(time.Second) + // The beginning of the report is the end minus the check period, rounded down to the nearest second. + c.issuedReport.begin = c.issuedReport.end.Add(-c.checkPeriod).Truncate(time.Second) + + initialID, err := c.findStartingID(ctx, c.issuedReport.begin, c.issuedReport.end) if err != nil { return err } @@ -130,31 +217,49 @@ func (c *certChecker) getCerts(unexpiredOnly bool) error { initialID -= 1 } - // Retrieve certs in batches of 1000 (the size of the certificate channel) - // so that we don't eat unnecessary amounts of memory and avoid the 16MB MySQL - // packet limit. - args["limit"] = batchSize - args["id"] = initialID + batchStartID := initialID + var retries int for { - certs, err := sa.SelectCertificates( + certs, highestID, err := sa.SelectCertificates( + ctx, c.dbMap, - "WHERE id > :id AND issued >= :issued AND expires >= :now ORDER BY id LIMIT :limit", - args, + `WHERE id > :id AND + issued >= :begin AND + issued < :end + ORDER BY id LIMIT :limit`, + map[string]any{ + "begin": c.issuedReport.begin, + "end": c.issuedReport.end, + // Retrieve certs in batches of 1000 (the size of the certificate channel) + // so that we don't eat unnecessary amounts of memory and avoid the 16MB MySQL + // packet limit. 
+ "limit": batchSize, + "id": batchStartID, + }, ) if err != nil { - return err + c.logger.AuditErr("selecting certificates", err, map[string]any{ + "begin": c.issuedReport.begin.Format(time.RFC3339), + "end": c.issuedReport.end.Format(time.RFC3339), + "batchStartID": batchStartID, + "attempt": retries + 1, + }) + retries++ + time.Sleep(core.RetryBackoff(retries, time.Second, time.Minute, 2)) + continue } + retries = 0 for _, cert := range certs { - c.certs <- cert.Certificate + c.certs <- cert } if len(certs) == 0 { break } lastCert := certs[len(certs)-1] - args["id"] = lastCert.ID - if lastCert.Issued.After(c.issuedReport.end) { + if lastCert.Issued.AsTime().After(c.issuedReport.end) { break } + batchStartID = highestID } // Close channel so range operations won't block once the channel empties out @@ -162,15 +267,15 @@ func (c *certChecker) getCerts(unexpiredOnly bool) error { return nil } -func (c *certChecker) processCerts(wg *sync.WaitGroup, badResultsOnly bool, ignoredLints map[string]bool) { +func (c *certChecker) processCerts(ctx context.Context, wg *sync.WaitGroup, badResultsOnly bool) { for cert := range c.certs { - dnsNames, problems := c.checkCert(cert, ignoredLints) + sans, problems := c.checkCert(ctx, cert) valid := len(problems) == 0 c.rMu.Lock() if !badResultsOnly || (badResultsOnly && !valid) { c.issuedReport.Entries[cert.Serial] = reportEntry{ Valid: valid, - DNSNames: dnsNames, + SANs: sans, Problems: problems, } } @@ -204,63 +309,120 @@ var expectedExtensionContent = map[string][]byte{ "1.3.6.1.5.5.7.1.24": {0x30, 0x03, 0x02, 0x01, 0x05}, // Must staple feature } -// checkCert returns a list of DNS names in the certificate and a list of problems with the certificate. -func (c *certChecker) checkCert(cert core.Certificate, ignoredLints map[string]bool) ([]string, []string) { - var dnsNames []string +// checkValidations checks the database for matching authorizations that were +// likely valid at the time the certificate was issued. Authorizations with +// status = "deactivated" are counted for this, so long as their validatedAt +// is before the issuance and expiration is after. +func (c *certChecker) checkValidations(ctx context.Context, cert *corepb.Certificate, idents identifier.ACMEIdentifiers) error { + authzs, err := sa.SelectAuthzsMatchingIssuance(ctx, c.dbMap, cert.RegistrationID, cert.Issued.AsTime(), idents) + if err != nil { + return fmt.Errorf("error checking authzs for certificate %s: %w", cert.Serial, err) + } + + if len(authzs) == 0 { + return fmt.Errorf("no relevant authzs found valid at %s", cert.Issued) + } + + // We may get multiple authorizations for the same identifier, but that's + // okay. Any authorization for a given identifier is sufficient. + identToAuthz := make(map[identifier.ACMEIdentifier]*corepb.Authorization) + for _, m := range authzs { + identToAuthz[identifier.FromProto(m.Identifier)] = m + } + + var errors []error + for _, ident := range idents { + _, ok := identToAuthz[ident] + if !ok { + errors = append(errors, fmt.Errorf("missing authz for %q", ident.Value)) + continue + } + } + if len(errors) > 0 { + return fmt.Errorf("%s", errors) + } + return nil +} + +// checkCert returns a list of Subject Alternative Names in the certificate and a list of problems with the certificate. +func (c *certChecker) checkCert(ctx context.Context, cert *corepb.Certificate) ([]string, []string) { var problems []string + // Check that the digests match. 
- if cert.Digest != core.Fingerprint256(cert.DER) { + if cert.Digest != core.Fingerprint256(cert.Der) { problems = append(problems, "Stored digest doesn't match certificate digest") } + // Parse the certificate. - parsedCert, err := zX509.ParseCertificate(cert.DER) + parsedCert, err := zX509.ParseCertificate(cert.Der) if err != nil { problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err)) - } else { - dnsNames = parsedCert.DNSNames - // Run zlint checks. - results := zlint.LintCertificate(parsedCert) - for name, res := range results.Results { - if ignoredLints[name] || res.Status <= lint.Pass { - continue - } - prob := fmt.Sprintf("zlint %s: %s", res.Status, name) - if res.Details != "" { - prob = fmt.Sprintf("%s %s", prob, res.Details) - } - problems = append(problems, prob) - } - // Check if stored serial is correct. - storedSerial, err := core.StringToSerial(cert.Serial) - if err != nil { - problems = append(problems, "Stored serial is invalid") - } else if parsedCert.SerialNumber.Cmp(storedSerial) != 0 { - problems = append(problems, "Stored serial doesn't match certificate serial") - } - // Check that we have the correct expiration time. - if !parsedCert.NotAfter.Equal(cert.Expires) { - problems = append(problems, "Stored expiration doesn't match certificate NotAfter") - } - // Check if basic constraints are set. - if !parsedCert.BasicConstraintsValid { - problems = append(problems, "Certificate doesn't have basic constraints set") - } - // Check that the cert isn't able to sign other certificates. - if parsedCert.IsCA { - problems = append(problems, "Certificate can sign other certificates") - } - // Check that the cert has a valid validity period. The validity - // period is computed inclusive of the whole final second indicated by - // notAfter. - validityDuration := parsedCert.NotAfter.Add(time.Second).Sub(parsedCert.NotBefore) - _, ok := c.acceptableValidityDurations[validityDuration] - if !ok { - problems = append(problems, "Certificate has unacceptable validity period") + // This is a fatal error, we can't do any further processing. + return nil, problems + } + + // Now that it's parsed, we can extract the SANs. + sans := slices.Clone(parsedCert.DNSNames) + for _, ip := range parsedCert.IPAddresses { + sans = append(sans, ip.String()) + } + + // Run zlint checks. + results := zlint.LintCertificateEx(parsedCert, c.lints) + for name, res := range results.Results { + if res.Status <= lint.Pass { + continue } - // Check that the stored issuance time isn't too far back/forward dated. - if parsedCert.NotBefore.Before(cert.Issued.Add(-6*time.Hour)) || parsedCert.NotBefore.After(cert.Issued.Add(6*time.Hour)) { - problems = append(problems, "Stored issuance date is outside of 6 hour window of certificate NotBefore") + prob := fmt.Sprintf("zlint %s: %s", res.Status, name) + if res.Details != "" { + prob = fmt.Sprintf("%s %s", prob, res.Details) } + problems = append(problems, prob) + } + + // Check if stored serial is correct. + storedSerial, err := core.StringToSerial(cert.Serial) + if err != nil { + problems = append(problems, "Stored serial is invalid") + } else if parsedCert.SerialNumber.Cmp(storedSerial) != 0 { + problems = append(problems, "Stored serial doesn't match certificate serial") + } + + // Check that we have the correct expiration time. + if !parsedCert.NotAfter.Equal(cert.Expires.AsTime()) { + problems = append(problems, "Stored expiration doesn't match certificate NotAfter") + } + + // Check if basic constraints are set. 
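The SAN-gathering step above translates directly to the standard library's x509 types (cert-checker itself parses with zcrypto, which exposes the same fields):

```go
package main

import (
	"crypto/x509"
	"fmt"
	"net"
	"slices"
)

// collectSANs mirrors the step above: clone the DNS names, then append the
// textual form of every IP address SAN.
func collectSANs(cert *x509.Certificate) []string {
	sans := slices.Clone(cert.DNSNames)
	for _, ip := range cert.IPAddresses {
		sans = append(sans, ip.String())
	}
	return sans
}

func main() {
	cert := &x509.Certificate{
		DNSNames:    []string{"example.com"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
	}
	fmt.Println(collectSANs(cert)) // [example.com 127.0.0.1]
}
```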
+ if !parsedCert.BasicConstraintsValid { + problems = append(problems, "Certificate doesn't have basic constraints set") + } + + // Check that the cert isn't able to sign other certificates. + if parsedCert.IsCA { + problems = append(problems, "Certificate can sign other certificates") + } + + // Check that the cert has a valid validity period. The validity + // period is computed inclusive of the whole final second indicated by + // notAfter. + validityDuration := parsedCert.NotAfter.Add(time.Second).Sub(parsedCert.NotBefore) + _, ok := c.acceptableValidityDurations[validityDuration] + if !ok { + problems = append(problems, "Certificate has unacceptable validity period") + } + + // Check that the stored issuance time isn't too far back/forward dated. + if parsedCert.NotBefore.Before(cert.Issued.AsTime().Add(-6*time.Hour)) || parsedCert.NotBefore.After(cert.Issued.AsTime().Add(6*time.Hour)) { + problems = append(problems, "Stored issuance date is outside of 6 hour window of certificate NotBefore") + } + + // Check that the cert doesn't contain any SANs of unexpected types. + if len(parsedCert.EmailAddresses) != 0 || len(parsedCert.URIs) != 0 { + problems = append(problems, "Certificate contains SAN of unacceptable type (email or URI)") + } + + if parsedCert.Subject.CommonName != "" { // Check if the CommonName is <= 64 characters. if len(parsedCert.Subject.CommonName) > 64 { problems = append( @@ -268,57 +430,110 @@ func (c *certChecker) checkCert(cert core.Certificate, ignoredLints map[string]b fmt.Sprintf("Certificate has common name >64 characters long (%d)", len(parsedCert.Subject.CommonName)), ) } - // Check that the PA is still willing to issue for each name in DNSNames - // + CommonName. - for _, name := range append(parsedCert.DNSNames, parsedCert.Subject.CommonName) { - id := identifier.ACMEIdentifier{Type: identifier.DNS, Value: name} - err = c.pa.WillingToIssueWildcards([]identifier.ACMEIdentifier{id}) - if err != nil { - problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err)) - } else { - // For defense-in-depth, even if the PA was willing to issue for a name - // we double check it against a list of forbidden domains. This way even - // if the hostnamePolicyFile malfunctions we will flag the forbidden - // domain matches - if forbidden, pattern := isForbiddenDomain(name); forbidden { - problems = append(problems, fmt.Sprintf( - "Policy Authority was willing to issue but domain '%s' matches "+ - "forbiddenDomains entry %q", name, pattern)) - } - } + + // Check that the CommonName is included in the SANs. + if !slices.Contains(sans, parsedCert.Subject.CommonName) { + problems = append(problems, fmt.Sprintf("Certificate Common Name does not appear in Subject Alternative Names: %q !< %v", + parsedCert.Subject.CommonName, parsedCert.DNSNames)) + } + } + + // Check that the PA is still willing to issue for each DNS name and IP + // address in the SANs. We do not check the CommonName here, as (if it exists) + // we already checked that it is identical to one of the DNSNames in the SAN. + for _, name := range parsedCert.DNSNames { + err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS(name)}) + if err != nil { + problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err)) + continue + } + // For defense-in-depth, even if the PA was willing to issue for a name + // we double check it against a list of forbidden domains. 
This way even + // if the hostnamePolicyFile malfunctions we will flag the forbidden + // domain matches + if forbidden, pattern := isForbiddenDomain(name); forbidden { + problems = append(problems, fmt.Sprintf( + "Policy Authority was willing to issue but domain '%s' matches "+ + "forbiddenDomains entry %q", name, pattern)) + } + } + for _, name := range parsedCert.IPAddresses { + ip, ok := netip.AddrFromSlice(name) + if !ok { + problems = append(problems, fmt.Sprintf("SANs contain malformed IP %q", name)) + continue } - // Check the cert has the correct key usage extensions - if !reflect.DeepEqual(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth, zX509.ExtKeyUsageClientAuth}) { - problems = append(problems, "Certificate has incorrect key usage extensions") + err = c.pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewIP(ip)}) + if err != nil { + problems = append(problems, fmt.Sprintf("Policy Authority isn't willing to issue for '%s': %s", name, err)) + continue } + } - for _, ext := range parsedCert.Extensions { - _, ok := allowedExtensions[ext.Id.String()] - if !ok { - problems = append(problems, fmt.Sprintf("Certificate contains an unexpected extension: %s", ext.Id)) - } - expectedContent, ok := expectedExtensionContent[ext.Id.String()] - if ok { - if !bytes.Equal(ext.Value, expectedContent) { - problems = append(problems, fmt.Sprintf("Certificate extension %s contains unexpected content: has %x, expected %x", ext.Id, ext.Value, expectedContent)) - } + // Check the cert has the correct key usage extensions + serverAndClient := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth, zX509.ExtKeyUsageClientAuth}) + serverOnly := slices.Equal(parsedCert.ExtKeyUsage, []zX509.ExtKeyUsage{zX509.ExtKeyUsageServerAuth}) + if !(serverAndClient || serverOnly) { + problems = append(problems, "Certificate has incorrect key usage extensions") + } + + for _, ext := range parsedCert.Extensions { + _, ok := allowedExtensions[ext.Id.String()] + if !ok { + problems = append(problems, fmt.Sprintf("Certificate contains an unexpected extension: %s", ext.Id)) + } + expectedContent, ok := expectedExtensionContent[ext.Id.String()] + if ok { + if !bytes.Equal(ext.Value, expectedContent) { + problems = append(problems, fmt.Sprintf("Certificate extension %s contains unexpected content: has %x, expected %x", ext.Id, ext.Value, expectedContent)) } } + } - // Check that the cert has a good key. Note that this does not perform - // checks which rely on external resources such as weak or blocked key - // lists, or the list of blocked keys in the database. This only performs - // static checks, such as against the RSA key size and the ECDSA curve. - p, err := x509.ParseCertificate(cert.DER) + // Check that the cert has a good key. Note that this does not perform + // checks which rely on external resources such as weak or blocked key + // lists, or the list of blocked keys in the database. This only performs + // static checks, such as against the RSA key size and the ECDSA curve. 
+ p, err := x509.ParseCertificate(cert.Der) + if err != nil { + problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err)) + } else { + err = c.kp.GoodKey(ctx, p.PublicKey) if err != nil { - problems = append(problems, fmt.Sprintf("Couldn't parse stored certificate: %s", err)) + problems = append(problems, fmt.Sprintf("Key Policy isn't willing to issue for public key: %s", err)) } - err = c.kp.GoodKey(context.Background(), p.PublicKey) + } + + precertDER, err := c.getPrecert(ctx, cert.Serial) + if err != nil { + // Log and continue, since we want the problems slice to contain only + // problems with the cert itself. + c.logger.Errf("fetching linting precertificate for %s: %s", cert.Serial, err) + atomic.AddInt64(&c.issuedReport.DbErrs, 1) + } else { + err = precert.Correspond(precertDER, cert.Der) if err != nil { - problems = append(problems, fmt.Sprintf("Key Policy isn't willing to issue for public key: %s", err)) + problems = append(problems, fmt.Sprintf("Certificate does not correspond to precert for %s: %s", cert.Serial, err)) + } + } + + if features.Get().CertCheckerChecksValidations { + idents := identifier.FromCert(p) + err = c.checkValidations(ctx, cert, idents) + if err != nil { + if features.Get().CertCheckerRequiresValidations { + problems = append(problems, err.Error()) + } else { + var identValues []string + for _, ident := range idents { + identValues = append(identValues, ident.Value) + } + c.logger.Warningf("Certificate %s %s: %s", cert.Serial, identValues, err) + } } } - return dnsNames, problems + + return sans, problems } type Config struct { @@ -326,25 +541,39 @@ type Config struct { DB cmd.DBConfig cmd.HostnamePolicyConfig - Workers int - ReportDirectoryPath string - UnexpiredOnly bool - BadResultsOnly bool - CheckPeriod cmd.ConfigDuration + Workers int `validate:"required,min=1"` + // Deprecated: this is ignored, and cert checker always checks both expired and unexpired. + UnexpiredOnly bool + BadResultsOnly bool + CheckPeriod config.Duration // AcceptableValidityDurations is a list of durations which are // acceptable for certificates we issue. - AcceptableValidityDurations []cmd.ConfigDuration + AcceptableValidityDurations []config.Duration // GoodKey is an embedded config stanza for the goodkey library. If this // is populated, the cert-checker will perform static checks against the // public keys in the certs it checks. GoodKey goodkey.Config + // LintConfig is a path to a zlint config file, which can be used to control + // the behavior of zlint's "customizable lints". + LintConfig string // IgnoredLints is a list of zlint names. Any lint results from a lint in // the IgnoredLints list are ignored regardless of LintStatus level. IgnoredLints []string - Features map[string]bool + + // CTLogListFile is the path to a JSON file on disk containing the set of + // all logs trusted by Chrome. The file must match the v3 log list schema: + // https://www.gstatic.com/ct/log_list/v3/log_list_schema.json + CTLogListFile string + + // CTIncludeTestLogs allows logs marked as "test" to be included in the + // CT log list used for linting. This should be enabled in environments + // configured to submit SCTs to test logs.
+ CTIncludeTestLogs bool + + Features features.Config } PA cmd.PAConfig Syslog cmd.SyslogConfig @@ -362,17 +591,10 @@ func main() { err := cmd.ReadConfigFile(*configFile, &config) cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(config.CertChecker.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - syslogger, err := syslog.Dial("", "", syslog.LOG_INFO|syslog.LOG_LOCAL0, "") - cmd.FailOnError(err, "Failed to dial syslog") + features.Set(config.CertChecker.Features) - logger, err := blog.New(syslogger, 0, 0) - cmd.FailOnError(err, "Failed to construct logger") - - err = blog.Set(logger) - cmd.FailOnError(err, "Failed to set audit logger") + logger := cmd.NewLogger(config.Syslog) + cmd.LogStartup(logger) acceptableValidityDurations := make(map[time.Duration]bool) if len(config.CertChecker.AcceptableValidityDurations) > 0 { @@ -388,61 +610,65 @@ func main() { // Validate PA config and set defaults if needed. cmd.FailOnError(config.PA.CheckChallenges(), "Invalid PA configuration") + cmd.FailOnError(config.PA.CheckIdentifiers(), "Invalid PA configuration") - if config.CertChecker.GoodKey.WeakKeyFile != "" { - cmd.Fail("cert-checker does not support checking against weak key files") - } - if config.CertChecker.GoodKey.BlockedKeyFile != "" { - cmd.Fail("cert-checker does not support checking against blocked key files") - } - kp, err := goodkey.NewKeyPolicy(&config.CertChecker.GoodKey, nil) + kp, err := sagoodkey.NewPolicy(&config.CertChecker.GoodKey, nil) cmd.FailOnError(err, "Unable to create key policy") saDbMap, err := sa.InitWrappedDb(config.CertChecker.DB, prometheus.DefaultRegisterer, logger) cmd.FailOnError(err, "While initializing dbMap") - checkerLatency := prometheus.NewHistogram(prometheus.HistogramOpts{ + checkerLatency := promauto.NewHistogram(prometheus.HistogramOpts{ Name: "cert_checker_latency", Help: "Histogram of latencies a cert-checker worker takes to complete a batch", }) - prometheus.DefaultRegisterer.MustRegister(checkerLatency) - pa, err := policy.New(config.PA.Challenges) + pa, err := policy.New(config.PA.Identifiers, config.PA.Challenges, logger) cmd.FailOnError(err, "Failed to create PA") - err = pa.SetHostnamePolicyFile(config.CertChecker.HostnamePolicyFile) + err = pa.LoadIdentPolicyFile(config.CertChecker.HostnamePolicyFile) cmd.FailOnError(err, "Failed to load HostnamePolicyFile") + if config.CertChecker.CTLogListFile != "" { + err = loglist.InitLintList(config.CertChecker.CTLogListFile, config.CertChecker.CTIncludeTestLogs) + cmd.FailOnError(err, "Failed to load CT Log List") + } + + lints, err := linter.NewRegistry(config.CertChecker.IgnoredLints) + cmd.FailOnError(err, "Failed to create zlint registry") + if config.CertChecker.LintConfig != "" { + lintconfig, err := lint.NewConfigFromFile(config.CertChecker.LintConfig) + cmd.FailOnError(err, "Failed to load zlint config file") + lints.SetConfiguration(lintconfig) + } + checker := newChecker( saDbMap, - cmd.Clock(), + clock.New(), pa, kp, config.CertChecker.CheckPeriod.Duration, acceptableValidityDurations, + lints, + logger, ) fmt.Fprintf(os.Stderr, "# Getting certificates issued in the last %s\n", config.CertChecker.CheckPeriod) - ignoredLintsMap := make(map[string]bool) - for _, name := range config.CertChecker.IgnoredLints { - ignoredLintsMap[name] = true - } - // Since we grab certificates in batches we don't want this to block, when it // is finished it will close the certificate channel which allows the range // loops in checker.processCerts to break 
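The pattern the comment above describes (a single producer closing a bounded channel, a pool of ranging workers draining it) is worth seeing in isolation. A minimal sketch with placeholder work items:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// One producer fills a bounded channel and closes it when done, so the
	// ranging workers below fall out of their loops once it drains.
	certs := make(chan int, 4)
	go func() {
		for i := range 10 {
			certs <- i
		}
		close(certs)
	}()

	var processed atomic.Int64
	wg := new(sync.WaitGroup)
	for range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range certs {
				processed.Add(1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("processed", processed.Load()) // processed 10
}
```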
go func() { - err := checker.getCerts(config.CertChecker.UnexpiredOnly) + err := checker.getCerts(context.TODO()) cmd.FailOnError(err, "Batch retrieval of certificates failed") }() fmt.Fprintf(os.Stderr, "# Processing certificates using %d workers\n", config.CertChecker.Workers) wg := new(sync.WaitGroup) - for i := 0; i < config.CertChecker.Workers; i++ { + for range config.CertChecker.Workers { wg.Add(1) go func() { s := checker.clock.Now() - checker.processCerts(wg, config.CertChecker.BadResultsOnly, ignoredLintsMap) + checker.processCerts(context.TODO(), wg, config.CertChecker.BadResultsOnly) checkerLatency.Observe(checker.clock.Since(s).Seconds()) }() } @@ -459,5 +685,5 @@ func main() { } func init() { - cmd.RegisterCommand("cert-checker", main) + cmd.RegisterCommand("cert-checker", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/cert-checker/main_test.go b/cmd/cert-checker/main_test.go index ed1c29e5439..6d3504f7288 100644 --- a/cmd/cert-checker/main_test.go +++ b/cmd/cert-checker/main_test.go @@ -9,23 +9,30 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" + "database/sql" "encoding/asn1" "encoding/pem" - "io/ioutil" + "errors" "log" "math/big" - mrand "math/rand" - "reflect" - "sort" + mrand "math/rand/v2" + "os" + "slices" "strings" "sync" "testing" "time" "github.com/jmhodges/clock" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/letsencrypt/boulder/core" + corepb "github.com/letsencrypt/boulder/core/proto" + "github.com/letsencrypt/boulder/ctpolicy/loglist" "github.com/letsencrypt/boulder/goodkey" + "github.com/letsencrypt/boulder/goodkey/sagoodkey" + "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/linter" blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/metrics" "github.com/letsencrypt/boulder/policy" @@ -46,23 +53,26 @@ var ( func init() { var err error - pa, err = policy.New(map[core.AcmeChallenge]bool{}) + pa, err = policy.New( + map[identifier.IdentifierType]bool{identifier.TypeDNS: true, identifier.TypeIP: true}, + map[core.AcmeChallenge]bool{}, + blog.NewMock()) if err != nil { log.Fatal(err) } - err = pa.SetHostnamePolicyFile("../../test/hostname-policy.yaml") + err = pa.LoadIdentPolicyFile("../../test/ident-policy.yaml") if err != nil { log.Fatal(err) } - kp, err = goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil) + kp, err = sagoodkey.NewPolicy(nil, nil) if err != nil { log.Fatal(err) } } func BenchmarkCheckCert(b *testing.B) { - checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations) - testKey, _ := rsa.GenerateKey(rand.Reader, 1024) + checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) + testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) expiry := time.Now().AddDate(0, 0, 1) serial := big.NewInt(1337) rawCert := x509.Certificate{ @@ -74,30 +84,30 @@ func BenchmarkCheckCert(b *testing.B) { SerialNumber: serial, } certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey) - cert := core.Certificate{ + cert := &corepb.Certificate{ Serial: core.SerialToString(serial), Digest: core.Fingerprint256(certDer), - DER: certDer, - Issued: time.Now(), - Expires: expiry, + Der: certDer, + Issued: timestamppb.New(time.Now()), + Expires: timestamppb.New(expiry), } - b.ResetTimer() - for i := 0; i < b.N; i++ { - checker.checkCert(cert, nil) + + for b.Loop() { + checker.checkCert(context.Background(), cert) } } func TestCheckWildcardCert(t 
*testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") - saCleanup := test.ResetSATestDatabase(t) + saCleanup := test.ResetBoulderTestDatabase(t) defer func() { saCleanup() }() testKey, _ := rsa.GenerateKey(rand.Reader, 2048) fc := clock.NewFake() - checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations) + checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) issued := checker.clock.Now().Add(-time.Minute) goodExpiry := issued.Add(testValidityDuration - time.Second) serial := big.NewInt(1337) @@ -120,29 +130,29 @@ func TestCheckWildcardCert(t *testing.T) { test.AssertNotError(t, err, "Couldn't create certificate") parsed, err := x509.ParseCertificate(wildcardCertDer) test.AssertNotError(t, err, "Couldn't parse created certificate") - cert := core.Certificate{ + cert := &corepb.Certificate{ Serial: core.SerialToString(serial), Digest: core.Fingerprint256(wildcardCertDer), - Expires: parsed.NotAfter, - Issued: parsed.NotBefore, - DER: wildcardCertDer, + Expires: timestamppb.New(parsed.NotAfter), + Issued: timestamppb.New(parsed.NotBefore), + Der: wildcardCertDer, } - _, problems := checker.checkCert(cert, nil) + _, problems := checker.checkCert(context.Background(), cert) for _, p := range problems { - t.Errorf(p) + t.Error(p) } } -func TestCheckCertReturnsDNSNames(t *testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) +func TestCheckCertReturnsSANs(t *testing.T) { + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") - saCleanup := test.ResetSATestDatabase(t) + saCleanup := test.ResetBoulderTestDatabase(t) defer func() { saCleanup() }() - checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations) + checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) - certPEM, err := ioutil.ReadFile("testdata/quite_invalid.pem") + certPEM, err := os.ReadFile("testdata/quite_invalid.pem") if err != nil { t.Fatal(err) } @@ -152,16 +162,16 @@ func TestCheckCertReturnsDNSNames(t *testing.T) { t.Fatal("failed to parse cert PEM") } - cert := core.Certificate{ + cert := &corepb.Certificate{ Serial: "00000000000", Digest: core.Fingerprint256(block.Bytes), - Expires: time.Now().Add(time.Hour), - Issued: time.Now(), - DER: block.Bytes, + Expires: timestamppb.New(time.Now().Add(time.Hour)), + Issued: timestamppb.New(time.Now()), + Der: block.Bytes, } - names, problems := checker.checkCert(cert, nil) - if !reflect.DeepEqual(names, []string{"quite_invalid.com", "al--so--wr--ong.com"}) { + names, problems := checker.checkCert(context.Background(), cert) + if !slices.Equal(names, []string{"quite_invalid.com", "al--so--wr--ong.com", "127.0.0.1"}) { t.Errorf("didn't get expected DNS names. 
other problems: %s", strings.Join(problems, "\n")) } } @@ -183,9 +193,9 @@ func (*rsa2048Generator) genKey() (crypto.Signer, error) { } func TestCheckCert(t *testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") - saCleanup := test.ResetSATestDatabase(t) + saCleanup := test.ResetBoulderTestDatabase(t) defer func() { saCleanup() }() @@ -207,7 +217,7 @@ func TestCheckCert(t *testing.T) { t.Run(tc.name, func(t *testing.T) { testKey, _ := tc.key.genKey() - checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations) + checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) // Create a RFC 7633 OCSP Must Staple Extension. // OID 1.3.6.1.5.5.7.1.24 @@ -235,13 +245,12 @@ func TestCheckCert(t *testing.T) { NotBefore: issued, NotAfter: goodExpiry.AddDate(0, 0, 1), // Period too long DNSNames: []string{ - // longName should be flagged along with the long CN - longName, "example-a.com", "foodnotbombs.mil", // `dev-myqnapcloud.com` is included because it is an exact private // entry on the public suffix list "dev-myqnapcloud.com", + // don't include longName in the SANs, so the unique CN gets flagged }, SerialNumber: serial, BasicConstraintsValid: false, @@ -258,14 +267,14 @@ func TestCheckCert(t *testing.T) { // Serial doesn't match // Expiry doesn't match // Issued doesn't match - cert := core.Certificate{ + cert := &corepb.Certificate{ Serial: "8485f2687eba29ad455ae4e31c8679206fec", - DER: brokenCertDer, - Issued: issued.Add(12 * time.Hour), - Expires: goodExpiry.AddDate(0, 0, 2), // Expiration doesn't match + Der: brokenCertDer, + Issued: timestamppb.New(issued.Add(12 * time.Hour)), + Expires: timestamppb.New(goodExpiry.AddDate(0, 0, 2)), // Expiration doesn't match } - _, problems := checker.checkCert(cert, nil) + _, problems := checker.checkCert(context.Background(), cert) problemsMap := map[string]int{ "Stored digest doesn't match certificate digest": 1, @@ -277,6 +286,7 @@ func TestCheckCert(t *testing.T) { "Certificate has incorrect key usage extensions": 1, "Certificate has common name >64 characters long (65)": 1, "Certificate contains an unexpected extension: 1.3.3.7": 1, + "Certificate Common Name does not appear in Subject Alternative Names: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeexample.com\" !< [example-a.com foodnotbombs.mil dev-myqnapcloud.com]": 1, } for _, p := range problems { _, ok := problemsMap[p] @@ -286,12 +296,12 @@ func TestCheckCert(t *testing.T) { delete(problemsMap, p) } for k := range problemsMap { - t.Errorf("Expected problem but didn't find it: '%s'.", k) + t.Errorf("Expected problem but didn't find '%s' in problems: %q.", k, problems) } // Same settings as above, but the stored serial number in the DB is invalid. 
cert.Serial = "not valid" - _, problems = checker.checkCert(cert, nil) + _, problems = checker.checkCert(context.Background(), cert) foundInvalidSerialProblem := false for _, p := range problems { if p == "Stored serial is invalid" { @@ -313,61 +323,63 @@ func TestCheckCert(t *testing.T) { test.AssertNotError(t, err, "Couldn't parse created certificate") cert.Serial = core.SerialToString(serial) cert.Digest = core.Fingerprint256(goodCertDer) - cert.DER = goodCertDer - cert.Expires = parsed.NotAfter - cert.Issued = parsed.NotBefore - _, problems = checker.checkCert(cert, nil) + cert.Der = goodCertDer + cert.Expires = timestamppb.New(parsed.NotAfter) + cert.Issued = timestamppb.New(parsed.NotBefore) + _, problems = checker.checkCert(context.Background(), cert) test.AssertEquals(t, len(problems), 0) }) } } func TestGetAndProcessCerts(t *testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") fc := clock.NewFake() fc.Set(fc.Now().Add(time.Hour)) - checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations) - sa, err := sa.NewSQLStorageAuthority(saDbMap, saDbMap, nil, nil, fc, blog.NewMock(), metrics.NoopRegisterer, 1) + checker := newChecker(saDbMap, fc, pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) + sa, err := sa.NewSQLStorageAuthority(saDbMap, saDbMap, nil, 1, 0, fc, blog.NewMock(), metrics.NoopRegisterer) test.AssertNotError(t, err, "Couldn't create SA to insert certificates") - saCleanUp := test.ResetSATestDatabase(t) + saCleanUp := test.ResetBoulderTestDatabase(t) defer func() { saCleanUp() }() - testKey, _ := rsa.GenerateKey(rand.Reader, 1024) + testKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) // Problems // Expiry period is too long rawCert := x509.Certificate{ Subject: pkix.Name{ CommonName: "not-blacklisted.com", }, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(999999 * time.Hour), BasicConstraintsValid: true, DNSNames: []string{"not-blacklisted.com"}, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, } reg := satest.CreateWorkingRegistration(t, isa.SA{Impl: sa}) test.AssertNotError(t, err, "Couldn't create registration") - for i := int64(0); i < 5; i++ { - rawCert.SerialNumber = big.NewInt(mrand.Int63()) + for range 5 { + rawCert.SerialNumber = big.NewInt(mrand.Int64()) certDER, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey) test.AssertNotError(t, err, "Couldn't create certificate") _, err = sa.AddCertificate(context.Background(), &sapb.AddCertificateRequest{ Der: certDER, RegID: reg.Id, - Issued: fc.Now().UnixNano(), + Issued: timestamppb.New(fc.Now()), }) test.AssertNotError(t, err, "Couldn't add certificate") } batchSize = 2 - err = checker.getCerts(false) + err = checker.getCerts(context.Background()) test.AssertNotError(t, err, "Failed to retrieve certificates") test.AssertEquals(t, len(checker.certs), 5) wg := new(sync.WaitGroup) wg.Add(1) - checker.processCerts(wg, false, nil) + checker.processCerts(context.Background(), wg, false) test.AssertEquals(t, checker.issuedReport.BadCerts, int64(5)) test.AssertEquals(t, len(checker.issuedReport.Entries), 5) } @@ -380,19 +392,24 @@ type mismatchedCountDB struct{} // `getCerts` calls `SelectInt` first to determine how many rows there are // matching the `getCertsCountQuery` criteria. 
For this mock we return // a non-zero number -func (db mismatchedCountDB) SelectInt(_ string, _ ...interface{}) (int64, error) { - return 99999, nil +func (db mismatchedCountDB) SelectNullInt(_ context.Context, _ string, _ ...any) (sql.NullInt64, error) { + return sql.NullInt64{ + Int64: 99999, + Valid: true, + }, + nil } // `getCerts` then calls `Select` to retrieve the Certificate rows. We pull // a dastardly switch-a-roo here and return an empty set -func (db mismatchedCountDB) Select(output interface{}, _ string, _ ...interface{}) ([]interface{}, error) { - // But actually return nothing - outputPtr, _ := output.(*[]sa.CertWithID) - *outputPtr = []sa.CertWithID{} +func (db mismatchedCountDB) Select(_ context.Context, output any, _ string, _ ...any) ([]any, error) { return nil, nil } +func (db mismatchedCountDB) SelectOne(_ context.Context, _ any, _ string, _ ...any) error { + return errors.New("unimplemented") +} + /* * In Boulder #2004[0] we identified that there is a race in `getCerts` * between the first call to `SelectOne` to identify how many rows there are, @@ -412,16 +429,88 @@ func (db mismatchedCountDB) Select(output interface{}, _ string, _ ...interface{ * 0: https://github.com/letsencrypt/boulder/issues/2004 */ func TestGetCertsEmptyResults(t *testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") - checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations) + checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) checker.dbMap = mismatchedCountDB{} batchSize = 3 - err = checker.getCerts(false) + err = checker.getCerts(context.Background()) test.AssertNotError(t, err, "Failed to retrieve certificates") } +// emptyDB is a certDB object with methods used for testing that 'null' +// responses received from the database are handled properly. +type emptyDB struct { + certDB +} + +// SelectNullInt is a method that returns a false sql.NullInt64 struct to +// mock a null DB response +func (db emptyDB) SelectNullInt(_ context.Context, _ string, _ ...any) (sql.NullInt64, error) { + return sql.NullInt64{Valid: false}, + nil +} + +// TestGetCertsNullResults tests that a null response from the database will +// be handled properly. It uses the emptyDB above to mock the response +// expected if the DB finds no certificates to match the SELECT query and +// should return an error. +func TestGetCertsNullResults(t *testing.T) { + checker := newChecker(emptyDB{}, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) + + err := checker.getCerts(context.Background()) + test.AssertError(t, err, "Should have gotten error from empty DB") + if !strings.Contains(err.Error(), "no rows found for certificates issued between") { + t.Errorf("expected error to contain 'no rows found for certificates issued between', got '%s'", err.Error()) + } +} + +// lateDB is a certDB object that helps with TestGetCertsLate. +// It pretends to contain a single cert issued at the given time. 
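The DB mocks in this file revolve around sql.NullInt64 semantics: MIN(id) over zero matching rows yields SQL NULL, which scans as Valid == false. A tiny demonstration:

```go
package main

import (
	"database/sql"
	"fmt"
)

func main() {
	// A NULL from MIN(id) scans into a sql.NullInt64 with Valid == false;
	// Int64 is only meaningful when Valid is true.
	noRows := sql.NullInt64{Valid: false}
	oneRow := sql.NullInt64{Int64: 23, Valid: true}
	fmt.Println(noRows.Valid, noRows.Int64) // false 0
	fmt.Println(oneRow.Valid, oneRow.Int64) // true 23
}
```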
+type lateDB struct { + issuedTime time.Time + selectedACert bool +} + +// SelectNullInt returns a valid sql.NullInt64 when the queried hour window +// covers the mock certificate's issuance time, and a null result otherwise +func (db *lateDB) SelectNullInt(_ context.Context, _ string, args ...any) (sql.NullInt64, error) { + args2 := args[0].(map[string]any) + begin := args2["begin"].(time.Time) + end := args2["end"].(time.Time) + if begin.Compare(db.issuedTime) < 0 && end.Compare(db.issuedTime) > 0 { + return sql.NullInt64{Int64: 23, Valid: true}, nil + } + return sql.NullInt64{Valid: false}, nil +} + +func (db *lateDB) Select(_ context.Context, output any, _ string, args ...any) ([]any, error) { + db.selectedACert = true + // For expediency we respond with an empty list of certificates; the checker will treat this as if it's + // reached the end of the list of certificates to process. + return nil, nil +} + +func (db *lateDB) SelectOne(_ context.Context, _ any, _ string, _ ...any) error { + return nil +} + +// TestGetCertsLate checks for correct behavior when certificates exist only late in the provided window. +func TestGetCertsLate(t *testing.T) { + clk := clock.NewFake() + db := &lateDB{issuedTime: clk.Now().Add(-time.Hour)} + checkPeriod := 24 * time.Hour + checker := newChecker(db, clk, pa, kp, checkPeriod, testValidityDurations, nil, blog.NewMock()) + + err := checker.getCerts(context.Background()) + test.AssertNotError(t, err, "getting certs") + + if !db.selectedACert { + t.Errorf("checker never selected a certificate after getting a MIN(id)") + } +} + func TestSaveReport(t *testing.T) { r := report{ begin: time.Time{}, @@ -450,7 +539,7 @@ func TestIsForbiddenDomain(t *testing.T) { // Note: These testcases are not an exhaustive representation of domains // Boulder won't issue for, but are instead testing the defense-in-depth // `isForbiddenDomain` function called *after* the PA has vetted the name - // against the complex hostname policy file. + // against the complex identifier policy file.
testcases := []struct { Name string Expected bool @@ -487,32 +576,35 @@ func TestIsForbiddenDomain(t *testing.T) { } func TestIgnoredLint(t *testing.T) { - saDbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) + saDbMap, err := sa.DBMapForTest(vars.DBConnSA) test.AssertNotError(t, err, "Couldn't connect to database") - saCleanup := test.ResetSATestDatabase(t) + saCleanup := test.ResetBoulderTestDatabase(t) defer func() { saCleanup() }() + err = loglist.InitLintList("../../test/ct-test-srv/log_list.json", false) + test.AssertNotError(t, err, "failed to load ct log list") testKey, _ := rsa.GenerateKey(rand.Reader, 2048) - checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations) + checker := newChecker(saDbMap, clock.NewFake(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) serial := big.NewInt(1337) + x509OID, err := x509.OIDFromInts([]uint64{1, 2, 3}) + test.AssertNotError(t, err, "failed to create x509.OID") + template := &x509.Certificate{ Subject: pkix.Name{ CommonName: "CPU's Cool CA", }, - SerialNumber: serial, - NotBefore: time.Now(), - NotAfter: time.Now().Add(testValidityDuration - time.Second), - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - PolicyIdentifiers: []asn1.ObjectIdentifier{ - {1, 2, 3}, - }, + SerialNumber: serial, + NotBefore: time.Now(), + NotAfter: time.Now().Add(testValidityDuration - time.Second), + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + Policies: []x509.OID{x509OID}, BasicConstraintsValid: true, IsCA: true, - IssuingCertificateURL: []string{"http://ca.cpu"}, + IssuingCertificateURL: []string{"http://aia.example.org"}, SubjectKeyId: []byte("foobar"), } @@ -536,35 +628,77 @@ func TestIgnoredLint(t *testing.T) { subjectCert, err := x509.ParseCertificate(subjectCertDer) test.AssertNotError(t, err, "failed to parse EE cert") - cert := core.Certificate{ + cert := &corepb.Certificate{ Serial: core.SerialToString(serial), - DER: subjectCertDer, + Der: subjectCertDer, Digest: core.Fingerprint256(subjectCertDer), - Issued: subjectCert.NotBefore, - Expires: subjectCert.NotAfter, + Issued: timestamppb.New(subjectCert.NotBefore), + Expires: timestamppb.New(subjectCert.NotAfter), } - // Without any ignored lints we expect one error level result due to the - // missing OCSP url in the template. + // Without any ignored lints we expect several errors and warnings about SCTs, + // the common name, and the subject key identifier extension. expectedProblems := []string{ - "zlint error: e_sub_cert_aia_does_not_contain_ocsp_url", - "zlint info: n_subject_common_name_included", + "zlint warn: w_subject_common_name_included", + "zlint warn: w_ext_subject_key_identifier_not_recommended_subscriber", "zlint info: w_ct_sct_policy_count_unsatisfied Certificate had 0 embedded SCTs. Browser policy may require 2 for this certificate.", + "zlint error: e_scts_from_same_operator Certificate had too few embedded SCTs; browser policy requires 2.", } - sort.Strings(expectedProblems) + slices.Sort(expectedProblems) // Check the certificate with a nil ignore map. This should return the // expected zlint problems. 
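This test now populates Policies with x509.OID values instead of the older asn1.ObjectIdentifier-based PolicyIdentifiers field. Constructing an OID with the Go 1.22+ helper used above:

```go
package main

import (
	"crypto/x509"
	"fmt"
)

func main() {
	// x509.OIDFromInts builds an OID from its arc values and, unlike a raw
	// asn1.ObjectIdentifier literal, rejects invalid ones with an error.
	oid, err := x509.OIDFromInts([]uint64{1, 2, 3})
	if err != nil {
		panic(err)
	}
	fmt.Println(oid.String()) // 1.2.3
}
```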
- _, problems := checker.checkCert(cert, nil) - sort.Strings(problems) - test.Assert(t, reflect.DeepEqual(problems, expectedProblems), "problems did not match expected") + _, problems := checker.checkCert(context.Background(), cert) + slices.Sort(problems) + test.AssertDeepEquals(t, problems, expectedProblems) // Check the certificate again with an ignore map that excludes the affected // lints. This should return no problems. - _, problems = checker.checkCert(cert, map[string]bool{ - "e_sub_cert_aia_does_not_contain_ocsp_url": true, - "n_subject_common_name_included": true, - "w_ct_sct_policy_count_unsatisfied": true, + lints, err := linter.NewRegistry([]string{ + "w_subject_common_name_included", + "w_ext_subject_key_identifier_not_recommended_subscriber", + "w_ct_sct_policy_count_unsatisfied", + "e_scts_from_same_operator", }) + test.AssertNotError(t, err, "creating test lint registry") + checker.lints = lints + _, problems = checker.checkCert(context.Background(), cert) test.AssertEquals(t, len(problems), 0) } + +func TestPrecertCorrespond(t *testing.T) { + checker := newChecker(nil, clock.New(), pa, kp, time.Hour, testValidityDurations, nil, blog.NewMock()) + checker.getPrecert = func(_ context.Context, _ string) ([]byte, error) { + return []byte("hello"), nil + } + testKey, _ := rsa.GenerateKey(rand.Reader, 2048) + expiry := time.Now().AddDate(0, 0, 1) + serial := big.NewInt(1337) + rawCert := x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + NotAfter: expiry, + DNSNames: []string{"example-a.com"}, + SerialNumber: serial, + } + certDer, _ := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, testKey) + cert := &corepb.Certificate{ + Serial: core.SerialToString(serial), + Digest: core.Fingerprint256(certDer), + Der: certDer, + Issued: timestamppb.New(time.Now()), + Expires: timestamppb.New(expiry), + } + _, problems := checker.checkCert(context.Background(), cert) + if len(problems) == 0 { + t.Errorf("expected precert correspondence problem") + } + // Ensure that at least one of the problems was related to checking correspondence + for _, p := range problems { + if strings.Contains(p, "does not correspond to precert") { + return + } + } + t.Fatalf("expected precert correspondence problem, but got: %v", problems) +} diff --git a/cmd/cert-checker/testdata/quite_invalid.pem b/cmd/cert-checker/testdata/quite_invalid.pem index 632b8b67e21..5a5b86c02b9 100644 --- a/cmd/cert-checker/testdata/quite_invalid.pem +++ b/cmd/cert-checker/testdata/quite_invalid.pem @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIDUzCCAjugAwIBAgIILgLqdMwyzT4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +MIIDWTCCAkGgAwIBAgIILgLqdMwyzT4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE AxMVbWluaWNhIHJvb3QgY2EgOTMzZTM5MB4XDTIxMTExMTIwMjMzMloXDTIzMTIx MTIwMjMzMlowHDEaMBgGA1UEAwwRcXVpdGVfaW52YWxpZC5jb20wggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDi4jBbqMyvhMonDngNsvie9SHPB16mdpiy @@ -7,14 +7,14 @@ Y/agreU84xUz/roKK07TpVmeqvwWvDkvHTFov7ytKdnCY+z/NXKJ3hNqflWCwU7h Uk9TmpBp0vg+5NvalYul/+bq/B4qDhEvTBzAX3k/UYzd0GQdMyAbwXtG41f5cSK6 cWTQYfJL3gGR5/KLoTz3/VemLgEgAP/CvgcUJPbQceQViiZ4opi9hFIfUqxX2NsD 49klw8cDFu/BG2LEC+XtbdT8XevD0aGIOuYVr+Pa2mxb2QCDXu4tXOsDXH9Y/Cmk -8103QbdB8Y+usOiHG/IXxK2q4J7QNPal4ER4/PGA06V0gwrjNH8BAgMBAAGjgZQw -gZEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +8103QbdB8Y+usOiHG/IXxK2q4J7QNPal4ER4/PGA06V0gwrjNH8BAgMBAAGjgZow +gZcwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD AjAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFNIcaCjv32YRafE065dZO57ONWuk 
-MDEGA1UdEQQqMCiCEXF1aXRlX2ludmFsaWQuY29tghNhbC0tc28tLXdyLS1vbmcu -Y29tMA0GCSqGSIb3DQEBCwUAA4IBAQAjSv0o5G4VuLnnwHON4P53bLvGnYqaqYju -TEafi3hSgHAfBuhOQUVgwujoYpPp1w1fm5spfcbSwNNRte79HgV97kAuZ4R4RHk1 -5Xux1ITLalaHR/ilu002N0eJ7dFYawBgV2xMudULzohwmW2RjPJ5811iWwtiVf1b -A3V5SZJWSJll1BhANBs7R0pBbyTSNHR470N8TGG0jfXqgTKd0xZaH91HrwEMo+96 -llbfp90Y5OfHIfym/N1sH2hVgd+ZAkhiVEiNBWZlbSyOgbZ1cCBvBXg6TuwpQMZK -9RWjlpni8yuzLGduPl8qHG1dqsUvbVqcG+WhHLbaZMNhiMfiWInL +MDcGA1UdEQQwMC6CEXF1aXRlX2ludmFsaWQuY29tghNhbC0tc28tLXdyLS1vbmcu +Y29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAjSv0o5G4VuLnnwHON4P53bLvG +nYqaqYjuTEafi3hSgHAfBuhOQUVgwujoYpPp1w1fm5spfcbSwNNRte79HgV97kAu +Z4R4RHk15Xux1ITLalaHR/ilu002N0eJ7dFYawBgV2xMudULzohwmW2RjPJ5811i +WwtiVf1bA3V5SZJWSJll1BhANBs7R0pBbyTSNHR470N8TGG0jfXqgTKd0xZaH91H +rwEMo+96llbfp90Y5OfHIfym/N1sH2hVgd+ZAkhiVEiNBWZlbSyOgbZ1cCBvBXg6 +TuwpQMZK9RWjlpni8yuzLGduPl8qHG1dqsUvbVqcG+WhHLbaZMNhiMfiWInL -----END CERTIFICATE----- diff --git a/cmd/clock_generic.go b/cmd/clock_generic.go deleted file mode 100644 index 32634ae22a9..00000000000 --- a/cmd/clock_generic.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !integration - -package cmd - -import "github.com/jmhodges/clock" - -// Clock functions similarly to clock.New(), but the returned value can be -// changed using the FAKECLOCK environment variable if the 'integration' build -// flag is set. -// -// This function returns the default Clock. -func Clock() clock.Clock { - return clock.New() -} diff --git a/cmd/clock_integration.go b/cmd/clock_integration.go deleted file mode 100644 index 46dd6fd7365..00000000000 --- a/cmd/clock_integration.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build integration - -package cmd - -import ( - "fmt" - "os" - "time" - - "github.com/jmhodges/clock" - blog "github.com/letsencrypt/boulder/log" -) - -// Clock functions similarly to clock.Default(), but the returned value can be -// changed using the FAKECLOCK environment variable if the 'integration' build -// flag is set. -// -// The FAKECLOCK env var is in the time.UnixDate format, returned by `date -d`. -func Clock() clock.Clock { - if tgt := os.Getenv("FAKECLOCK"); tgt != "" { - targetTime, err := time.Parse(time.UnixDate, tgt) - FailOnError(err, fmt.Sprintf("cmd.Clock: bad format for FAKECLOCK: %v\n", err)) - - cl := clock.NewFake() - cl.Set(targetTime) - blog.Get().Infof("Time was set to %v via FAKECLOCK", targetTime) - return cl - } - return clock.Default() -} diff --git a/cmd/config.go b/cmd/config.go index e1d75cfe650..d9b62fd32d3 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -3,25 +3,25 @@ package cmd import ( "crypto/tls" "crypto/x509" - "encoding/json" + "encoding/hex" "errors" "fmt" - "hash/fnv" - "io/ioutil" - "math" + "net" "os" - "path" "strings" - "time" - "github.com/go-sql-driver/mysql" - "github.com/honeycombio/beeline-go" + "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "google.golang.org/grpc/resolver" + + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/identifier" ) // PasswordConfig contains a path to a file containing a password. 
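+//
+// For example (hypothetical path), a JSON config might reference the file as:
+//
+//	"passwordFile": "/path/to/password"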
type PasswordConfig struct { - PasswordFile string + PasswordFile string `validate:"required"` } // Pass returns a password, extracted from the PasswordConfig's PasswordFile @@ -30,7 +30,7 @@ func (pc *PasswordConfig) Pass() (string, error) { if pc.PasswordFile == "" { return "", nil } - contents, err := ioutil.ReadFile(pc.PasswordFile) + contents, err := os.ReadFile(pc.PasswordFile) if err != nil { return "", err } @@ -41,82 +41,61 @@ func (pc *PasswordConfig) Pass() (string, error) { // be embedded in other config structs. type ServiceConfig struct { // DebugAddr is the address to run the /debug handlers on. - DebugAddr string + DebugAddr string `validate:"omitempty,hostname_port"` GRPC *GRPCServerConfig TLS TLSConfig + + // HealthCheckInterval is the duration between deep health checks of the + // service. Defaults to 5 seconds. + HealthCheckInterval config.Duration `validate:"-"` } -// DBConfig defines how to connect to a database. The connect string may be +// DBConfig defines how to connect to a database. The connect string is // stored in a file separate from the config, because it can contain a password, // which we want to keep out of configs. type DBConfig struct { - DBConnect string // A file containing a connect URL for the DB. - DBConnectFile string + DBConnectFile string `validate:"required"` // MaxOpenConns sets the maximum number of open connections to the // database. If MaxIdleConns is greater than 0 and MaxOpenConns is // less than MaxIdleConns, then MaxIdleConns will be reduced to // match the new MaxOpenConns limit. If n < 0, then there is no // limit on the number of open connections. - MaxOpenConns int + MaxOpenConns int `validate:"min=-1"` // MaxIdleConns sets the maximum number of connections in the idle // connection pool. If MaxOpenConns is greater than 0 but less than // MaxIdleConns, then MaxIdleConns will be reduced to match the // MaxOpenConns limit. If n < 0, no idle connections are retained. - MaxIdleConns int + MaxIdleConns int `validate:"min=-1"` // ConnMaxLifetime sets the maximum amount of time a connection may // be reused. Expired connections may be closed lazily before reuse. // If d < 0, connections are not closed due to a connection's age. - ConnMaxLifetime ConfigDuration + ConnMaxLifetime config.Duration `validate:"-"` // ConnMaxIdleTime sets the maximum amount of time a connection may // be idle. Expired connections may be closed lazily before reuse. // If d < 0, connections are not closed due to a connection's idle // time. - ConnMaxIdleTime ConfigDuration + ConnMaxIdleTime config.Duration `validate:"-"` } -// URL returns the DBConnect URL represented by this DBConfig object, either -// loading it from disk or returning a default value. Leading and trailing -// whitespace is stripped. +// URL returns the DBConnect URL represented by this DBConfig object, loading it +// from the file on disk. Leading and trailing whitespace is stripped. func (d *DBConfig) URL() (string, error) { - if d.DBConnectFile != "" { - url, err := ioutil.ReadFile(d.DBConnectFile) - return strings.TrimSpace(string(url)), err - } - return d.DBConnect, nil -} - -// DSNAddressAndUser returns the Address and User of the DBConnect DSN from -// this object. 
-func (d *DBConfig) DSNAddressAndUser() (string, string, error) { - dsnStr, err := d.URL() - if err != nil { - return "", "", fmt.Errorf("failed to load DBConnect URL: %s", err) - } - config, err := mysql.ParseDSN(dsnStr) - if err != nil { - return "", "", fmt.Errorf("failed to parse DSN from the DBConnect URL: %s", err) - } - return config.Addr, config.User, nil -} - -type SMTPConfig struct { - PasswordConfig - Server string - Port string - Username string + url, err := os.ReadFile(d.DBConnectFile) + return strings.TrimSpace(string(url)), err } // PAConfig specifies how a policy authority should connect to its // database, what policies it should enforce, and what challenges // it should offer. type PAConfig struct { - DBConfig - Challenges map[core.AcmeChallenge]bool + DBConfig `validate:"-"` + Challenges map[core.AcmeChallenge]bool `validate:"omitempty,dive,keys,oneof=http-01 dns-01 tls-alpn-01 dns-account-01,endkeys"` + Identifiers map[identifier.IdentifierType]bool `validate:"omitempty,dive,keys,oneof=dns ip,endkeys"` } // CheckChallenges checks whether the list of challenges in the PA config @@ -127,7 +106,18 @@ func (pc PAConfig) CheckChallenges() error { } for c := range pc.Challenges { if !c.IsValid() { - return fmt.Errorf("Invalid challenge in PA config: %s", c) + return fmt.Errorf("invalid challenge in PA config: %s", c) + } + } + return nil +} + +// CheckIdentifiers checks whether the list of identifiers in the PA config +// actually contains valid identifier type names +func (pc PAConfig) CheckIdentifiers() error { + for i := range pc.Identifiers { + if !i.IsValid() { + return fmt.Errorf("invalid identifier type in PA config: %s", i) } } return nil @@ -136,198 +126,445 @@ func (pc PAConfig) CheckChallenges() error { // HostnamePolicyConfig specifies a file from which to load a policy regarding // what hostnames to issue for. type HostnamePolicyConfig struct { - HostnamePolicyFile string + HostnamePolicyFile string `validate:"required"` } // TLSConfig represents certificates and a key for authenticated TLS. type TLSConfig struct { - CertFile *string - KeyFile *string - CACertFile *string + CertFile string `validate:"required"` + KeyFile string `validate:"required"` + // The CACertFile file may contain any number of root certificates and will + // be deduplicated internally. + CACertFile string `validate:"required"` } // Load reads and parses the certificates and key listed in the TLSConfig, and -// returns a *tls.Config suitable for either client or server use. -func (t *TLSConfig) Load() (*tls.Config, error) { +// returns a *tls.Config suitable for either client or server use. The +// CACertFile file may contain any number of root certificates and will be +// deduplicated internally. Prometheus metrics for various certificate fields +// will be exported. 
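+//
+// A minimal usage sketch (the caller and registerer here are assumptions, not
+// prescribed by this package; metrics.NoopRegisterer is used in tests below):
+//
+//	tlsConfig, err := c.TLS.Load(metrics.NoopRegisterer)
+//	if err != nil {
+//		// handle the invalid TLS configuration
+//	}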
+func (t *TLSConfig) Load(scope prometheus.Registerer) (*tls.Config, error) {
 	if t == nil {
 		return nil, fmt.Errorf("nil TLS section in config")
 	}
-	if t.CertFile == nil {
+	if t.CertFile == "" {
 		return nil, fmt.Errorf("nil CertFile in TLSConfig")
 	}
-	if t.KeyFile == nil {
+	if t.KeyFile == "" {
 		return nil, fmt.Errorf("nil KeyFile in TLSConfig")
 	}
-	if t.CACertFile == nil {
+	if t.CACertFile == "" {
 		return nil, fmt.Errorf("nil CACertFile in TLSConfig")
 	}
-	caCertBytes, err := ioutil.ReadFile(*t.CACertFile)
+	caCertBytes, err := os.ReadFile(t.CACertFile)
 	if err != nil {
-		return nil, fmt.Errorf("reading CA cert from %q: %s", *t.CACertFile, err)
+		return nil, fmt.Errorf("reading CA cert from %q: %s", t.CACertFile, err)
 	}
 	rootCAs := x509.NewCertPool()
 	if ok := rootCAs.AppendCertsFromPEM(caCertBytes); !ok {
-		return nil, fmt.Errorf("parsing CA certs from %s failed", *t.CACertFile)
+		return nil, fmt.Errorf("parsing CA certs from %s failed", t.CACertFile)
 	}
-	cert, err := tls.LoadX509KeyPair(*t.CertFile, *t.KeyFile)
+	cert, err := tls.LoadX509KeyPair(t.CertFile, t.KeyFile)
 	if err != nil {
 		return nil, fmt.Errorf("loading key pair from %q and %q: %s",
-			*t.CertFile, *t.KeyFile, err)
+			t.CertFile, t.KeyFile, err)
 	}
+
+	tlsNotBefore := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "tlsconfig_notbefore_seconds",
+			Help: "TLS certificate NotBefore field expressed as Unix epoch time",
+		},
+		[]string{"serial"})
+	err = scope.Register(tlsNotBefore)
+	if err != nil {
+		are := prometheus.AlreadyRegisteredError{}
+		if errors.As(err, &are) {
+			tlsNotBefore = are.ExistingCollector.(*prometheus.GaugeVec)
+		} else {
+			return nil, err
+		}
+	}
+
+	tlsNotAfter := prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Name: "tlsconfig_notafter_seconds",
+			Help: "TLS certificate NotAfter field expressed as Unix epoch time",
+		},
+		[]string{"serial"})
+	err = scope.Register(tlsNotAfter)
+	if err != nil {
+		are := prometheus.AlreadyRegisteredError{}
+		if errors.As(err, &are) {
+			tlsNotAfter = are.ExistingCollector.(*prometheus.GaugeVec)
+		} else {
+			return nil, err
+		}
+	}
+
+	leaf, err := x509.ParseCertificate(cert.Certificate[0])
+	if err != nil {
+		return nil, err
+	}
+
+	serial := leaf.SerialNumber.String()
+	tlsNotBefore.WithLabelValues(serial).Set(float64(leaf.NotBefore.Unix()))
+	tlsNotAfter.WithLabelValues(serial).Set(float64(leaf.NotAfter.Unix()))
+
 	return &tls.Config{
 		RootCAs:      rootCAs,
 		ClientCAs:    rootCAs,
 		ClientAuth:   tls.RequireAndVerifyClientCert,
 		Certificates: []tls.Certificate{cert},
-		// Set the only acceptable TLS version to 1.2 and the only acceptable cipher suite
-		// to ECDHE-RSA-CHACHA20-POLY1305.
-		MinVersion:   tls.VersionTLS12,
-		MaxVersion:   tls.VersionTLS12,
-		CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
+		// Set TLS 1.3 as the only acceptable TLS version.
+		MinVersion: tls.VersionTLS13,
 	}, nil
 }
 
 // SyslogConfig defines the config for syslogging.
+// 3 means "error", 4 means "warning", 6 means "info", and 7 means "debug".
+// Configuring a given level causes all messages at that level and below to
+// be logged.
 type SyslogConfig struct {
-	StdoutLevel int
-	SyslogLevel int
+	// When absent or zero, this causes no logs to be emitted on stdout/stderr.
+	// Errors and warnings will be emitted on stderr if the configured level
+	// allows.
+	StdoutLevel int `validate:"min=-1,max=7"`
+	// When absent or zero, this defaults to logging all messages of level 6
+	// or below. To disable syslog logging entirely, set this to -1.
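+	//
+	// For example (hypothetical config snippet), to log warnings and errors
+	// to stderr while disabling syslog entirely:
+	//
+	//	"syslog": {"stdoutLevel": 4, "syslogLevel": -1}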
+	SyslogLevel int `validate:"min=-1,max=7"`
 }
 
-// ConfigDuration is just an alias for time.Duration that allows
-// serialization to YAML as well as JSON.
-type ConfigDuration struct {
-	time.Duration
+// ServiceDomain contains the service and domain name the gRPC or bdns provider
+// will use to construct a SRV DNS query to look up backends.
+type ServiceDomain struct {
+	// Service is the service name to be used for SRV lookups. For example: if
+	// the record is 'foo.service.consul', then the Service is 'foo'.
+	Service string `validate:"required"`
+
+	// Domain is the domain name to be used for SRV lookups. For example: if the
+	// record is 'foo.service.consul', then the Domain is 'service.consul'.
+	Domain string `validate:"required"`
 }
 
-// ErrDurationMustBeString is returned when a non-string value is
-// presented to be deserialized as a ConfigDuration
-var ErrDurationMustBeString = errors.New("cannot JSON unmarshal something other than a string into a ConfigDuration")
+// GRPCClientConfig contains the information necessary to set up a gRPC client
+// connection. The following field combinations are allowed:
+//
+//	ServerAddress, DNSAuthority, [Timeout], [HostOverride]
+//	SRVLookup, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
+//	SRVLookups, DNSAuthority, [Timeout], [HostOverride], [SRVResolver]
+type GRPCClientConfig struct {
+	// DNSAuthority is a single hostname, IP address, or host:port of the DNS
+	// server to be used for resolution of gRPC backends. If the address
+	// contains a hostname the gRPC client will resolve it via the system DNS.
+	// If the address contains a port, the client will use it directly,
+	// otherwise port 53 is used.
+	DNSAuthority string `validate:"required_with=SRVLookup SRVLookups,omitempty,ip|hostname|hostname_port"`
+
+	// SRVLookup contains the service and domain name the gRPC client will use
+	// to construct a SRV DNS query to look up backends. For example: if the
+	// resource record is 'foo.service.consul', then the 'Service' is 'foo' and
+	// the 'Domain' is 'service.consul'. The expected dNSName to be
+	// authenticated in the server certificate would be 'foo.service.consul'.
+	//
+	// Note: The 'proto' field of the SRV record MUST contain 'tcp' and the
+	// 'port' field MUST be a valid port. In a Consul configuration file you
+	// would specify 'foo.service.consul' as:
+	//
+	//	services {
+	//	  id      = "some-unique-id-1"
+	//	  name    = "foo"
+	//	  address = "10.77.77.77"
+	//	  port    = 8080
+	//	  tags    = ["tcp"]
+	//	}
+	//	services {
+	//	  id      = "some-unique-id-2"
+	//	  name    = "foo"
+	//	  address = "10.77.77.77"
+	//	  port    = 8180
+	//	  tags    = ["tcp"]
+	//	}
+	//
+	// If you've added the above to your Consul configuration file (and reloaded
+	// Consul) then you should be able to resolve the following dig query:
+	//
+	//	$ dig @10.77.77.10 -t SRV _foo._tcp.service.consul +short
+	//	1 1 8080 0a585858.addr.dc1.consul.
+	//	1 1 8080 0a4d4d4d.addr.dc1.consul.
+	SRVLookup *ServiceDomain `validate:"required_without_all=SRVLookups ServerAddress"`
+
+	// SRVLookups allows you to pass multiple SRV records to the gRPC client.
+	// The gRPC client will resolve each SRV record and use the results to
+	// construct a list of backends to connect to. For more details, see the
+	// documentation for the SRVLookup field. Note: while you can pass multiple
+	// targets to the gRPC client using this field, all of the targets will use
+	// the same HostOverride and TLS configuration.
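+	//
+	// A hypothetical JSON config using this field might look like (the
+	// service names are illustrative):
+	//
+	//	"srvLookups": [
+	//		{"service": "foo", "domain": "service.consul"},
+	//		{"service": "bar", "domain": "service.consul"}
+	//	]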
+	SRVLookups []*ServiceDomain `validate:"required_without_all=SRVLookup ServerAddress"`
+
+	// SRVResolver is an optional override to indicate that a specific
+	// implementation of the SRV resolver should be used. The default is 'srv'.
+	// For more details, see the documentation in:
+	// grpc/internal/resolver/dns/dns_resolver.go.
+	SRVResolver string `validate:"excluded_with=ServerAddress,isdefault|oneof=srv nonce-srv"`
+
+	// ServerAddress is a single host:port that the gRPC client will, if
+	// necessary, resolve via DNS and then connect to. If the address provided
+	// is 'foo.service.consul:8080' then the dNSName to be authenticated in
+	// the server certificate would be 'foo.service.consul'.
+	//
+	// In a Consul configuration file you would specify 'foo.service.consul' as:
+	//
+	//	services {
+	//	  id      = "some-unique-id-1"
+	//	  name    = "foo"
+	//	  address = "10.77.77.77"
+	//	}
+	//	services {
+	//	  id      = "some-unique-id-2"
+	//	  name    = "foo"
+	//	  address = "10.88.88.88"
+	//	}
+	//
+	// If you've added the above to your Consul configuration file (and reloaded
+	// Consul) then you should be able to resolve the following dig query:
+	//
+	//	$ dig A @10.77.77.10 foo.service.consul +short
+	//	10.77.77.77
+	//	10.88.88.88
+	ServerAddress string `validate:"required_without_all=SRVLookup SRVLookups,omitempty,hostname_port"`
+
+	// HostOverride is an optional override for the dNSName the client will
+	// verify in the certificate presented by the server.
+	HostOverride string `validate:"omitempty,hostname"`
+	Timeout      config.Duration
+
+	// NoWaitForReady turns off our (current) default of setting grpc.WaitForReady(true).
+	// This means if all of a GRPC client's backends are down, it will error immediately.
+	// The current default, grpc.WaitForReady(true), means that if all of a GRPC client's
+	// backends are down, it will wait until either one becomes available or the RPC
+	// times out.
+	NoWaitForReady bool
+}
 
-// UnmarshalJSON parses a string into a ConfigDuration using
-// time.ParseDuration. If the input does not unmarshal as a
-// string, then UnmarshalJSON returns ErrDurationMustBeString.
-func (d *ConfigDuration) UnmarshalJSON(b []byte) error {
-	s := ""
-	err := json.Unmarshal(b, &s)
-	if err != nil {
-		var jsonUnmarshalTypeErr *json.UnmarshalTypeError
-		if errors.As(err, &jsonUnmarshalTypeErr) {
-			return ErrDurationMustBeString
+// MakeTargetAndHostOverride constructs the target URI that the gRPC client will
+// connect to and the hostname (only for 'ServerAddress' and 'SRVLookup') that
+// will be validated during the mTLS handshake. An error is returned if the
+// provided configuration is invalid.
+func (c *GRPCClientConfig) MakeTargetAndHostOverride() (string, string, error) {
+	var hostOverride string
+	if c.ServerAddress != "" {
+		if c.SRVLookup != nil {
+			return "", "", errors.New(
+				"both 'serverAddress' and 'SRVLookup' in gRPC client config. Only one should be provided",
+			)
+		}
+		// Lookup backends using DNS A records.
+		targetHost, _, err := net.SplitHostPort(c.ServerAddress)
+		if err != nil {
+			return "", "", err
 		}
-		return err
-	}
-	dd, err := time.ParseDuration(s)
-	d.Duration = dd
-	return err
-}
 
-// MarshalJSON returns the string form of the duration, as a byte array.
-func (d ConfigDuration) MarshalJSON() ([]byte, error) { - return []byte(d.Duration.String()), nil -} + hostOverride = targetHost + if c.HostOverride != "" { + hostOverride = c.HostOverride + } + return fmt.Sprintf("dns://%s/%s", c.DNSAuthority, c.ServerAddress), hostOverride, nil -// UnmarshalYAML uses the same format as JSON, but is called by the YAML -// parser (vs. the JSON parser). -func (d *ConfigDuration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - err := unmarshal(&s) - if err != nil { - return err - } - dur, err := time.ParseDuration(s) - if err != nil { - return err - } + } else if c.SRVLookup != nil { + if c.DNSAuthority == "" { + return "", "", errors.New("field 'dnsAuthority' is required in gRPC client config with SRVLookup") + } + scheme, err := c.makeSRVScheme() + if err != nil { + return "", "", err + } + // Lookup backends using DNS SRV records. + targetHost := c.SRVLookup.Service + "." + c.SRVLookup.Domain - d.Duration = dur - return nil + hostOverride = targetHost + if c.HostOverride != "" { + hostOverride = c.HostOverride + } + return fmt.Sprintf("%s://%s/%s", scheme, c.DNSAuthority, targetHost), hostOverride, nil + + } else if c.SRVLookups != nil { + if c.DNSAuthority == "" { + return "", "", errors.New("field 'dnsAuthority' is required in gRPC client config with SRVLookups") + } + scheme, err := c.makeSRVScheme() + if err != nil { + return "", "", err + } + // Lookup backends using multiple DNS SRV records. + var targetHosts []string + for _, s := range c.SRVLookups { + targetHosts = append(targetHosts, s.Service+"."+s.Domain) + } + if c.HostOverride != "" { + hostOverride = c.HostOverride + } + return fmt.Sprintf("%s://%s/%s", scheme, c.DNSAuthority, strings.Join(targetHosts, ",")), hostOverride, nil + + } else { + return "", "", errors.New( + "at least one of 'serverAddress', 'SRVLookup', or 'SRVLookups' required in gRPC client config", + ) + } } -// GRPCClientConfig contains the information needed to talk to the gRPC service -type GRPCClientConfig struct { - ServerAddress string - Timeout ConfigDuration +// makeSRVScheme returns the scheme to use for SRV lookups. If the SRVResolver +// field is empty, it returns "srv". Otherwise it checks that the specified +// SRVResolver is registered with the gRPC runtime and returns it. +func (c *GRPCClientConfig) makeSRVScheme() (string, error) { + if c.SRVResolver == "" { + return "srv", nil + } + rb := resolver.Get(c.SRVResolver) + if rb == nil { + return "", fmt.Errorf("resolver %q is not registered", c.SRVResolver) + } + return c.SRVResolver, nil } -// GRPCServerConfig contains the information needed to run a gRPC service +// GRPCServerConfig contains the information needed to start a gRPC server. type GRPCServerConfig struct { - Address string `json:"address"` - // ClientNames is a list of allowed client certificate subject alternate names - // (SANs). The server will reject clients that do not present a certificate - // with a SAN present on the `ClientNames` list. - ClientNames []string `json:"clientNames"` + Address string `json:"address" validate:"omitempty,hostname_port"` + // Services is a map of service names to configuration specific to that service. + // These service names must match the service names advertised by gRPC itself, + // which are identical to the names set in our gRPC .proto files prefixed by + // the package names set in those files (e.g. "ca.CertificateAuthority"). 
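+	//
+	// A hypothetical JSON config for this field (the client name here is
+	// illustrative, not a recommendation):
+	//
+	//	"services": {
+	//		"ca.CertificateAuthority": {
+	//			"clientNames": ["ra.boulder"]
+	//		}
+	//	}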
+	Services map[string]*GRPCServiceConfig `json:"services" validate:"required,dive,required"`
 	// MaxConnectionAge specifies how long a connection may live before the server sends a GoAway to the
 	// client. Because gRPC connections re-resolve DNS after a connection close,
 	// this controls how long it takes before a client learns about changes to its
 	// backends.
 	// https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters
-	MaxConnectionAge ConfigDuration
+	MaxConnectionAge config.Duration `validate:"required"`
 }
 
-// PortConfig specifies what ports the VA should call to on the remote
-// host when performing its checks.
-type PortConfig struct {
-	HTTPPort  int
-	HTTPSPort int
-	TLSPort   int
+// GRPCServiceConfig contains the information needed to configure a gRPC service.
+type GRPCServiceConfig struct {
+	// ClientNames is the list of accepted gRPC client certificate SANs.
+	// Connections from clients not in this list will be rejected by the
+	// upstream listener, and RPCs from unlisted clients will be denied by the
+	// server interceptor.
+	ClientNames []string `json:"clientNames" validate:"min=1,dive,hostname,required"`
 }
 
-// BeelineConfig provides config options for the Honeycomb beeline-go library,
-// which are passed to its beeline.Init() method.
-type BeelineConfig struct {
-	// WriteKey is the API key needed to send data Honeycomb. This can be given
-	// directly in the JSON config for local development, or as a path to a
-	// separate file for production deployment.
-	WriteKey PasswordConfig
-	// Dataset is the event collection, e.g. Staging or Prod.
-	Dataset string
-	// SampleRate is the (positive integer) denominator of the sample rate.
-	// Default: 1 (meaning all traces are sent). Set higher to send fewer traces.
-	SampleRate uint32
-	// Mute disables honeycomb entirely; useful in test environments.
-	Mute bool
-	// Many other fields of beeline.Config are omitted as they are not yet used.
+// OpenTelemetryConfig configures tracing via OpenTelemetry.
+// To enable tracing, set a nonzero SampleRatio and configure an Endpoint.
+type OpenTelemetryConfig struct {
+	// Endpoint to connect to with the OTLP protocol over gRPC.
+	// It should be of the form "localhost:4317".
+	//
+	// It always connects over plaintext, and so is only intended to connect
+	// to a local OpenTelemetry collector. This should not be used over an
+	// insecure network.
+	Endpoint string
+
+	// SampleRatio is the ratio of new traces to head sample.
+	// This only affects new traces without a parent that has its own sampling
+	// decision; traces with such a parent use the parent's sampling decision.
+	//
+	// Set to something between 0 and 1, where 1 samples all traces.
+	// This is primarily meant as a pressure relief if the Endpoint we connect to
+	// is being overloaded, and we otherwise handle sampling in the collectors.
+	// See otel trace.ParentBased and trace.TraceIDRatioBased for details.
+	SampleRatio float64
 }
 
-// makeSampler constructs a SamplerHook which will deterministically decide if
-// any given span should be sampled based on its TraceID, which is shared by all
-// spans within a trace. If a trace_id can't be found, the span will be sampled.
-// A sample rate of 0 defaults to a sample rate of 1 (i.e. all events are sent).
-func makeSampler(rate uint32) func(fields map[string]interface{}) (bool, int) {
-	if rate == 0 {
-		rate = 1
-	}
-	upperBound := math.MaxUint32 / rate
+// OpenTelemetryHTTPConfig configures the otelhttp server tracing.
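+//
+// A minimal sketch (assumed wiring, not necessarily how Boulder applies it)
+// of passing the resulting options to an otelhttp handler:
+//
+//	handler := otelhttp.NewHandler(mux, "server", cfg.Options()...)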
+type OpenTelemetryHTTPConfig struct {
+	// TrustIncomingSpans should only be set true if there's a trusted service
+	// connecting to Boulder, such as a load balancer that's tracing-aware.
+	// If false (the default), incoming traces won't be set as the parent.
+	// See otelhttp.WithPublicEndpoint.
+	TrustIncomingSpans bool
+}
 
-	return func(fields map[string]interface{}) (bool, int) {
-		id, ok := fields["trace.trace_id"].(string)
-		if !ok {
-			return true, 1
-		}
-		h := fnv.New32()
-		h.Write([]byte(id))
-		return h.Sum32() < upperBound, int(rate)
+// Options returns the otelhttp options for this configuration. They can be
+// passed to otelhttp.NewHandler or Boulder's wrapper, measured_http.New.
+func (c *OpenTelemetryHTTPConfig) Options() []otelhttp.Option {
+	var options []otelhttp.Option
+	if !c.TrustIncomingSpans {
+		options = append(options, otelhttp.WithPublicEndpoint())
 	}
+	return options
+}
+
+// DNSProvider contains the configuration for a DNS provider in the bdns package
+// which supports dynamic reloading of its backends.
+type DNSProvider struct {
+	// DNSAuthority is the single hostname, IP address, or host:port of the
+	// DNS server to be used for resolution of DNS backends. If the address
+	// contains a hostname it will be resolved via the system DNS. If the port
+	// is left unspecified it will default to '53'. If this field is left
+	// unspecified the system DNS will be used for resolution of DNS backends.
+	DNSAuthority string `validate:"required,ip|hostname|hostname_port"`
+
+	// SRVLookup contains the service and domain name used to construct a SRV
+	// DNS query to look up DNS backends. 'Domain' is required. 'Service' is
+	// optional and will default to 'dns' if left unspecified.
+	//
+	// Usage: If the resource record is 'unbound.service.consul', then the
+	// 'Service' is 'unbound' and the 'Domain' is 'service.consul'. The expected
+	// dNSName to be authenticated in the server certificate would be
+	// 'unbound.service.consul'. The 'proto' field of the SRV record MUST
+	// contain 'udp' and the 'port' field MUST be a valid port. In a Consul
+	// configuration file you would specify 'unbound.service.consul' as:
+	//
+	//	services {
+	//	  id      = "unbound-1" // Must be unique
+	//	  name    = "unbound"
+	//	  address = "10.77.77.77"
+	//	  port    = 8053
+	//	  tags    = ["udp"]
+	//	}
+	//
+	//	services {
+	//	  id      = "unbound-2" // Must be unique
+	//	  name    = "unbound"
+	//	  address = "10.77.77.77"
+	//	  port    = 8153
+	//	  tags    = ["udp"]
+	//	}
+	//
+	// If you've added the above to your Consul configuration file (and reloaded
+	// Consul) then you should be able to resolve the following dig query:
+	//
+	//	$ dig @10.77.77.10 -t SRV _unbound._udp.service.consul +short
+	//	1 1 8053 0a4d4d4d.addr.dc1.consul.
+	//	1 1 8153 0a4d4d4d.addr.dc1.consul.
+	SRVLookup ServiceDomain `validate:"required"`
 }
 
-// Load converts a BeelineConfig to a beeline.Config, loading the api WriteKey
-// and setting the ServiceName automatically.
-func (bc *BeelineConfig) Load() (beeline.Config, error) {
-	exec, err := os.Executable()
+// HMACKeyConfig specifies a path to a file containing a hexadecimal-encoded
+// HMAC key. The key must represent exactly 256 bits (32 bytes) of random data
+// to be suitable for use as a 256-bit hashing key (e.g., the output of `openssl
+// rand -hex 32`).
+type HMACKeyConfig struct {
+	KeyFile string `validate:"required"`
+}
+
+// Load reads the HMAC key from the file, decodes it from hexadecimal, ensures
+// it represents exactly 256 bits (32 bytes), and returns it as a byte slice.
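+//
+// A suitable key file can be generated with, for example:
+//
+//	openssl rand -hex 32 > hmac.key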
+func (hc *HMACKeyConfig) Load() ([]byte, error) { + contents, err := os.ReadFile(hc.KeyFile) if err != nil { - return beeline.Config{}, fmt.Errorf("failed to get executable name: %w", err) + return nil, err } - writekey, err := bc.WriteKey.Pass() + decoded, err := hex.DecodeString(strings.TrimSpace(string(contents))) if err != nil { - return beeline.Config{}, fmt.Errorf("failed to get write key: %w", err) + return nil, fmt.Errorf("invalid hexadecimal encoding: %w", err) } - return beeline.Config{ - WriteKey: writekey, - Dataset: bc.Dataset, - ServiceName: path.Base(exec), - SamplerHook: makeSampler(bc.SampleRate), - Mute: bc.Mute, - }, nil + if len(decoded) != 32 { + return nil, fmt.Errorf( + "validating HMAC key, must be exactly 256 bits (32 bytes) after decoding, got %d", + len(decoded), + ) + } + return decoded, nil } diff --git a/cmd/config_test.go b/cmd/config_test.go index 528b418737d..2935889b507 100644 --- a/cmd/config_test.go +++ b/cmd/config_test.go @@ -1,11 +1,21 @@ package cmd import ( - "fmt" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "os" + "path" "regexp" "strings" "testing" + "time" + "github.com/letsencrypt/boulder/metrics" "github.com/letsencrypt/boulder/test" ) @@ -52,42 +62,71 @@ func TestPasswordConfig(t *testing.T) { func TestTLSConfigLoad(t *testing.T) { null := "/dev/null" nonExistent := "[nonexistent]" - cert := "testdata/cert.pem" - key := "testdata/key.pem" - caCert := "testdata/minica.pem" + tmp := t.TempDir() + cert := path.Join(tmp, "TestTLSConfigLoad.cert.pem") + key := path.Join(tmp, "TestTLSConfigLoad.key.pem") + caCert := path.Join(tmp, "TestTLSConfigLoad.cacert.pem") + + rootKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + test.AssertNotError(t, err, "creating test root key") + rootTemplate := &x509.Certificate{ + Subject: pkix.Name{CommonName: "test root"}, + SerialNumber: big.NewInt(12345), + NotBefore: time.Now().Add(-24 * time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + IsCA: true, + } + rootCert, err := x509.CreateCertificate(rand.Reader, rootTemplate, rootTemplate, rootKey.Public(), rootKey) + test.AssertNotError(t, err, "creating test root cert") + err = os.WriteFile(caCert, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rootCert}), os.ModeAppend) + test.AssertNotError(t, err, "writing test root cert to disk") + + intKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + test.AssertNotError(t, err, "creating test intermediate key") + intKeyBytes, err := x509.MarshalECPrivateKey(intKey) + test.AssertNotError(t, err, "marshalling test intermediate key") + err = os.WriteFile(key, pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: intKeyBytes}), os.ModeAppend) + test.AssertNotError(t, err, "writing test intermediate key cert to disk") + + intTemplate := &x509.Certificate{ + Subject: pkix.Name{CommonName: "test intermediate"}, + SerialNumber: big.NewInt(67890), + NotBefore: time.Now().Add(-12 * time.Hour), + NotAfter: time.Now().Add(12 * time.Hour), + IsCA: true, + } + intCert, err := x509.CreateCertificate(rand.Reader, intTemplate, rootTemplate, intKey.Public(), rootKey) + test.AssertNotError(t, err, "creating test intermediate cert") + err = os.WriteFile(cert, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: intCert}), os.ModeAppend) + test.AssertNotError(t, err, "writing test intermediate cert to disk") + testCases := []struct { TLSConfig want string }{ - {TLSConfig{nil, &null, &null}, "nil CertFile in 
TLSConfig"}, - {TLSConfig{&null, nil, &null}, "nil KeyFile in TLSConfig"}, - {TLSConfig{&null, &null, nil}, "nil CACertFile in TLSConfig"}, - {TLSConfig{&nonExistent, &key, &caCert}, "loading key pair.*no such file or directory"}, - {TLSConfig{&cert, &nonExistent, &caCert}, "loading key pair.*no such file or directory"}, - {TLSConfig{&cert, &key, &nonExistent}, "reading CA cert from.*no such file or directory"}, - {TLSConfig{&null, &key, &caCert}, "loading key pair.*failed to find any PEM data"}, - {TLSConfig{&cert, &null, &caCert}, "loading key pair.*failed to find any PEM data"}, - {TLSConfig{&cert, &key, &null}, "parsing CA certs"}, + {TLSConfig{"", null, null}, "nil CertFile in TLSConfig"}, + {TLSConfig{null, "", null}, "nil KeyFile in TLSConfig"}, + {TLSConfig{null, null, ""}, "nil CACertFile in TLSConfig"}, + {TLSConfig{nonExistent, key, caCert}, "loading key pair.*no such file or directory"}, + {TLSConfig{cert, nonExistent, caCert}, "loading key pair.*no such file or directory"}, + {TLSConfig{cert, key, nonExistent}, "reading CA cert from.*no such file or directory"}, + {TLSConfig{null, key, caCert}, "loading key pair.*failed to find any PEM data"}, + {TLSConfig{cert, null, caCert}, "loading key pair.*failed to find any PEM data"}, + {TLSConfig{cert, key, null}, "parsing CA certs"}, + {TLSConfig{cert, key, caCert}, ""}, } for _, tc := range testCases { - var title [3]string - if tc.CertFile == nil { - title[0] = "nil" - } else { - title[0] = *tc.CertFile - } - if tc.KeyFile == nil { - title[1] = "nil" - } else { - title[1] = *tc.KeyFile - } - if tc.CACertFile == nil { - title[2] = "nil" - } else { - title[2] = *tc.CACertFile + title := [3]string{tc.CertFile, tc.KeyFile, tc.CACertFile} + for i := range title { + if title[i] == "" { + title[i] = "nil" + } } t.Run(strings.Join(title[:], "_"), func(t *testing.T) { - _, err := tc.TLSConfig.Load() + _, err := tc.TLSConfig.Load(metrics.NoopRegisterer) + if err == nil && tc.want == "" { + return + } if err == nil { t.Errorf("got no error") } @@ -98,35 +137,57 @@ func TestTLSConfigLoad(t *testing.T) { } } -func TestSampler(t *testing.T) { - testCases := []struct { - samplerate uint32 - span map[string]interface{} - sampled bool - rate int +func TestHMACKeyConfigLoad(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + content string + expectedErr bool }{ - // At sample rate 1, both of these should get sampled. - {1, map[string]interface{}{"trace.trace_id": "foo"}, true, 1}, - {1, map[string]interface{}{"trace.trace_id": ""}, true, 1}, - // At sample rate 0, it should behave the same as sample rate 1. - {0, map[string]interface{}{"trace.trace_id": "foo"}, true, 1}, - {0, map[string]interface{}{"trace.trace_id": ""}, true, 1}, - // At sample rate 2, only one of these should be sampled. - {2, map[string]interface{}{"trace.trace_id": "foo"}, true, 2}, - {2, map[string]interface{}{"trace.trace_id": ""}, false, 2}, - // At sample rate 100, neither of these should be sampled. - {100, map[string]interface{}{"trace.trace_id": "foo"}, false, 100}, - {100, map[string]interface{}{"trace.trace_id": ""}, false, 100}, - // A missing or non-string trace_id should result in sampling. 
- {100, map[string]interface{}{}, true, 1}, - {100, map[string]interface{}{"trace.trace_id": 123}, true, 1}, + { + name: "Valid key", + content: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + expectedErr: false, + }, + { + name: "Empty file", + content: "", + expectedErr: true, + }, + { + name: "Just under 256-bit", + content: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789ab", + expectedErr: true, + }, + { + name: "Just over 256-bit", + content: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01", + expectedErr: true, + }, } - for _, tc := range testCases { - t.Run(fmt.Sprintf("Rate(%d) Span(%s)", tc.samplerate, tc.span), func(t *testing.T) { - s := makeSampler(tc.samplerate) - b, i := s(tc.span) - test.AssertEquals(t, b, tc.sampled) - test.AssertEquals(t, i, tc.rate) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + tempKeyFile, err := os.CreateTemp("", "*") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + defer os.Remove(tempKeyFile.Name()) + + _, err = tempKeyFile.WriteString(tt.content) + if err != nil { + t.Fatalf("failed to write to temp file: %v", err) + } + tempKeyFile.Close() + + hmacKeyConfig := HMACKeyConfig{KeyFile: tempKeyFile.Name()} + _, err = hmacKeyConfig.Load() + if (err != nil) != tt.expectedErr { + t.Errorf("expected error: %v, got: %v", tt.expectedErr, err) + } }) } } diff --git a/cmd/contact-auditor/README.md b/cmd/contact-auditor/README.md deleted file mode 100644 index 39083c894dd..00000000000 --- a/cmd/contact-auditor/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Contact-Auditor - -Audits subscriber registrations for e-mail addresses that -`notify-mailer` is currently configured to skip. - -# Usage: - -```shell - -config string - File containing a JSON config. - -to-file - Write the audit results to a file. - -to-stdout - Print the audit results to stdout. -``` - -## Results format: - -``` - "" "" -``` - -## Example output: - -### Successful run with no violations encountered and `--to-file`: - -``` -I004823 contact-auditor nfWK_gM Running contact-auditor -I004823 contact-auditor qJ_zsQ4 Beginning database query -I004823 contact-auditor je7V9QM Query completed successfully -I004823 contact-auditor 7LzGvQI Audit finished successfully -I004823 contact-auditor 5Pbk_QM Audit results were written to: audit-2006-01-02T15:04.tsv -``` - -### Contact contains entries that violate policy and `--to-stdout`: - -``` -I004823 contact-auditor nfWK_gM Running contact-auditor -I004823 contact-auditor qJ_zsQ4 Beginning database query -I004823 contact-auditor je7V9QM Query completed successfully -1 2006-01-02 15:04:05 validation "" "" -... -I004823 contact-auditor 2fv7-QY Audit finished successfully -``` - -### Contact is not valid JSON and `--to-stdout`: - -``` -I004823 contact-auditor nfWK_gM Running contact-auditor -I004823 contact-auditor qJ_zsQ4 Beginning database query -I004823 contact-auditor je7V9QM Query completed successfully -3 2006-01-02 15:04:05 unmarshal "" "" -... -I004823 contact-auditor 2fv7-QY Audit finished successfully -``` - -### Audit incomplete, query ended prematurely: - -``` -I004823 contact-auditor nfWK_gM Running contact-auditor -I004823 contact-auditor qJ_zsQ4 Beginning database query -... -E004823 contact-auditor 8LmTgww [AUDIT] Audit was interrupted, results may be incomplete: -exit status 1 -``` - -# Configuration file: -The path to a database config file like the one below must be provided -following the `-config` flag. 
- -```json -{ - "contactAuditor": { - "db": { - "dbConnectFile": , - "maxOpenConns": , - "maxIdleConns": , - "connMaxLifetime": , - "connMaxIdleTime": - } - } - } - -``` diff --git a/cmd/contact-auditor/main.go b/cmd/contact-auditor/main.go deleted file mode 100644 index e595af473ea..00000000000 --- a/cmd/contact-auditor/main.go +++ /dev/null @@ -1,206 +0,0 @@ -package notmain - -import ( - "database/sql" - "encoding/json" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/letsencrypt/boulder/cmd" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/policy" - "github.com/letsencrypt/boulder/sa" -) - -type contactAuditor struct { - db *sql.DB - resultsFile *os.File - writeToStdout bool - logger blog.Logger -} - -type result struct { - id int64 - contacts []string - createdAt string -} - -func unmarshalContact(contact []byte) ([]string, error) { - var contacts []string - err := json.Unmarshal(contact, &contacts) - if err != nil { - return nil, err - } - return contacts, nil -} - -func validateContacts(id int64, createdAt string, contacts []string) error { - // Setup a buffer to store any validation problems we encounter. - var probsBuff strings.Builder - - // Helper to write validation problems to our buffer. - writeProb := func(contact string, prob string) { - // Add validation problem to buffer. - fmt.Fprintf(&probsBuff, "%d\t%s\tvalidation\t%q\t%q\n", id, createdAt, contact, prob) - } - - for _, contact := range contacts { - if strings.HasPrefix(contact, "mailto:") { - err := policy.ValidEmail(strings.TrimPrefix(contact, "mailto:")) - if err != nil { - writeProb(contact, err.Error()) - } - } else { - writeProb(contact, "missing 'mailto:' prefix") - } - } - - if probsBuff.Len() != 0 { - return errors.New(probsBuff.String()) - } - return nil -} - -// beginAuditQuery executes the audit query and returns a cursor used to -// stream the results. -func (c contactAuditor) beginAuditQuery() (*sql.Rows, error) { - rows, err := c.db.Query( - `SELECT DISTINCT r.id, r.contact, r.createdAt - FROM registrations AS r - INNER JOIN certificates AS c on c.registrationID = r.id - WHERE r.contact NOT IN ('[]', 'null');`) - if err != nil { - return nil, err - } - return rows, nil -} - -func (c contactAuditor) writeResults(result string) { - if c.writeToStdout { - _, err := fmt.Print(result) - if err != nil { - c.logger.Errf("Error while writing result to stdout: %s", err) - } - } - - if c.resultsFile != nil { - _, err := c.resultsFile.WriteString(result) - if err != nil { - c.logger.Errf("Error while writing result to file: %s", err) - } - } -} - -// run retrieves a cursor from `beginAuditQuery` and then audits the -// `contact` column of all returned rows for abnormalities or policy -// violations. -func (c contactAuditor) run(resChan chan *result) error { - c.logger.Infof("Beginning database query") - rows, err := c.beginAuditQuery() - if err != nil { - return err - } - - for rows.Next() { - var id int64 - var contact []byte - var createdAt string - err := rows.Scan(&id, &contact, &createdAt) - if err != nil { - return err - } - - contacts, err := unmarshalContact(contact) - if err != nil { - c.writeResults(fmt.Sprintf("%d\t%s\tunmarshal\t%q\t%q\n", id, createdAt, contact, err)) - } - - err = validateContacts(id, createdAt, contacts) - if err != nil { - c.writeResults(err.Error()) - } - - // Only used for testing. 
- if resChan != nil { - resChan <- &result{id, contacts, createdAt} - } - } - // Ensure the query wasn't interrupted before it could complete. - err = rows.Close() - if err != nil { - return err - } else { - c.logger.Info("Query completed successfully") - } - - // Only used for testing. - if resChan != nil { - close(resChan) - } - - return nil -} - -type Config struct { - ContactAuditor struct { - DB cmd.DBConfig - } -} - -func main() { - configFile := flag.String("config", "", "File containing a JSON config.") - writeToStdout := flag.Bool("to-stdout", false, "Print the audit results to stdout.") - writeToFile := flag.Bool("to-file", false, "Write the audit results to a file.") - flag.Parse() - - logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7}) - - // Load config from JSON. - configData, err := ioutil.ReadFile(*configFile) - cmd.FailOnError(err, fmt.Sprintf("Error reading config file: %q", *configFile)) - - var cfg Config - err = json.Unmarshal(configData, &cfg) - cmd.FailOnError(err, "Couldn't unmarshal config") - - db, err := sa.InitSqlDb(cfg.ContactAuditor.DB, nil) - cmd.FailOnError(err, "Couldn't setup database client") - - var resultsFile *os.File - if *writeToFile { - resultsFile, err = os.Create( - fmt.Sprintf("contact-audit-%s.tsv", time.Now().Format("2006-01-02T15:04")), - ) - cmd.FailOnError(err, "Failed to create results file") - } - - // Setup and run contact-auditor. - auditor := contactAuditor{ - db: db, - resultsFile: resultsFile, - writeToStdout: *writeToStdout, - logger: logger, - } - - logger.Info("Running contact-auditor") - - err = auditor.run(nil) - cmd.FailOnError(err, "Audit was interrupted, results may be incomplete") - - logger.Info("Audit finished successfully") - - if *writeToFile { - logger.Infof("Audit results were written to: %s", resultsFile.Name()) - resultsFile.Close() - } - -} - -func init() { - cmd.RegisterCommand("contact-auditor", main) -} diff --git a/cmd/contact-auditor/main_test.go b/cmd/contact-auditor/main_test.go deleted file mode 100644 index cd78af3fa96..00000000000 --- a/cmd/contact-auditor/main_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package notmain - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "fmt" - "io/ioutil" - "math/big" - "net" - "os" - "strings" - "testing" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/db" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/sa" - "github.com/letsencrypt/boulder/test" - "github.com/letsencrypt/boulder/test/vars" -) - -var ( - regA *corepb.Registration - regB *corepb.Registration - regC *corepb.Registration - regD *corepb.Registration -) - -const ( - emailARaw = "test@example.com" - emailBRaw = "example@notexample.com" - emailCRaw = "test-example@notexample.com" - telNum = "666-666-7777" -) - -func TestMailAuditor(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Add some test registrations. - testCtx.addRegistrations(t) - - // Should be 0 since we haven't added registrations. - resChan := make(chan *result, 10) - err := testCtx.c.run(resChan) - test.AssertNotError(t, err, "received error") - test.AssertEquals(t, len(resChan), 0) -} - -func TestMailAuditorWithResults(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Add some test registrations. 
- testCtx.addRegistrations(t) - - // Now add some certificates. - testCtx.addCertificates(t) - - resChan := make(chan *result, 10) - err := testCtx.c.run(resChan) - test.AssertNotError(t, err, "received error") - - // We should get back A, B, C, and D - test.AssertEquals(t, len(resChan), 4) - for entry := range resChan { - err := validateContacts(entry.id, entry.createdAt, entry.contacts) - switch entry.id { - case regA.Id: - // Contact validation policy sad path. - test.AssertDeepEquals(t, entry.contacts, []string{"mailto:test@example.com"}) - test.AssertError(t, err, "failed to error on a contact that violates our e-mail policy") - case regB.Id: - // Ensure grace period was respected. - test.AssertDeepEquals(t, entry.contacts, []string{"mailto:example@notexample.com"}) - test.AssertNotError(t, err, "received error for a valid contact entry") - case regC.Id: - // Contact validation happy path. - test.AssertDeepEquals(t, entry.contacts, []string{"mailto:test-example@notexample.com"}) - test.AssertNotError(t, err, "received error for a valid contact entry") - - // Unmarshal Contact sad path. - _, err := unmarshalContact([]byte("[ mailto:test@example.com ]")) - test.AssertError(t, err, "failed to error while unmarshaling invalid Contact JSON") - - // Fix our JSON and ensure that the contact field returns - // errors for our 2 additional contacts - contacts, err := unmarshalContact([]byte(`[ "mailto:test@example.com", "tel:666-666-7777" ]`)) - test.AssertNotError(t, err, "received error while unmarshaling valid Contact JSON") - - // Ensure Contact validation now fails. - err = validateContacts(entry.id, entry.createdAt, contacts) - test.AssertError(t, err, "failed to error on 2 invalid Contact entries") - case regD.Id: - test.AssertDeepEquals(t, entry.contacts, []string{"tel:666-666-7777"}) - test.AssertError(t, err, "failed to error on an invalid contact entry") - default: - t.Errorf("ID: %d was not expected", entry.id) - } - } - - // Load results file. - data, err := ioutil.ReadFile(testCtx.c.resultsFile.Name()) - if err != nil { - t.Error(err) - } - - // Results file should contain 2 newlines, 1 for each result. - contentLines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") - test.AssertEquals(t, len(contentLines), 2) - - // Each result entry should contain five tab separated columns. 
- for _, line := range contentLines { - test.AssertEquals(t, len(strings.Split(line, "\t")), 5) - } -} - -type testCtx struct { - c contactAuditor - dbMap *db.WrappedMap - ssa *sa.SQLStorageAuthority - cleanUp func() -} - -func (tc testCtx) addRegistrations(t *testing.T) { - emailA := "mailto:" + emailARaw - emailB := "mailto:" + emailBRaw - emailC := "mailto:" + emailCRaw - tel := "tel:" + telNum - - // Every registration needs a unique JOSE key - jsonKeyA := []byte(`{ - "kty":"RSA", - "n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw", - "e":"AQAB" -}`) - jsonKeyB := []byte(`{ - "kty":"RSA", - "n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w", - "e":"AAEAAQ" -}`) - jsonKeyC := []byte(`{ - "kty":"RSA", - "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ", - "e":"AQAB" -}`) - jsonKeyD := []byte(`{ - "kty":"RSA", - "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-FCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ", - "e":"AQAB" -}`) - - initialIP, err := net.ParseIP("127.0.0.1").MarshalText() - test.AssertNotError(t, err, "Couldn't create initialIP") - - regA = &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: initialIP, - } - regB = &corepb.Registration{ - Id: 2, - Contact: []string{emailB}, - Key: jsonKeyB, - InitialIP: initialIP, - } - regC = &corepb.Registration{ - Id: 3, - Contact: []string{emailC}, - Key: jsonKeyC, - InitialIP: initialIP, - } - // Reg D has a `tel:` contact ACME URL - regD = &corepb.Registration{ - Id: 4, - Contact: []string{tel}, - Key: jsonKeyD, - InitialIP: initialIP, - } - - // Add the four test registrations - ctx := context.Background() - regA, err = tc.ssa.NewRegistration(ctx, regA) - test.AssertNotError(t, err, "Couldn't store regA") - regB, err = tc.ssa.NewRegistration(ctx, regB) - test.AssertNotError(t, err, "Couldn't store regB") - regC, err = tc.ssa.NewRegistration(ctx, regC) - test.AssertNotError(t, err, "Couldn't store regC") - regD, err = tc.ssa.NewRegistration(ctx, regD) - test.AssertNotError(t, err, "Couldn't store regD") -} - -func (tc testCtx) addCertificates(t *testing.T) { - serial1 := big.NewInt(1336) - serial1String := core.SerialToString(serial1) - serial2 := big.NewInt(1337) - serial2String := core.SerialToString(serial2) - serial3 := big.NewInt(1338) - serial3String := core.SerialToString(serial3) - serial4 := big.NewInt(1339) - serial4String := core.SerialToString(serial4) - n := 
bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==") - e := intFromB64("AQAB") - d := bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==") - p := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - q := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - - testKey := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{N: n, E: e}, - D: d, - Primes: []*big.Int{p, q}, - } - - fc := newFakeClock(t) - - // Add one cert for RegA that expires in 30 days - rawCertA := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy A", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-a.com"}, - SerialNumber: serial1, - } - certDerA, _ := x509.CreateCertificate(rand.Reader, &rawCertA, &rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, - } - err := tc.dbMap.Insert(certA) - test.AssertNotError(t, err, "Couldn't add certA") - _, err = tc.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-a", - serial1String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certA") - - // Add one cert for RegB that already expired 30 days ago - rawCertB := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy B", - }, - NotAfter: fc.Now().Add(-30 * 24 * time.Hour), - DNSNames: []string{"example-b.com"}, - SerialNumber: serial2, - } - certDerB, _ := x509.CreateCertificate(rand.Reader, &rawCertB, &rawCertB, &testKey.PublicKey, &testKey) - certB := &core.Certificate{ - RegistrationID: regB.Id, - Serial: serial2String, - Expires: rawCertB.NotAfter, - DER: certDerB, - } - err = tc.dbMap.Insert(certB) - test.AssertNotError(t, err, "Couldn't add certB") - _, err = tc.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-b", - serial2String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certB") - - // Add one cert for RegC that expires in 30 days - rawCertC := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy C", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-c.com"}, - SerialNumber: serial3, - } - certDerC, _ := x509.CreateCertificate(rand.Reader, &rawCertC, &rawCertC, &testKey.PublicKey, &testKey) - certC := &core.Certificate{ - RegistrationID: regC.Id, - Serial: serial3String, - Expires: rawCertC.NotAfter, - DER: certDerC, - } - err = tc.dbMap.Insert(certC) - test.AssertNotError(t, err, "Couldn't add certC") - _, err = tc.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - 
"com.example-c", - serial3String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certC") - - // Add one cert for RegD that expires in 30 days - rawCertD := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy D", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-d.com"}, - SerialNumber: serial4, - } - certDerD, _ := x509.CreateCertificate(rand.Reader, &rawCertD, &rawCertD, &testKey.PublicKey, &testKey) - certD := &core.Certificate{ - RegistrationID: regD.Id, - Serial: serial4String, - Expires: rawCertD.NotAfter, - DER: certDerD, - } - err = tc.dbMap.Insert(certD) - test.AssertNotError(t, err, "Couldn't add certD") - _, err = tc.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-d", - serial4String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certD") -} - -func setup(t *testing.T) testCtx { - log := blog.UseMock() - - // Using DBConnSAFullPerms to be able to insert registrations and - // certificates - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - if err != nil { - t.Fatalf("Couldn't connect to the database: %s", err) - } - - // Make temp results file - file, err := ioutil.TempFile("", fmt.Sprintf("audit-%s", time.Now().Format("2006-01-02T15:04"))) - if err != nil { - t.Fatal(err) - } - - cleanUp := func() { - test.ResetSATestDatabase(t) - file.Close() - os.Remove(file.Name()) - } - - db, err := sa.InitSqlDb(cmd.DBConfig{DBConnect: vars.DBConnSAMailer}, nil) - if err != nil { - t.Fatalf("Couldn't connect to the database: %s", err) - } - - ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, nil, clock.New(), log, metrics.NoopRegisterer, 1) - if err != nil { - t.Fatalf("unable to create SQLStorageAuthority: %s", err) - } - - return testCtx{ - c: contactAuditor{ - db: db, - resultsFile: file, - logger: blog.NewMock(), - }, - dbMap: dbMap, - ssa: ssa, - cleanUp: cleanUp, - } -} - -func bigIntFromB64(b64 string) *big.Int { - bytes, _ := base64.URLEncoding.DecodeString(b64) - x := big.NewInt(0) - x.SetBytes(bytes) - return x -} - -func intFromB64(b64 string) int { - return int(bigIntFromB64(b64).Int64()) -} - -func newFakeClock(t *testing.T) clock.FakeClock { - const fakeTimeFormat = "2006-01-02T15:04:05.999999999Z" - ft, err := time.Parse(fakeTimeFormat, fakeTimeFormat) - if err != nil { - t.Fatal(err) - } - fc := clock.NewFake() - fc.Set(ft.UTC()) - return fc -} diff --git a/cmd/crl-checker/main.go b/cmd/crl-checker/main.go new file mode 100644 index 00000000000..c5767f407e8 --- /dev/null +++ b/cmd/crl-checker/main.go @@ -0,0 +1,152 @@ +package notmain + +import ( + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/crl/checker" +) + +func downloadShard(url string) (*x509.RevocationList, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("downloading crl: %w", err) + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("downloading crl: http status %d", resp.StatusCode) + } + + crlBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading CRL bytes: %w", err) + } + + crl, err := x509.ParseRevocationList(crlBytes) + if err != nil { + return nil, fmt.Errorf("parsing CRL: %w", err) + } + + return crl, nil +} + +func main() { + urlFile := flag.String("crls", "", "path to a file containing a JSON Array 
of CRL URLs") + issuerFile := flag.String("issuer", "", "path to an issuer certificate on disk, required, '-' to disable validation") + ageLimitStr := flag.String("ageLimit", "168h", "maximum allowable age of a CRL shard") + emitRevoked := flag.Bool("emitRevoked", false, "emit revoked serial numbers on stdout, one per line, hex-encoded") + save := flag.Bool("save", false, "save CRLs to files named after the URL") + flag.Parse() + + logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1}) + cmd.LogStartup(logger) + + urlFileContents, err := os.ReadFile(*urlFile) + cmd.FailOnError(err, "Reading CRL URLs file") + + var urls []string + err = json.Unmarshal(urlFileContents, &urls) + cmd.FailOnError(err, "Parsing JSON Array of CRL URLs") + + if *issuerFile == "" { + cmd.Fail("-issuer is required, but may be '-' to disable validation") + } + + var issuer *x509.Certificate + if *issuerFile != "-" { + issuer, err = core.LoadCert(*issuerFile) + cmd.FailOnError(err, "Loading issuer certificate") + } else { + logger.Warning("CRL signature validation disabled") + } + + ageLimit, err := time.ParseDuration(*ageLimitStr) + cmd.FailOnError(err, "Parsing age limit") + + errCount := 0 + seenSerials := make(map[string]struct{}) + totalBytes := 0 + oldestTimestamp := time.Time{} + for _, u := range urls { + crl, err := downloadShard(u) + if err != nil { + errCount += 1 + logger.Errf("fetching CRL %q failed: %s", u, err) + continue + } + + if *save { + parsedURL, err := url.Parse(u) + if err != nil { + logger.Errf("parsing url: %s", err) + continue + } + filename := fmt.Sprintf("%s%s", parsedURL.Host, strings.ReplaceAll(parsedURL.Path, "/", "_")) + err = os.WriteFile(filename, crl.Raw, 0660) + if err != nil { + logger.Errf("writing file: %s", err) + continue + } + } + + totalBytes += len(crl.Raw) + + zcrl, err := x509.ParseRevocationList(crl.Raw) + if err != nil { + errCount += 1 + logger.Errf("parsing CRL %q failed: %s", u, err) + continue + } + + err = checker.Validate(zcrl, issuer, ageLimit) + if err != nil { + errCount += 1 + logger.Errf("checking CRL %q failed: %s", u, err) + continue + } + + if oldestTimestamp.IsZero() || crl.ThisUpdate.Before(oldestTimestamp) { + oldestTimestamp = crl.ThisUpdate + } + + for _, c := range crl.RevokedCertificateEntries { + serial := core.SerialToString(c.SerialNumber) + if _, seen := seenSerials[serial]; seen { + errCount += 1 + logger.Errf("serial seen in multiple shards: %s", serial) + continue + } + seenSerials[serial] = struct{}{} + } + } + + if *emitRevoked { + for serial := range seenSerials { + fmt.Println(serial) + } + } + + if errCount != 0 { + cmd.Fail(fmt.Sprintf("Encountered %d errors", errCount)) + } + + logger.AuditInfo("CRL checking complete", map[string]string{ + "numCRLs": fmt.Sprintf("%d", len(urls)), + "numSerials": fmt.Sprintf("%d", len(seenSerials)), + "numBytes": fmt.Sprintf("%d", totalBytes), + "oldestCRL": oldestTimestamp.Format(time.RFC3339), + }) +} + +func init() { + cmd.RegisterCommand("crl-checker", main, nil) +} diff --git a/cmd/crl-storer/main.go b/cmd/crl-storer/main.go new file mode 100644 index 00000000000..acc15684be4 --- /dev/null +++ b/cmd/crl-storer/main.go @@ -0,0 +1,145 @@ +package notmain + +import ( + "context" + "flag" + "net/http" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + awsl "github.com/aws/smithy-go/logging" + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/cmd" + 
"github.com/letsencrypt/boulder/crl/storer" + cspb "github.com/letsencrypt/boulder/crl/storer/proto" + "github.com/letsencrypt/boulder/features" + bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/issuance" + blog "github.com/letsencrypt/boulder/log" +) + +type Config struct { + CRLStorer struct { + cmd.ServiceConfig + + // IssuerCerts is a list of paths to issuer certificates on disk. These will + // be used to validate the CRLs received by this service before uploading + // them. + IssuerCerts []string `validate:"min=1,dive,required"` + + // S3Endpoint is the URL at which the S3-API-compatible object storage + // service can be reached. This can be used to point to a non-Amazon storage + // service, or to point to a fake service for testing. It should be left + // blank by default. + S3Endpoint string + // S3Bucket is the AWS Bucket that uploads should go to. Must be created + // (and have appropriate permissions set) beforehand. + S3Bucket string + // AWSConfigFile is the path to a file on disk containing an AWS config. + // The format of the configuration file is specified at + // https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html. + AWSConfigFile string + // AWSCredsFile is the path to a file on disk containing AWS credentials. + // The format of the credentials file is specified at + // https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html. + AWSCredsFile string + + Features features.Config + } + + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig +} + +// awsLogger implements the github.com/aws/smithy-go/logging.Logger interface. +type awsLogger struct { + blog.Logger +} + +func (log awsLogger) Logf(c awsl.Classification, format string, v ...any) { + switch c { + case awsl.Debug: + log.Debugf(format, v...) + case awsl.Warn: + log.Warningf(format, v...) + } +} + +func main() { + grpcAddr := flag.String("addr", "", "gRPC listen address override") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + configFile := flag.String("config", "", "File path to the configuration file for this service") + flag.Parse() + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + var c Config + err := cmd.ReadConfigFile(*configFile, &c) + cmd.FailOnError(err, "Reading JSON config file into config structure") + + features.Set(c.CRLStorer.Features) + + if *grpcAddr != "" { + c.CRLStorer.GRPC.Address = *grpcAddr + } + if *debugAddr != "" { + c.CRLStorer.DebugAddr = *debugAddr + } + + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CRLStorer.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + clk := clock.New() + + tlsConfig, err := c.CRLStorer.TLS.Load(scope) + cmd.FailOnError(err, "TLS config") + + issuers := make([]*issuance.Certificate, 0, len(c.CRLStorer.IssuerCerts)) + for _, filepath := range c.CRLStorer.IssuerCerts { + cert, err := issuance.LoadCertificate(filepath) + cmd.FailOnError(err, "Failed to load issuer cert") + issuers = append(issuers, cert) + } + + // Load the "default" AWS configuration, but override the set of config and + // credential files it reads from to just those specified in our JSON config, + // to ensure that it's not accidentally reading anything from the homedir or + // its other default config locations. 
+ awsConfig, err := config.LoadDefaultConfig( + context.Background(), + config.WithSharedConfigFiles([]string{c.CRLStorer.AWSConfigFile}), + config.WithSharedCredentialsFiles([]string{c.CRLStorer.AWSCredsFile}), + config.WithHTTPClient(new(http.Client)), + config.WithLogger(awsLogger{logger}), + config.WithClientLogMode(aws.LogRequestEventMessage|aws.LogResponseEventMessage), + ) + cmd.FailOnError(err, "Failed to load AWS config") + + s3opts := make([]func(*s3.Options), 0) + if c.CRLStorer.S3Endpoint != "" { + s3opts = append( + s3opts, + s3.WithEndpointResolver(s3.EndpointResolverFromURL(c.CRLStorer.S3Endpoint)), + func(o *s3.Options) { o.UsePathStyle = true }, + ) + } + s3client := s3.NewFromConfig(awsConfig, s3opts...) + + csi, err := storer.New(issuers, s3client, c.CRLStorer.S3Bucket, scope, logger, clk) + cmd.FailOnError(err, "Failed to create CRLStorer impl") + + start, err := bgrpc.NewServer(c.CRLStorer.GRPC, logger).Add( + &cspb.CRLStorer_ServiceDesc, csi).Build(tlsConfig, scope, clk) + cmd.FailOnError(err, "Unable to setup CRLStorer gRPC server") + + cmd.FailOnError(start(), "CRLStorer gRPC service failed") +} + +func init() { + cmd.RegisterCommand("crl-storer", main, &cmd.ConfigValidator{Config: &Config{}}) +} diff --git a/cmd/crl-updater/main.go b/cmd/crl-updater/main.go new file mode 100644 index 00000000000..dda581c6ba8 --- /dev/null +++ b/cmd/crl-updater/main.go @@ -0,0 +1,220 @@ +package notmain + +import ( + "context" + "errors" + "flag" + "os" + "time" + + "github.com/jmhodges/clock" + + capb "github.com/letsencrypt/boulder/ca/proto" + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" + cspb "github.com/letsencrypt/boulder/crl/storer/proto" + "github.com/letsencrypt/boulder/crl/updater" + "github.com/letsencrypt/boulder/features" + bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/issuance" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +type Config struct { + CRLUpdater struct { + DebugAddr string `validate:"omitempty,hostname_port"` + + // TLS client certificate, private key, and trusted root bundle. + TLS cmd.TLSConfig + + SAService *cmd.GRPCClientConfig + CRLGeneratorService *cmd.GRPCClientConfig + CRLStorerService *cmd.GRPCClientConfig + + // IssuerCerts is a list of paths to issuer certificates on disk. This + // controls the set of CRLs which will be published by this updater: it will + // publish one set of NumShards CRL shards for each issuer in this list. + IssuerCerts []string `validate:"min=1,dive,required"` + + // NumShards is the number of shards into which each issuer's "full and + // complete" CRL will be split. + // WARNING: When this number is changed, the "JSON Array of CRL URLs" field + // in CCADB MUST be updated. + NumShards int `validate:"min=1"` + + // ShardWidth is the amount of time (width on a timeline) that a single + // shard should cover. Ideally, NumShards*ShardWidth should be an amount of + // time noticeably larger than the current longest certificate lifetime, + // but the updater will continue to work if this is not the case (albeit + // with more confusing mappings of serials to shards). + // WARNING: When this number is changed, revocation entries will move + // between shards. + ShardWidth config.Duration `validate:"-"` + + // LookbackPeriod is how far back the updater should look for revoked expired + // certificates. 
We are required to include every revoked cert in at least + // one CRL, even if it is revoked seconds before it expires, so this must + // always be greater than the UpdatePeriod, and should be increased when + // recovering from an outage to ensure continuity of coverage. + LookbackPeriod config.Duration `validate:"-"` + + // UpdatePeriod controls how frequently the crl-updater runs and publishes + // new versions of every CRL shard. The Baseline Requirements, Section 4.9.7: + // "MUST update and publish a new CRL within twenty-four (24) hours after + // recording a Certificate as revoked." + UpdatePeriod config.Duration + + // UpdateTimeout controls how long a single CRL shard is allowed to attempt + // to update before being timed out. The total CRL updating process may take + // significantly longer, since a full update cycle may consist of updating + // many shards with varying degrees of parallelism. This value must be + // strictly less than the UpdatePeriod. Defaults to 10 minutes, one order + // of magnitude greater than our p99 update latency. + UpdateTimeout config.Duration `validate:"-"` + + // MaxParallelism controls how many workers may be running in parallel. + // A higher value reduces the total time necessary to update all CRL shards + // that this updater is responsible for, but also increases the memory used + // by this updater. Only relevant in -runOnce mode. + MaxParallelism int `validate:"min=0"` + + // MaxAttempts controls how many times the updater will attempt to generate + // a single CRL shard. A higher number increases the likelihood of a fully + // successful run, but also increases the worst-case runtime and db/network + // load of said run. The default is 1. + MaxAttempts int `validate:"omitempty,min=1"` + + // ExpiresMargin adds a small increment to the CRL's HTTP Expires time. + // + // When uploading a CRL, its Expires field in S3 is set to the expected time + // the next CRL will be uploaded (by this instance). That allows our CDN + // instances to cache for that long. However, since the next update might be + // slow or delayed, we add a margin of error. + // + // Tradeoffs: A large ExpiresMargin reduces the chance that a CRL becomes + // uncacheable and floods S3 with traffic (which might result in 503s while + // S3 scales out). + // + // A small ExpiresMargin means revocations become visible sooner, including + // admin-invoked revocations that may have a time requirement. + ExpiresMargin config.Duration + + // CacheControl is a string passed verbatim to the crl-storer to store on + // the S3 object. + // + // Note: if this header contains max-age, it will override + // Expires. https://www.rfc-editor.org/rfc/rfc9111.html#name-calculating-freshness-lifet + // Cache-Control: max-age has the disadvantage that it caches for a fixed + // amount of time, regardless of how close the CRL is to replacement. So + // if max-age is used, the worst-case time for a revocation to become visible + // is UpdatePeriod + the value of max age. 
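// As a worked example (illustrative numbers, not recommendations): with
// UpdatePeriod=6h and "Cache-Control: max-age=86400" (24h), a revocation
// could take up to 6h + 24h = 30h to become visible through a cache.
// Similarly, for the sharding fields above: at the default ShardWidth of
// 16h, covering a 90-day (2160h) certificate lifetime requires
// NumShards >= 2160/16 = 135.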
+ // + // The stale-if-error and stale-while-revalidate headers may be useful here: + // https://aws.amazon.com/about-aws/whats-new/2023/05/amazon-cloudfront-stale-while-revalidate-stale-if-error-cache-control-directives/ + // + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + CacheControl string + + Features features.Config + } + + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig +} + +func main() { + configFile := flag.String("config", "", "File path to the configuration file for this service") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + runOnce := flag.Bool("runOnce", false, "If true, run once immediately and then exit") + flag.Parse() + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + var c Config + err := cmd.ReadConfigFile(*configFile, &c) + cmd.FailOnError(err, "Reading JSON config file into config structure") + + if *debugAddr != "" { + c.CRLUpdater.DebugAddr = *debugAddr + } + + features.Set(c.CRLUpdater.Features) + + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.CRLUpdater.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + clk := clock.New() + + tlsConfig, err := c.CRLUpdater.TLS.Load(scope) + cmd.FailOnError(err, "TLS config") + + issuers := make([]*issuance.Certificate, 0, len(c.CRLUpdater.IssuerCerts)) + for _, filepath := range c.CRLUpdater.IssuerCerts { + cert, err := issuance.LoadCertificate(filepath) + cmd.FailOnError(err, "Failed to load issuer cert") + issuers = append(issuers, cert) + } + + if c.CRLUpdater.ShardWidth.Duration == 0 { + c.CRLUpdater.ShardWidth.Duration = 16 * time.Hour + } + if c.CRLUpdater.LookbackPeriod.Duration == 0 { + c.CRLUpdater.LookbackPeriod.Duration = 24 * time.Hour + } + if c.CRLUpdater.UpdateTimeout.Duration == 0 { + c.CRLUpdater.UpdateTimeout.Duration = 10 * time.Minute + } + + saConn, err := bgrpc.ClientSetup(c.CRLUpdater.SAService, tlsConfig, scope, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") + sac := sapb.NewStorageAuthorityClient(saConn) + + caConn, err := bgrpc.ClientSetup(c.CRLUpdater.CRLGeneratorService, tlsConfig, scope, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CRLGenerator") + cac := capb.NewCRLGeneratorClient(caConn) + + csConn, err := bgrpc.ClientSetup(c.CRLUpdater.CRLStorerService, tlsConfig, scope, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CRLStorer") + csc := cspb.NewCRLStorerClient(csConn) + + u, err := updater.NewUpdater( + issuers, + c.CRLUpdater.NumShards, + c.CRLUpdater.ShardWidth.Duration, + c.CRLUpdater.LookbackPeriod.Duration, + c.CRLUpdater.UpdatePeriod.Duration, + c.CRLUpdater.UpdateTimeout.Duration, + c.CRLUpdater.MaxParallelism, + c.CRLUpdater.MaxAttempts, + c.CRLUpdater.CacheControl, + c.CRLUpdater.ExpiresMargin.Duration, + sac, + cac, + csc, + scope, + logger, + clk, + ) + cmd.FailOnError(err, "Failed to create crl-updater") + + ctx, cancel := context.WithCancel(context.Background()) + go cmd.CatchSignals(cancel) + + if *runOnce { + err = u.RunOnce(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + cmd.FailOnError(err, "") + } + } else { + err = u.Run(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + cmd.FailOnError(err, "") + } + } +} + +func init() { + cmd.RegisterCommand("crl-updater", main, &cmd.ConfigValidator{Config: &Config{}}) +} diff --git a/cmd/email-exporter/main.go b/cmd/email-exporter/main.go new 
file mode 100644 index 00000000000..2a222b8b709 --- /dev/null +++ b/cmd/email-exporter/main.go @@ -0,0 +1,170 @@ +package notmain + +import ( + "context" + "flag" + "os" + + "github.com/jmhodges/clock" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/cmd" + bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/salesforce" + emailpb "github.com/letsencrypt/boulder/salesforce/email/proto" + salesforcepb "github.com/letsencrypt/boulder/salesforce/proto" +) + +// Config holds the configuration for the email-exporter service. +type Config struct { + EmailExporter struct { + cmd.ServiceConfig + + // PerDayLimit enforces the daily request limit imposed by the Pardot + // API. The total daily limit, which varies based on the Salesforce + // Pardot subscription tier, must be distributed among all + // email-exporter instances. For more information, see: + // https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#daily-requests-limits + PerDayLimit float64 `validate:"required,min=1"` + + // MaxConcurrentRequests enforces the concurrent request limit imposed + // by the Pardot API. This limit must be distributed among all + // email-exporter instances and be proportional to each instance's + // PerDayLimit. For example, if the total daily limit is 50,000 and one + // instance is assigned 40% (20,000 requests), it should also receive + // 40% of the max concurrent requests (2 out of 5). For more + // information, see: + // https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate+limits#concurrent-requests + MaxConcurrentRequests int `validate:"required,min=1,max=5"` + + // PardotBusinessUnit is the Pardot business unit to use. + PardotBusinessUnit string `validate:"required"` + + // ClientId is the OAuth API client ID provided by Salesforce. + ClientId cmd.PasswordConfig + + // ClientSecret is the OAuth API client secret provided by Salesforce. + ClientSecret cmd.PasswordConfig + + // SalesforceBaseURL is the base URL for the Salesforce API. (e.g., + // "https://company.salesforce.com") + SalesforceBaseURL string `validate:"required"` + + // PardotBaseURL is the base URL for the Pardot API. (e.g., + // "https://pi.pardot.com") + PardotBaseURL string `validate:"required"` + + // EmailCacheSize controls how many hashed email addresses are retained + // in memory to prevent duplicates from being sent to the Pardot API. + // Each entry consumes ~120 bytes, so 100,000 entries uses around 12 MB + // of memory. If left unset, no caching is performed. + EmailCacheSize int `validate:"omitempty,min=1"` + } + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig +} + +// legacyEmailExporterServer is an adapter that implements the email.Exporter +// gRPC interface by delegating to an inner salesforce.Exporter server. +// +// TODO(#8410): Remove legacyEmailExporterServer once fully migrated to +// salesforcepb.Exporter. +type legacyEmailExporterServer struct { + emailpb.UnimplementedExporterServer + inner salesforcepb.ExporterServer +} + +// SendContacts is an interface adapter that forwards the request to the same +// method on the inner salesforce.Exporter server. 
+func (s legacyEmailExporterServer) SendContacts(ctx context.Context, req *emailpb.SendContactsRequest) (*emptypb.Empty, error) { + return s.inner.SendContacts(ctx, &salesforcepb.SendContactsRequest{Emails: req.GetEmails()}) +} + +// SendCase is an interface adapter that forwards the request to the same method +// on the inner salesforce.Exporter server. +func (s legacyEmailExporterServer) SendCase(ctx context.Context, req *emailpb.SendCaseRequest) (*emptypb.Empty, error) { + return s.inner.SendCase(ctx, &salesforcepb.SendCaseRequest{ + Origin: req.GetOrigin(), + Subject: req.GetSubject(), + Description: req.GetDescription(), + ContactEmail: req.GetContactEmail(), + Organization: req.GetOrganization(), + AccountId: req.GetAccountId(), + RateLimitName: req.GetRateLimitName(), + RateLimitTier: req.GetRateLimitTier(), + UseCase: req.GetUseCase(), + }) +} + +func main() { + configFile := flag.String("config", "", "Path to configuration file") + grpcAddr := flag.String("addr", "", "gRPC listen address override") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + flag.Parse() + + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + var c Config + err := cmd.ReadConfigFile(*configFile, &c) + cmd.FailOnError(err, "Reading JSON config file into config structure") + + if *grpcAddr != "" { + c.EmailExporter.ServiceConfig.GRPC.Address = *grpcAddr + } + if *debugAddr != "" { + c.EmailExporter.ServiceConfig.DebugAddr = *debugAddr + } + + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.EmailExporter.ServiceConfig.DebugAddr) + defer oTelShutdown(context.Background()) + + cmd.LogStartup(logger) + + clk := clock.New() + clientId, err := c.EmailExporter.ClientId.Pass() + cmd.FailOnError(err, "Loading clientId") + clientSecret, err := c.EmailExporter.ClientSecret.Pass() + cmd.FailOnError(err, "Loading clientSecret") + + var cache *salesforce.EmailCache + if c.EmailExporter.EmailCacheSize > 0 { + cache = salesforce.NewHashedEmailCache(c.EmailExporter.EmailCacheSize, scope) + } + + sfClient, err := salesforce.NewSalesforceClientImpl( + clk, + c.EmailExporter.PardotBusinessUnit, + clientId, + clientSecret, + c.EmailExporter.SalesforceBaseURL, + c.EmailExporter.PardotBaseURL, + ) + cmd.FailOnError(err, "Creating Pardot API client") + server := salesforce.NewExporterImpl(sfClient, cache, c.EmailExporter.PerDayLimit, c.EmailExporter.MaxConcurrentRequests, scope, logger) + + tlsConfig, err := c.EmailExporter.TLS.Load(scope) + cmd.FailOnError(err, "Loading email-exporter TLS config") + + daemonCtx, shutdown := context.WithCancel(context.Background()) + go server.Start(daemonCtx) + + start, err := bgrpc.NewServer(c.EmailExporter.GRPC, logger).Add( + &salesforcepb.Exporter_ServiceDesc, server).Add( + // TODO(#8410): Remove emailpb.Exporter once fully migrated to + // salesforcepb.Exporter. 
+ &emailpb.Exporter_ServiceDesc, legacyEmailExporterServer{inner: server}).Build( + tlsConfig, scope, clk) + cmd.FailOnError(err, "Configuring email-exporter gRPC server") + + err = start() + shutdown() + server.Drain() + cmd.FailOnError(err, "email-exporter gRPC service failed to start") +} + +func init() { + cmd.RegisterCommand("email-exporter", main, &cmd.ConfigValidator{Config: &Config{}}) +} diff --git a/cmd/expiration-mailer/main.go b/cmd/expiration-mailer/main.go deleted file mode 100644 index 9e9a4d4a8a8..00000000000 --- a/cmd/expiration-mailer/main.go +++ /dev/null @@ -1,635 +0,0 @@ -package notmain - -import ( - "bytes" - "context" - "crypto/x509" - "encoding/json" - "errors" - "flag" - "fmt" - "io/ioutil" - "math" - netmail "net/mail" - "net/url" - "os" - "sort" - "strings" - "text/template" - "time" - - "github.com/honeycombio/beeline-go" - "github.com/jmhodges/clock" - "google.golang.org/grpc" - - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/db" - "github.com/letsencrypt/boulder/features" - bgrpc "github.com/letsencrypt/boulder/grpc" - blog "github.com/letsencrypt/boulder/log" - bmail "github.com/letsencrypt/boulder/mail" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - defaultNagCheckInterval = 24 * time.Hour - defaultExpirationSubject = "Let's Encrypt certificate expiration notice for domain {{.ExpirationSubject}}" -) - -type regStore interface { - GetRegistration(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) -} - -type mailer struct { - log blog.Logger - dbMap *db.WrappedMap - rs regStore - mailer bmail.Mailer - emailTemplate *template.Template - subjectTemplate *template.Template - nagTimes []time.Duration - limit int - clk clock.Clock - stats mailerStats -} - -type mailerStats struct { - nagsAtCapacity *prometheus.GaugeVec - errorCount *prometheus.CounterVec - renewalCount *prometheus.CounterVec - sendLatency prometheus.Histogram - processingLatency prometheus.Histogram -} - -func (m *mailer) sendNags(contacts []string, certs []*x509.Certificate) error { - if len(contacts) == 0 { - return nil - } - if len(certs) == 0 { - return errors.New("no certs given to send nags for") - } - emails := []string{} - for _, contact := range contacts { - parsed, err := url.Parse(contact) - if err != nil { - m.log.AuditErrf("parsing contact email %s: %s", contact, err) - continue - } - if parsed.Scheme == "mailto" { - emails = append(emails, parsed.Opaque) - } - } - if len(emails) == 0 { - return nil - } - - expiresIn := time.Duration(math.MaxInt64) - expDate := m.clk.Now() - domains := []string{} - serials := []string{} - - // Pick out the expiration date that is closest to being hit. - for _, cert := range certs { - domains = append(domains, cert.DNSNames...) 
- serials = append(serials, core.SerialToString(cert.SerialNumber)) - possible := cert.NotAfter.Sub(m.clk.Now()) - if possible < expiresIn { - expiresIn = possible - expDate = cert.NotAfter - } - } - domains = core.UniqueLowerNames(domains) - sort.Strings(domains) - m.log.Debugf("Sending mail for %s (%s)", strings.Join(domains, ", "), strings.Join(serials, ", ")) - - // Construct the information about the expiring certificates for use in the - // subject template - expiringSubject := fmt.Sprintf("%q", domains[0]) - if len(domains) > 1 { - expiringSubject += fmt.Sprintf(" (and %d more)", len(domains)-1) - } - - // Execute the subjectTemplate by filling in the ExpirationSubject - subjBuf := new(bytes.Buffer) - err := m.subjectTemplate.Execute(subjBuf, struct { - ExpirationSubject string - }{ - ExpirationSubject: expiringSubject, - }) - if err != nil { - m.stats.errorCount.With(prometheus.Labels{"type": "SubjectTemplateFailure"}).Inc() - return err - } - - email := struct { - ExpirationDate string - DaysToExpiration int - DNSNames string - }{ - ExpirationDate: expDate.UTC().Format(time.RFC822Z), - DaysToExpiration: int(expiresIn.Hours() / 24), - DNSNames: strings.Join(domains, "\n"), - } - msgBuf := new(bytes.Buffer) - err = m.emailTemplate.Execute(msgBuf, email) - if err != nil { - m.stats.errorCount.With(prometheus.Labels{"type": "TemplateFailure"}).Inc() - return err - } - - logItem := struct { - Rcpt []string - Serials []string - DaysToExpiration int - DNSNames []string - }{ - Rcpt: emails, - Serials: serials, - DaysToExpiration: email.DaysToExpiration, - DNSNames: domains, - } - logStr, err := json.Marshal(logItem) - if err != nil { - m.log.Errf("logItem could not be serialized to JSON. Raw: %+v", logItem) - return err - } - m.log.Infof("attempting send JSON=%s", string(logStr)) - - startSending := m.clk.Now() - err = m.mailer.SendMail(emails, subjBuf.String(), msgBuf.String()) - if err != nil { - m.log.Errf("failed send JSON=%s", string(logStr)) - return err - } - finishSending := m.clk.Now() - elapsed := finishSending.Sub(startSending) - m.stats.sendLatency.Observe(elapsed.Seconds()) - return nil -} - -func (m *mailer) updateCertStatus(serial string) error { - _, err := m.dbMap.Exec( - "UPDATE certificateStatus SET lastExpirationNagSent = ? WHERE serial = ?", - m.clk.Now(), serial) - return err -} - -func (m *mailer) certIsRenewed(names []string, issued time.Time) (bool, error) { - namehash := sa.HashNames(names) - - var present bool - err := m.dbMap.SelectOne( - &present, - // TODO(#5670): Remove this OR when the partitioning is fixed. - `SELECT EXISTS (SELECT id FROM fqdnSets WHERE setHash = ? AND issued > ? LIMIT 1) - OR EXISTS (SELECT id FROM fqdnSets_old WHERE setHash = ? AND issued > ? 
LIMIT 1)`, - namehash, - issued, - namehash, - issued, - ) - return present, err -} - -func (m *mailer) processCerts(ctx context.Context, allCerts []core.Certificate) { - regIDToCerts := make(map[int64][]core.Certificate) - - for _, cert := range allCerts { - cs := regIDToCerts[cert.RegistrationID] - cs = append(cs, cert) - regIDToCerts[cert.RegistrationID] = cs - } - - err := m.mailer.Connect() - if err != nil { - m.log.AuditErrf("Error connecting to send nag emails: %s", err) - return - } - defer func() { - _ = m.mailer.Close() - }() - - for regID, certs := range regIDToCerts { - reg, err := m.rs.GetRegistration(ctx, &sapb.RegistrationID{Id: regID}) - if err != nil { - m.log.AuditErrf("Error fetching registration %d: %s", regID, err) - m.stats.errorCount.With(prometheus.Labels{"type": "GetRegistration"}).Inc() - continue - } - - parsedCerts := []*x509.Certificate{} - for _, cert := range certs { - parsedCert, err := x509.ParseCertificate(cert.DER) - if err != nil { - // TODO(#1420): tell registration about this error - m.log.AuditErrf("Error parsing certificate %s: %s", cert.Serial, err) - m.stats.errorCount.With(prometheus.Labels{"type": "ParseCertificate"}).Inc() - continue - } - - renewed, err := m.certIsRenewed(parsedCert.DNSNames, parsedCert.NotBefore) - if err != nil { - m.log.AuditErrf("expiration-mailer: error fetching renewal state: %v", err) - // assume not renewed - } else if renewed { - m.log.Debugf("Cert %s is already renewed", cert.Serial) - m.stats.renewalCount.With(prometheus.Labels{}).Inc() - err := m.updateCertStatus(cert.Serial) - if err != nil { - m.log.AuditErrf("Error updating certificate status for %s: %s", cert.Serial, err) - m.stats.errorCount.With(prometheus.Labels{"type": "UpdateCertificateStatus"}).Inc() - } - continue - } - - parsedCerts = append(parsedCerts, parsedCert) - } - - if len(parsedCerts) == 0 { - // all certificates are renewed - continue - } - - if reg.Contact == nil { - continue - } - - err = m.sendNags(reg.Contact, parsedCerts) - if err != nil { - m.stats.errorCount.With(prometheus.Labels{"type": "SendNags"}).Inc() - m.log.AuditErrf("Error sending nag emails: %s", err) - continue - } - for _, cert := range parsedCerts { - serial := core.SerialToString(cert.SerialNumber) - err = m.updateCertStatus(serial) - if err != nil { - m.log.AuditErrf("Error updating certificate status for %s: %s", serial, err) - m.stats.errorCount.With(prometheus.Labels{"type": "UpdateCertificateStatus"}).Inc() - continue - } - } - } -} - -func (m *mailer) findExpiringCertificates(ctx context.Context) error { - now := m.clk.Now() - // E.g. m.nagTimes = [2, 4, 8, 15] days from expiration - for i, expiresIn := range m.nagTimes { - left := now - if i > 0 { - left = left.Add(m.nagTimes[i-1]) - } - right := now.Add(expiresIn) - - m.log.Infof("expiration-mailer: Searching for certificates that expire between %s and %s and had last nag >%s before expiry", - left.UTC(), right.UTC(), expiresIn) - - // First we do a query on the certificateStatus table to find certificates - // nearing expiry meeting our criteria for email notification. We later - // sequentially fetch the certificate details. This avoids an expensive - // JOIN. 
- var serials []string - _, err := m.dbMap.WithContext(ctx).Select( - &serials, - `SELECT - cs.serial - FROM certificateStatus AS cs - WHERE cs.notAfter > :cutoffA - AND cs.notAfter <= :cutoffB - AND cs.status != "revoked" - AND COALESCE(TIMESTAMPDIFF(SECOND, cs.lastExpirationNagSent, cs.notAfter) > :nagCutoff, 1) - ORDER BY cs.notAfter ASC - LIMIT :limit`, - map[string]interface{}{ - "cutoffA": left, - "cutoffB": right, - "nagCutoff": expiresIn.Seconds(), - "limit": m.limit, - }, - ) - if err != nil { - m.log.AuditErrf("expiration-mailer: Error loading certificate serials: %s", err) - return err - } - - // If the number of rows was exactly `m.limit` rows we need to increment - // a stat indicating that this nag group is at capacity based on the - // configured cert limit. If this condition continually occurs across mailer - // runs then we will not catch up, resulting in under-sending expiration - // mails. The effects of this were initially described in issue #2002[0]. - // - // 0: https://github.com/letsencrypt/boulder/issues/2002 - atCapacity := float64(0) - if len(serials) == m.limit { - m.log.Infof("nag group %s expiring certificates at configured capacity (select limit %d)", - expiresIn.String(), m.limit) - atCapacity = float64(1) - } - m.stats.nagsAtCapacity.With(prometheus.Labels{"nag_group": expiresIn.String()}).Set(atCapacity) - - // Now we can sequentially retrieve the certificate details for each of the - // certificate status rows - var certs []core.Certificate - for _, serial := range serials { - var cert core.Certificate - cert, err := sa.SelectCertificate(m.dbMap.WithContext(ctx), serial) - if err != nil { - // We can get a NoRowsErr when processing a serial number corresponding - // to a precertificate with no final certificate. Since this certificate - // is not being used by a subscriber, we don't send expiration email about - // it. - if db.IsNoRows(err) { - continue - } - m.log.AuditErrf("expiration-mailer: Error loading cert %q: %s", cert.Serial, err) - return err - } - certs = append(certs, cert) - } - - m.log.Infof("Found %d certificates expiring between %s and %s", len(certs), - left.Format("2006-01-02 03:04"), right.Format("2006-01-02 03:04")) - - if len(certs) == 0 { - continue // nothing to do - } - - processingStarted := m.clk.Now() - m.processCerts(ctx, certs) - processingEnded := m.clk.Now() - elapsed := processingEnded.Sub(processingStarted) - m.stats.processingLatency.Observe(elapsed.Seconds()) - } - - return nil -} - -type durationSlice []time.Duration - -func (ds durationSlice) Len() int { - return len(ds) -} - -func (ds durationSlice) Less(a, b int) bool { - return ds[a] < ds[b] -} - -func (ds durationSlice) Swap(a, b int) { - ds[a], ds[b] = ds[b], ds[a] -} - -type Config struct { - Mailer struct { - cmd.ServiceConfig - DB cmd.DBConfig - cmd.SMTPConfig - - From string - Subject string - - CertLimit int - NagTimes []string - // How much earlier (than configured nag intervals) to - // send reminders, to account for the expected delay - // before the next expiration-mailer invocation. - NagCheckInterval string - // Path to a text/template email template - EmailTemplate string - - Frequency cmd.ConfigDuration - - TLS cmd.TLSConfig - SAService *cmd.GRPCClientConfig - - // Path to a file containing a list of trusted root certificates for use - // during the SMTP connection (as opposed to the gRPC connections). 
- SMTPTrustedRootFile string - - Features map[string]bool - } - - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -func initStats(stats prometheus.Registerer) mailerStats { - nagsAtCapacity := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "nags_at_capacity", - Help: "Count of nag groups at capcacity", - }, - []string{"nag_group"}) - stats.MustRegister(nagsAtCapacity) - - errorCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "errors", - Help: "Number of errors", - }, - []string{"type"}) - stats.MustRegister(errorCount) - - renewalCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "renewals", - Help: "Number of messages skipped for being renewals", - }, - nil) - stats.MustRegister(renewalCount) - - sendLatency := prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "send_latency", - Help: "Time the mailer takes sending messages in seconds", - Buckets: metrics.InternetFacingBuckets, - }) - stats.MustRegister(sendLatency) - - processingLatency := prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "processing_latency", - Help: "Time the mailer takes processing certificates in seconds", - Buckets: []float64{1, 15, 30, 60, 75, 90, 120}, - }) - stats.MustRegister(processingLatency) - - return mailerStats{ - nagsAtCapacity: nagsAtCapacity, - errorCount: errorCount, - renewalCount: renewalCount, - sendLatency: sendLatency, - processingLatency: processingLatency, - } -} - -func main() { - configFile := flag.String("config", "", "File path to the configuration file for this service") - certLimit := flag.Int("cert_limit", 0, "Count of certificates to process per expiration period") - reconnBase := flag.Duration("reconnectBase", 1*time.Second, "Base sleep duration between reconnect attempts") - reconnMax := flag.Duration("reconnectMax", 5*60*time.Second, "Max sleep duration between reconnect attempts after exponential backoff") - daemon := flag.Bool("daemon", false, "Run in daemon mode") - - flag.Parse() - - if *configFile == "" { - flag.Usage() - os.Exit(1) - } - - var c Config - err := cmd.ReadConfigFile(*configFile, &c) - cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.Mailer.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - scope, logger := cmd.StatsAndLogging(c.Syslog, c.Mailer.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - if *certLimit > 0 { - c.Mailer.CertLimit = *certLimit - } - // Default to 100 if no certLimit is set - if c.Mailer.CertLimit == 0 { - c.Mailer.CertLimit = 100 - } - - dbMap, err := sa.InitWrappedDb(c.Mailer.DB, scope, logger) - cmd.FailOnError(err, "While initializing dbMap") - - tlsConfig, err := c.Mailer.TLS.Load() - cmd.FailOnError(err, "TLS config") - - clk := cmd.Clock() - - clientMetrics := bgrpc.NewClientMetrics(scope) - conn, err := bgrpc.ClientSetup(c.Mailer.SAService, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") - sac := sapb.NewStorageAuthorityClient(conn) - - var smtpRoots *x509.CertPool - if c.Mailer.SMTPTrustedRootFile != "" { - pem, err := ioutil.ReadFile(c.Mailer.SMTPTrustedRootFile) - cmd.FailOnError(err, "Loading trusted roots file") - smtpRoots = x509.NewCertPool() - if !smtpRoots.AppendCertsFromPEM(pem) { - cmd.FailOnError(nil, "Failed to parse root certs PEM") - } - } - - // Load email 
template - emailTmpl, err := ioutil.ReadFile(c.Mailer.EmailTemplate) - cmd.FailOnError(err, fmt.Sprintf("Could not read email template file [%s]", c.Mailer.EmailTemplate)) - tmpl, err := template.New("expiry-email").Parse(string(emailTmpl)) - cmd.FailOnError(err, "Could not parse email template") - - // If there is no configured subject template, use a default - if c.Mailer.Subject == "" { - c.Mailer.Subject = defaultExpirationSubject - } - // Load subject template - subjTmpl, err := template.New("expiry-email-subject").Parse(c.Mailer.Subject) - cmd.FailOnError(err, "Could not parse email subject template") - - fromAddress, err := netmail.ParseAddress(c.Mailer.From) - cmd.FailOnError(err, fmt.Sprintf("Could not parse from address: %s", c.Mailer.From)) - - smtpPassword, err := c.Mailer.PasswordConfig.Pass() - cmd.FailOnError(err, "Failed to load SMTP password") - mailClient := bmail.New( - c.Mailer.Server, - c.Mailer.Port, - c.Mailer.Username, - smtpPassword, - smtpRoots, - *fromAddress, - logger, - scope, - *reconnBase, - *reconnMax) - - nagCheckInterval := defaultNagCheckInterval - if s := c.Mailer.NagCheckInterval; s != "" { - nagCheckInterval, err = time.ParseDuration(s) - if err != nil { - logger.AuditErrf("Failed to parse NagCheckInterval string %q: %s", s, err) - return - } - } - - var nags durationSlice - for _, nagDuration := range c.Mailer.NagTimes { - dur, err := time.ParseDuration(nagDuration) - if err != nil { - logger.AuditErrf("Failed to parse nag duration string [%s]: %s", nagDuration, err) - return - } - nags = append(nags, dur+nagCheckInterval) - } - // Make sure durations are sorted in increasing order - sort.Sort(nags) - - m := mailer{ - log: logger, - dbMap: dbMap, - rs: sac, - mailer: mailClient, - subjectTemplate: subjTmpl, - emailTemplate: tmpl, - nagTimes: nags, - limit: c.Mailer.CertLimit, - clk: clk, - stats: initStats(scope), - } - - // Prefill this labelled stat with the possible label values, so each value is - // set to 0 on startup, rather than being missing from stats collection until - // the first mail run. 
- for _, expiresIn := range nags { - m.stats.nagsAtCapacity.With(prometheus.Labels{"nag_group": expiresIn.String()}).Set(0) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go cmd.CatchSignals(logger, func() { - fmt.Printf("exiting\n") - cancel() - select {} // wait for the `findExpiringCertificates` calls below to exit - }) - - if *daemon { - if c.Mailer.Frequency.Duration == 0 { - fmt.Fprintln(os.Stderr, "mailer.runPeriod is not set") - os.Exit(1) - } - t := time.NewTicker(c.Mailer.Frequency.Duration) - for { - select { - case <-t.C: - err = m.findExpiringCertificates(ctx) - cmd.FailOnError(err, "expiration-mailer has failed") - case <-ctx.Done(): - os.Exit(0) - } - } - } else { - err = m.findExpiringCertificates(ctx) - cmd.FailOnError(err, "expiration-mailer has failed") - } -} - -func init() { - cmd.RegisterCommand("expiration-mailer", main) -} diff --git a/cmd/expiration-mailer/main_test.go b/cmd/expiration-mailer/main_test.go deleted file mode 100644 index 4ea208fecc3..00000000000 --- a/cmd/expiration-mailer/main_test.go +++ /dev/null @@ -1,837 +0,0 @@ -package notmain - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "fmt" - "math/big" - "net" - "strings" - "testing" - "text/template" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/db" - berrors "github.com/letsencrypt/boulder/errors" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/mocks" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/sa/satest" - "github.com/letsencrypt/boulder/test" - isa "github.com/letsencrypt/boulder/test/inmem/sa" - "github.com/letsencrypt/boulder/test/vars" - "github.com/prometheus/client_golang/prometheus" - io_prometheus_client "github.com/prometheus/client_model/go" - "google.golang.org/grpc" -) - -func bigIntFromB64(b64 string) *big.Int { - bytes, _ := base64.URLEncoding.DecodeString(b64) - x := big.NewInt(0) - x.SetBytes(bytes) - return x -} - -func intFromB64(b64 string) int { - return int(bigIntFromB64(b64).Int64()) -} - -type fakeRegStore struct { - RegByID map[int64]*corepb.Registration -} - -func (f fakeRegStore) GetRegistration(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) { - r, ok := f.RegByID[req.Id] - if !ok { - return r, berrors.NotFoundError("no registration found for %q", req.Id) - } - return r, nil -} - -func newFakeRegStore() fakeRegStore { - return fakeRegStore{RegByID: make(map[int64]*corepb.Registration)} -} - -func newFakeClock(t *testing.T) clock.FakeClock { - const fakeTimeFormat = "2006-01-02T15:04:05.999999999Z" - ft, err := time.Parse(fakeTimeFormat, fakeTimeFormat) - if err != nil { - t.Fatal(err) - } - fc := clock.NewFake() - fc.Set(ft.UTC()) - return fc -} - -const testTmpl = `hi, cert for DNS names {{.DNSNames}} is going to expire in {{.DaysToExpiration}} days ({{.ExpirationDate}})` -const testEmailSubject = `email subject for test` -const emailARaw = "rolandshoemaker@gmail.com" -const emailBRaw = "test@gmail.com" - -var ( - emailA = "mailto:" + emailARaw - emailB = "mailto:" + emailBRaw - jsonKeyA = []byte(`{ - "kty":"RSA", - 
"n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw", - "e":"AQAB" -}`) - jsonKeyB = []byte(`{ - "kty":"RSA", - "n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w", - "e":"AAEAAQ" -}`) - jsonKeyC = []byte(`{ - "kty":"RSA", - "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ", - "e":"AQAB" -}`) - log = blog.UseMock() - tmpl = template.Must(template.New("expiry-email").Parse(testTmpl)) - subjTmpl = template.Must(template.New("expiry-email-subject").Parse("Testing: " + defaultExpirationSubject)) - ctx = context.Background() -) - -func TestSendNags(t *testing.T) { - mc := mocks.Mailer{} - rs := newFakeRegStore() - fc := newFakeClock(t) - - staticTmpl := template.Must(template.New("expiry-email-subject-static").Parse(testEmailSubject)) - - m := mailer{ - log: log, - mailer: &mc, - emailTemplate: tmpl, - // Explicitly override the default subject to use testEmailSubject - subjectTemplate: staticTmpl, - rs: rs, - clk: fc, - stats: initStats(metrics.NoopRegisterer), - } - - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0x0304), - Subject: pkix.Name{ - CommonName: "happy", - }, - NotAfter: fc.Now().AddDate(0, 0, 2), - DNSNames: []string{"example.com"}, - } - - err := m.sendNags([]string{emailA}, []*x509.Certificate{cert}) - test.AssertNotError(t, err, "Failed to send warning messages") - test.AssertEquals(t, len(mc.Messages), 1) - test.AssertEquals(t, mocks.MailerMessage{ - To: emailARaw, - Subject: testEmailSubject, - Body: fmt.Sprintf(`hi, cert for DNS names example.com is going to expire in 2 days (%s)`, cert.NotAfter.Format(time.RFC822Z)), - }, mc.Messages[0]) - - mc.Clear() - err = m.sendNags([]string{emailA, emailB}, []*x509.Certificate{cert}) - test.AssertNotError(t, err, "Failed to send warning messages") - test.AssertEquals(t, len(mc.Messages), 2) - test.AssertEquals(t, mocks.MailerMessage{ - To: emailARaw, - Subject: testEmailSubject, - Body: fmt.Sprintf(`hi, cert for DNS names example.com is going to expire in 2 days (%s)`, cert.NotAfter.Format(time.RFC822Z)), - }, mc.Messages[0]) - test.AssertEquals(t, mocks.MailerMessage{ - To: emailBRaw, - Subject: testEmailSubject, - Body: fmt.Sprintf(`hi, cert for DNS names example.com is going to expire in 2 days (%s)`, cert.NotAfter.Format(time.RFC822Z)), - }, mc.Messages[1]) - - mc.Clear() - err = m.sendNags([]string{}, []*x509.Certificate{cert}) - test.AssertNotError(t, err, "Not an error to pass no email contacts") - test.AssertEquals(t, len(mc.Messages), 0) - - sendLogs := log.GetAllMatching("INFO: attempting send JSON=.*") - if len(sendLogs) != 2 { - t.Errorf("expected 2 'attempting send' log line, got %d: %s", len(sendLogs), strings.Join(sendLogs, "\n")) - } - 
if !strings.Contains(sendLogs[0], `"Rcpt":["rolandshoemaker@gmail.com"]`) { - t.Errorf("expected first 'attempting send' log line to have one address, got %q", sendLogs[0]) - } - if !strings.Contains(sendLogs[0], `"Serials":["000000000000000000000000000000000304"]`) { - t.Errorf("expected first 'attempting send' log line to have one serial, got %q", sendLogs[0]) - } - if !strings.Contains(sendLogs[0], `"DaysToExpiration":2`) { - t.Errorf("expected first 'attempting send' log line to have 2 days to expiration, got %q", sendLogs[0]) - } - if !strings.Contains(sendLogs[0], `"DNSNames":["example.com"]`) { - t.Errorf("expected first 'attempting send' log line to have 1 domain, 'example.com', got %q", sendLogs[0]) - } -} - -var n = bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==") -var e = intFromB64("AQAB") -var d = bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==") -var p = bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") -var q = bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - -var serial1 = big.NewInt(0x1336) -var serial1String = core.SerialToString(serial1) -var serial2 = big.NewInt(0x1337) -var serial2String = core.SerialToString(serial2) -var serial3 = big.NewInt(0x1338) -var serial3String = core.SerialToString(serial3) -var serial4 = big.NewInt(0x1339) -var serial4String = core.SerialToString(serial4) -var serial5 = big.NewInt(0x1340) -var serial5String = core.SerialToString(serial5) -var serial6 = big.NewInt(0x1341) -var serial7 = big.NewInt(0x1342) -var serial8 = big.NewInt(0x1343) -var serial9 = big.NewInt(0x1344) - -var testKey = rsa.PrivateKey{ - PublicKey: rsa.PublicKey{N: n, E: e}, - D: d, - Primes: []*big.Int{p, q}, -} - -func TestProcessCerts(t *testing.T) { - testCtx := setup(t, []time.Duration{time.Hour * 24 * 7}) - - certs := addExpiringCerts(t, testCtx) - log.Clear() - testCtx.m.processCerts(context.Background(), certs) - // Test that the lastExpirationNagSent was updated for the certificate - // corresponding to serial4, which is set up as "already renewed" by - // addExpiringCerts. 
- if len(log.GetAllMatching("DEBUG: SQL: UPDATE certificateStatus .*2006-01-02 15:04:05.999999999.*\"000000000000000000000000000000001339\"")) != 1 { - t.Errorf("Expected an update to certificateStatus, got these log lines:\n%s", - strings.Join(log.GetAllMatching(".*"), "\n")) - } -} - -func TestFindExpiringCertificates(t *testing.T) { - testCtx := setup(t, []time.Duration{time.Hour * 24, time.Hour * 24 * 4, time.Hour * 24 * 7}) - - addExpiringCerts(t, testCtx) - - log.Clear() - err := testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "Failed on no certificates") - test.AssertEquals(t, len(log.GetAllMatching("Searching for certificates that expire between.*")), 3) - - log.Clear() - err = testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "Failed to find expiring certs") - // Should get 001 and 003 - test.AssertEquals(t, len(testCtx.mc.Messages), 2) - - test.AssertEquals(t, mocks.MailerMessage{ - To: emailARaw, - // A certificate with only one domain should have only one domain listed in - // the subject - Subject: "Testing: Let's Encrypt certificate expiration notice for domain \"example-a.com\"", - Body: "hi, cert for DNS names example-a.com is going to expire in 0 days (03 Jan 06 14:04 +0000)", - }, testCtx.mc.Messages[0]) - test.AssertEquals(t, mocks.MailerMessage{ - To: emailBRaw, - // A certificate with two domains should have only one domain listed and an - // additional count included - Subject: "Testing: Let's Encrypt certificate expiration notice for domain \"another.example-c.com\" (and 1 more)", - Body: "hi, cert for DNS names another.example-c.com\nexample-c.com is going to expire in 7 days (09 Jan 06 16:04 +0000)", - }, testCtx.mc.Messages[1]) - - // Check that regC's only certificate being renewed does not cause a log - test.AssertEquals(t, len(log.GetAllMatching("no certs given to send nags for")), 0) - - // A consecutive run shouldn't find anything - testCtx.mc.Clear() - log.Clear() - err = testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "Failed to find expiring certs") - test.AssertEquals(t, len(testCtx.mc.Messages), 0) -} - -func addExpiringCerts(t *testing.T, ctx *testCtx) []core.Certificate { - // Add some expiring certificates and registrations - ipA, _ := net.ParseIP("2.3.2.3").MarshalText() - regA := &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: ipA, - } - regB := &corepb.Registration{ - Id: 2, - Contact: []string{emailB}, - Key: jsonKeyB, - InitialIP: ipA, - } - ipC, _ := net.ParseIP("210.3.2.3").MarshalText() - regC := &corepb.Registration{ - Id: 3, - Contact: []string{emailB}, - Key: jsonKeyC, - InitialIP: ipC, - } - bg := context.Background() - regA, err := ctx.ssa.NewRegistration(bg, regA) - test.AssertNotError(t, err, "Couldn't store regA") - regB, err = ctx.ssa.NewRegistration(bg, regB) - test.AssertNotError(t, err, "Couldn't store regB") - regC, err = ctx.ssa.NewRegistration(bg, regC) - test.AssertNotError(t, err, "Couldn't store regC") - - // Expires in <1d, last nag was the 4d nag - rawCertA := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy A", - }, - NotAfter: ctx.fc.Now().Add(23 * time.Hour), - DNSNames: []string{"example-a.com"}, - SerialNumber: serial1, - } - certDerA, _ := x509.CreateCertificate(rand.Reader, &rawCertA, &rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, 
- } - - // Expires in 3d, already sent 4d nag at 4.5d - rawCertB := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy B", - }, - NotAfter: ctx.fc.Now().AddDate(0, 0, 3), - DNSNames: []string{"example-b.com"}, - SerialNumber: serial2, - } - certDerB, _ := x509.CreateCertificate(rand.Reader, &rawCertB, &rawCertB, &testKey.PublicKey, &testKey) - certB := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial2String, - Expires: rawCertB.NotAfter, - DER: certDerB, - } - - // Expires in 7d and change, no nag sent at all yet - rawCertC := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy C", - }, - NotAfter: ctx.fc.Now().Add((7*24 + 1) * time.Hour), - DNSNames: []string{"example-c.com", "another.example-c.com"}, - SerialNumber: serial3, - } - certDerC, _ := x509.CreateCertificate(rand.Reader, &rawCertC, &rawCertC, &testKey.PublicKey, &testKey) - certC := &core.Certificate{ - RegistrationID: regB.Id, - Serial: serial3String, - Expires: rawCertC.NotAfter, - DER: certDerC, - } - - // Expires in 3d, renewed - rawCertD := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy D", - }, - NotAfter: ctx.fc.Now().AddDate(0, 0, 3), - DNSNames: []string{"example-d.com"}, - SerialNumber: serial4, - } - certDerD, _ := x509.CreateCertificate(rand.Reader, &rawCertD, &rawCertD, &testKey.PublicKey, &testKey) - certD := &core.Certificate{ - RegistrationID: regC.Id, - Serial: serial4String, - Expires: rawCertD.NotAfter, - DER: certDerD, - } - fqdnStatusD := &core.FQDNSet{ - SetHash: sa.HashNames(rawCertD.DNSNames), - Serial: serial4String, - Issued: ctx.fc.Now().AddDate(0, 0, -87), - Expires: ctx.fc.Now().AddDate(0, 0, 3), - } - fqdnStatusDRenewed := &core.FQDNSet{ - SetHash: sa.HashNames(rawCertD.DNSNames), - Serial: serial5String, - Issued: ctx.fc.Now().AddDate(0, 0, -3), - Expires: ctx.fc.Now().AddDate(0, 0, 87), - } - - setupDBMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "sa.NewDbMap failed") - err = setupDBMap.Insert(certA) - test.AssertNotError(t, err, "Couldn't add certA") - err = setupDBMap.Insert(certB) - test.AssertNotError(t, err, "Couldn't add certB") - err = setupDBMap.Insert(certC) - test.AssertNotError(t, err, "Couldn't add certC") - err = setupDBMap.Insert(certD) - test.AssertNotError(t, err, "Couldn't add certD") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, lastExpirationNagSent, status, notAfter, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial1String, ctx.fc.Now().AddDate(0, 0, -3), string(core.OCSPStatusGood), rawCertA.NotAfter, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusA") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, lastExpirationNagSent, status, notAfter, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial2String, ctx.fc.Now().Add(-36*time.Hour), string(core.OCSPStatusGood), rawCertB.NotAfter, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusB") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, status, notAfter, lastExpirationNagSent, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial3String, string(core.OCSPStatusGood), rawCertC.NotAfter, time.Time{}, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusC") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, status, notAfter, lastExpirationNagSent, ocspLastUpdated, revokedDate, revokedReason) VALUES 
(?,?,?,?,?,?,?)", serial4String, string(core.OCSPStatusGood), rawCertD.NotAfter, time.Time{}, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusD") - err = setupDBMap.Insert(fqdnStatusD) - test.AssertNotError(t, err, "Couldn't add fqdnStatusD") - err = setupDBMap.Insert(fqdnStatusDRenewed) - test.AssertNotError(t, err, "Couldn't add fqdnStatusDRenewed") - return []core.Certificate{*certA, *certB, *certC, *certD} -} - -func countGroupsAtCapacity(group string, counter *prometheus.GaugeVec) int { - ch := make(chan prometheus.Metric, 10) - counter.With(prometheus.Labels{"nag_group": group}).Collect(ch) - m := <-ch - var iom io_prometheus_client.Metric - _ = m.Write(&iom) - return int(iom.Gauge.GetValue()) -} - -func TestFindCertsAtCapacity(t *testing.T) { - testCtx := setup(t, []time.Duration{time.Hour * 24}) - - addExpiringCerts(t, testCtx) - - log.Clear() - - // Set the limit to 1 so we are "at capacity" with one result - testCtx.m.limit = 1 - - err := testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "Failed to find expiring certs") - test.AssertEquals(t, len(testCtx.mc.Messages), 1) - - // The "48h0m0s" nag group should have its prometheus stat incremented once. - // Note: this is not the 24h0m0s nag as you would expect sending time.Hour - // * 24 to setup() for the nag duration. This is because all of the nags are - // offset by defaultNagCheckInterval, which is 24hrs. - test.AssertEquals(t, countGroupsAtCapacity("48h0m0s", testCtx.m.stats.nagsAtCapacity), 1) - - // A consecutive run shouldn't find anything - similarly we do not EXPECT() - // anything on statter to be called, and if it is then we have a test failure - testCtx.mc.Clear() - log.Clear() - err = testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "Failed to find expiring certs") - test.AssertEquals(t, len(testCtx.mc.Messages), 0) - - // The "48h0m0s" nag group should now be reporting that it isn't at capacity - test.AssertEquals(t, countGroupsAtCapacity("48h0m0s", testCtx.m.stats.nagsAtCapacity), 0) -} - -func TestCertIsRenewed(t *testing.T) { - testCtx := setup(t, []time.Duration{time.Hour * 24, time.Hour * 24 * 4, time.Hour * 24 * 7}) - - reg := satest.CreateWorkingRegistration(t, testCtx.ssa) - - testCerts := []*struct { - Serial *big.Int - stringSerial string - DNS []string - NotBefore time.Time - NotAfter time.Time - // this field is the test assertion - IsRenewed bool - }{ - { - Serial: serial1, - DNS: []string{"a.example.com", "a2.example.com"}, - NotBefore: testCtx.fc.Now().Add((-1 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((89 * 24) * time.Hour), - IsRenewed: true, - }, - { - Serial: serial2, - DNS: []string{"a.example.com", "a2.example.com"}, - NotBefore: testCtx.fc.Now().Add((0 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((90 * 24) * time.Hour), - IsRenewed: false, - }, - { - Serial: serial3, - DNS: []string{"b.example.net"}, - NotBefore: testCtx.fc.Now().Add((0 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((90 * 24) * time.Hour), - IsRenewed: false, - }, - { - Serial: serial4, - DNS: []string{"c.example.org"}, - NotBefore: testCtx.fc.Now().Add((-100 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((-10 * 24) * time.Hour), - IsRenewed: true, - }, - { - Serial: serial5, - DNS: []string{"c.example.org"}, - NotBefore: testCtx.fc.Now().Add((-80 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((10 * 24) * time.Hour), - IsRenewed: true, - }, - { - Serial: serial6, - DNS: 
[]string{"c.example.org"}, - NotBefore: testCtx.fc.Now().Add((-75 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((15 * 24) * time.Hour), - IsRenewed: true, - }, - { - Serial: serial7, - DNS: []string{"c.example.org"}, - NotBefore: testCtx.fc.Now().Add((-1 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((89 * 24) * time.Hour), - IsRenewed: false, - }, - { - Serial: serial8, - DNS: []string{"d.example.com", "d2.example.com"}, - NotBefore: testCtx.fc.Now().Add((-1 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((89 * 24) * time.Hour), - IsRenewed: false, - }, - { - Serial: serial9, - DNS: []string{"d.example.com", "d2.example.com", "d3.example.com"}, - NotBefore: testCtx.fc.Now().Add((0 * 24) * time.Hour), - NotAfter: testCtx.fc.Now().Add((90 * 24) * time.Hour), - IsRenewed: false, - }, - } - - setupDBMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - if err != nil { - t.Fatal(err) - } - - for _, testData := range testCerts { - testData.stringSerial = core.SerialToString(testData.Serial) - - rawCert := x509.Certificate{ - Subject: pkix.Name{ - CommonName: testData.DNS[0], - }, - NotBefore: testData.NotBefore, - NotAfter: testData.NotAfter, - DNSNames: testData.DNS, - SerialNumber: testData.Serial, - } - certDer, err := x509.CreateCertificate(rand.Reader, &rawCert, &rawCert, &testKey.PublicKey, &testKey) - if err != nil { - t.Fatal(err) - } - cert := &core.Certificate{ - RegistrationID: reg.Id, - Serial: testData.stringSerial, - Issued: testData.NotBefore, - Expires: testData.NotAfter, - DER: certDer, - } - fqdnStatus := &core.FQDNSet{ - SetHash: sa.HashNames(testData.DNS), - Serial: testData.stringSerial, - Issued: testData.NotBefore, - Expires: testData.NotAfter, - } - - err = setupDBMap.Insert(cert) - test.AssertNotError(t, err, fmt.Sprintf("Couldn't add cert %s", testData.stringSerial)) - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, status, lastExpirationNagSent, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?)", fmt.Sprintf("%x", testData.Serial.Bytes()), string(core.OCSPStatusGood), time.Time{}, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, fmt.Sprintf("Couldn't add certStatus %s", testData.stringSerial)) - err = setupDBMap.Insert(fqdnStatus) - test.AssertNotError(t, err, fmt.Sprintf("Couldn't add fqdnStatus %s", testData.stringSerial)) - } - - for _, testData := range testCerts { - renewed, err := testCtx.m.certIsRenewed(testData.DNS, testData.NotBefore) - if err != nil { - t.Errorf("error checking renewal state for %s: %v", testData.stringSerial, err) - continue - } - if renewed != testData.IsRenewed { - t.Errorf("for %s: got %v, expected %v", testData.stringSerial, renewed, testData.IsRenewed) - } - } -} - -func TestLifetimeOfACert(t *testing.T) { - testCtx := setup(t, []time.Duration{time.Hour * 24, time.Hour * 24 * 4, time.Hour * 24 * 7}) - defer testCtx.cleanUp() - - ipA, err := net.ParseIP("1.2.2.1").MarshalText() - test.AssertNotError(t, err, "Couldn't create initialIP") - regA := &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: ipA, - } - regA, err = testCtx.ssa.NewRegistration(ctx, regA) - test.AssertNotError(t, err, "Couldn't store regA") - rawCertA := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy A", - }, - - NotAfter: testCtx.fc.Now(), - DNSNames: []string{"example-a.com"}, - SerialNumber: serial1, - } - certDerA, _ := x509.CreateCertificate(rand.Reader, &rawCertA, &rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - 
RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, - } - - setupDBMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "sa.NewDbMap failed") - err = setupDBMap.Insert(certA) - test.AssertNotError(t, err, "unable to insert Certificate") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, status, notAfter, lastExpirationNagSent, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial1String, string(core.OCSPStatusGood), rawCertA.NotAfter, time.Time{}, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "unable to insert CertificateStatus") - - type lifeTest struct { - timeLeft time.Duration - numMsgs int - context string - } - tests := []lifeTest{ - { - timeLeft: 9 * 24 * time.Hour, // 9 days before expiration - - numMsgs: 0, - context: "Expected no emails sent because we are more than 7 days out.", - }, - { - (7*24 + 12) * time.Hour, // 7.5 days before - 1, - "Sent 1 for 7 day notice.", - }, - { - 7 * 24 * time.Hour, - 1, - "The 7 day email was already sent.", - }, - { - (4*24 - 1) * time.Hour, // <4 days before, the mailer did not run yesterday - 2, - "Sent 1 for the 7 day notice, and 1 for the 4 day notice.", - }, - { - 36 * time.Hour, // within 1day + nagMargin - 3, - "Sent 1 for the 7 day notice, 1 for the 4 day notice, and 1 for the 1 day notice.", - }, - { - 12 * time.Hour, - 3, - "The 1 day before email was already sent.", - }, - { - -2 * 24 * time.Hour, // 2 days after expiration - 3, - "No expiration warning emails are sent after expiration", - }, - } - - for _, tt := range tests { - testCtx.fc.Add(-tt.timeLeft) - err = testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "error calling findExpiringCertificates") - if len(testCtx.mc.Messages) != tt.numMsgs { - t.Errorf(tt.context+" number of messages: expected %d, got %d", tt.numMsgs, len(testCtx.mc.Messages)) - } - testCtx.fc.Add(tt.timeLeft) - } -} - -func TestDontFindRevokedCert(t *testing.T) { - expiresIn := 24 * time.Hour - testCtx := setup(t, []time.Duration{expiresIn}) - - emailA := "mailto:one@mail.com" - - ipA, err := net.ParseIP("1.2.2.1").MarshalText() - test.AssertNotError(t, err, "Couldn't create initialIP") - regA := &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: ipA, - } - regA, err = testCtx.ssa.NewRegistration(ctx, regA) - test.AssertNotError(t, err, "Couldn't store regA") - rawCertA := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy A", - }, - - NotAfter: testCtx.fc.Now().Add(expiresIn), - DNSNames: []string{"example-a.com"}, - SerialNumber: serial1, - } - certDerA, _ := x509.CreateCertificate(rand.Reader, &rawCertA, &rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, - } - - setupDBMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "sa.NewDbMap failed") - err = setupDBMap.Insert(certA) - test.AssertNotError(t, err, "unable to insert Certificate") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial,status, lastExpirationNagSent, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?)", serial1String, string(core.OCSPStatusRevoked), time.Time{}, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "unable to insert CertificateStatus") - - err = testCtx.m.findExpiringCertificates(context.Background()) - 
test.AssertNotError(t, err, "err from findExpiringCertificates") - - if len(testCtx.mc.Messages) != 0 { - t.Errorf("no emails should have been sent, but sent %d", len(testCtx.mc.Messages)) - } -} - -func TestDedupOnRegistration(t *testing.T) { - expiresIn := 96 * time.Hour - testCtx := setup(t, []time.Duration{expiresIn}) - - ipA, err := net.ParseIP("1.2.2.1").MarshalText() - test.AssertNotError(t, err, "Couldn't create initialIP") - regA := &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: ipA, - } - regA, err = testCtx.ssa.NewRegistration(ctx, regA) - test.AssertNotError(t, err, "Couldn't store regA") - rawCertA := newX509Cert("happy A", - testCtx.fc.Now().Add(72*time.Hour), - []string{"example-a.com", "shared-example.com"}, - serial1, - ) - - certDerA, _ := x509.CreateCertificate(rand.Reader, rawCertA, rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, - } - - rawCertB := newX509Cert("happy B", - testCtx.fc.Now().Add(48*time.Hour), - []string{"example-b.com", "shared-example.com"}, - serial2, - ) - certDerB, _ := x509.CreateCertificate(rand.Reader, rawCertB, rawCertB, &testKey.PublicKey, &testKey) - certB := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial2String, - Expires: rawCertB.NotAfter, - DER: certDerB, - } - - setupDBMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "sa.NewDbMap failed") - err = setupDBMap.Insert(certA) - test.AssertNotError(t, err, "Couldn't add certA") - err = setupDBMap.Insert(certB) - test.AssertNotError(t, err, "Couldn't add certB") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, lastExpirationNagSent, status, notAfter, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial1String, time.Unix(0, 0), string(core.OCSPStatusGood), rawCertA.NotAfter, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusA") - _, err = setupDBMap.Exec("INSERT INTO certificateStatus (serial, lastExpirationNagSent, status, notAfter, ocspLastUpdated, revokedDate, revokedReason) VALUES (?,?,?,?,?,?,?)", serial2String, time.Unix(0, 0), string(core.OCSPStatusGood), rawCertB.NotAfter, time.Time{}, time.Time{}, 0) - test.AssertNotError(t, err, "Couldn't add certStatusB") - - err = testCtx.m.findExpiringCertificates(context.Background()) - test.AssertNotError(t, err, "error calling findExpiringCertificates") - if len(testCtx.mc.Messages) > 1 { - t.Errorf("num of messages, want %d, got %d", 1, len(testCtx.mc.Messages)) - } - if len(testCtx.mc.Messages) == 0 { - t.Fatalf("no messages sent") - } - domains := "example-a.com\nexample-b.com\nshared-example.com" - expected := mocks.MailerMessage{ - To: emailARaw, - // A certificate with three domain names should have one in the subject and - // a count of '2 more' at the end - Subject: "Testing: Let's Encrypt certificate expiration notice for domain \"example-a.com\" (and 2 more)", - Body: fmt.Sprintf(`hi, cert for DNS names %s is going to expire in 1 days (%s)`, - domains, - rawCertB.NotAfter.Format(time.RFC822Z)), - } - test.AssertEquals(t, expected, testCtx.mc.Messages[0]) -} - -type testCtx struct { - dbMap *db.WrappedMap - ssa sapb.StorageAuthorityClient - mc *mocks.Mailer - fc clock.FakeClock - m *mailer - cleanUp func() -} - -func setup(t *testing.T, nagTimes []time.Duration) *testCtx { - // We use the test_setup user (which has full permissions to 
everything) - // because the SA we return is used for inserting data to set up the test. - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - if err != nil { - t.Fatalf("Couldn't connect the database: %s", err) - } - - fc := newFakeClock(t) - ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, nil, fc, log, metrics.NoopRegisterer, 1) - if err != nil { - t.Fatalf("unable to create SQLStorageAuthority: %s", err) - } - cleanUp := test.ResetSATestDatabase(t) - - mc := &mocks.Mailer{} - - offsetNags := make([]time.Duration, len(nagTimes)) - for i, t := range nagTimes { - offsetNags[i] = t + defaultNagCheckInterval - } - - m := &mailer{ - log: log, - mailer: mc, - emailTemplate: tmpl, - subjectTemplate: subjTmpl, - dbMap: dbMap, - rs: isa.SA{Impl: ssa}, - nagTimes: offsetNags, - limit: 100, - clk: fc, - stats: initStats(metrics.NoopRegisterer), - } - return &testCtx{ - dbMap: dbMap, - ssa: isa.SA{Impl: ssa}, - mc: mc, - fc: fc, - m: m, - cleanUp: cleanUp, - } -} diff --git a/cmd/expiration-mailer/send_test.go b/cmd/expiration-mailer/send_test.go deleted file mode 100644 index 80c5a7e3f3b..00000000000 --- a/cmd/expiration-mailer/send_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package notmain - -import ( - "crypto/x509" - "crypto/x509/pkix" - "fmt" - "math/big" - "testing" - "time" - - "github.com/letsencrypt/boulder/mocks" - "github.com/letsencrypt/boulder/test" -) - -var ( - email1 = "mailto:one@example.com" - email2 = "mailto:two@example.com" -) - -func TestSendEarliestCertInfo(t *testing.T) { - expiresIn := 24 * time.Hour - ctx := setup(t, []time.Duration{expiresIn}) - defer ctx.cleanUp() - - rawCertA := newX509Cert("happy A", - ctx.fc.Now().AddDate(0, 0, 5), - []string{"example-A.com", "SHARED-example.com"}, - serial1, - ) - rawCertB := newX509Cert("happy B", - ctx.fc.Now().AddDate(0, 0, 2), - []string{"shared-example.com", "example-b.com"}, - serial2, - ) - - err := ctx.m.sendNags([]string{email1, email2}, []*x509.Certificate{rawCertA, rawCertB}) - if err != nil { - t.Fatal(err) - } - if len(ctx.mc.Messages) != 2 { - t.Errorf("num of messages, want %d, got %d", 2, len(ctx.mc.Messages)) - } - if len(ctx.mc.Messages) == 0 { - t.Fatalf("no message sent") - } - domains := "example-a.com\nexample-b.com\nshared-example.com" - expected := mocks.MailerMessage{ - Subject: "Testing: Let's Encrypt certificate expiration notice for domain \"example-a.com\" (and 2 more)", - Body: fmt.Sprintf(`hi, cert for DNS names %s is going to expire in 2 days (%s)`, - domains, - rawCertB.NotAfter.Format(time.RFC822Z)), - } - expected.To = "one@example.com" - test.AssertEquals(t, expected, ctx.mc.Messages[0]) - expected.To = "two@example.com" - test.AssertEquals(t, expected, ctx.mc.Messages[1]) -} - -func newX509Cert(commonName string, notAfter time.Time, dnsNames []string, serial *big.Int) *x509.Certificate { - return &x509.Certificate{ - Subject: pkix.Name{ - CommonName: commonName, - }, - NotAfter: notAfter, - DNSNames: dnsNames, - SerialNumber: serial, - } - -} diff --git a/cmd/id-exporter/main.go b/cmd/id-exporter/main.go deleted file mode 100644 index eaa004202f0..00000000000 --- a/cmd/id-exporter/main.go +++ /dev/null @@ -1,301 +0,0 @@ -package notmain - -import ( - "bufio" - "encoding/json" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/db" - "github.com/letsencrypt/boulder/features" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/sa" 
-) - -type idExporter struct { - log blog.Logger - dbMap *db.WrappedMap - clk clock.Clock - grace time.Duration -} - -// resultEntry is a JSON marshalable exporter result entry. -type resultEntry struct { - // ID is exported to support marshaling to JSON. - ID int64 `json:"id"` - - // Hostname is exported to support marshaling to JSON. Not all queries - // will fill this field, so its JSON field tag marks it as - // omittable. - Hostname string `json:"hostname,omitempty"` -} - -// reverseHostname converts (reversed) names sourced from the -// issuedNames table to standard hostnames. -func (r *resultEntry) reverseHostname() { - r.Hostname = sa.ReverseName(r.Hostname) -} - -// idExporterResults is passed as a selectable 'holder' for the results -// of id-exporter database queries. -type idExporterResults []*resultEntry - -// marshalToJSON returns JSON as bytes for all elements of the inner `id` -// slice. -func (i *idExporterResults) marshalToJSON() ([]byte, error) { - data, err := json.Marshal(i) - if err != nil { - return nil, err - } - data = append(data, '\n') - return data, nil -} - -// writeToFile writes the contents of the inner `ids` slice, as JSON, to -// a file. -func (i *idExporterResults) writeToFile(outfile string) error { - data, err := i.marshalToJSON() - if err != nil { - return err - } - return ioutil.WriteFile(outfile, data, 0644) -} - -// findIDs gathers all registration IDs with unexpired certificates. -func (c idExporter) findIDs() (idExporterResults, error) { - var holder idExporterResults - _, err := c.dbMap.Select( - &holder, - `SELECT DISTINCT r.id - FROM registrations AS r - INNER JOIN certificates AS c on c.registrationID = r.id - WHERE r.contact NOT IN ('[]', 'null') - AND c.expires >= :expireCutoff;`, - map[string]interface{}{ - "expireCutoff": c.clk.Now().Add(-c.grace), - }) - if err != nil { - c.log.AuditErrf("Error finding IDs: %s", err) - return nil, err - } - return holder, nil -} - -// findIDsWithExampleHostnames gathers all registration IDs with -// unexpired certificates and a corresponding example hostname. -func (c idExporter) findIDsWithExampleHostnames() (idExporterResults, error) { - var holder idExporterResults - _, err := c.dbMap.Select( - &holder, - `SELECT SQL_BIG_RESULT - cert.registrationID AS id, - name.reversedName AS hostname - FROM certificates AS cert - INNER JOIN issuedNames AS name ON name.serial = cert.serial - WHERE cert.expires >= :expireCutoff - GROUP BY cert.registrationID;`, - map[string]interface{}{ - "expireCutoff": c.clk.Now().Add(-c.grace), - }) - if err != nil { - c.log.AuditErrf("Error finding IDs and example hostnames: %s", err) - return nil, err - } - - for _, result := range holder { - result.reverseHostname() - } - return holder, nil -} - -// findIDsForHostnames gathers all registration IDs with unexpired -// certificates for each `hostnames` entry.
-func (c idExporter) findIDsForHostnames(hostnames []string) (idExporterResults, error) { - var holder idExporterResults - for _, hostname := range hostnames { - // Pass the same list in each time, gorp will happily just append to the slice - // instead of overwriting it each time - // https://github.com/go-gorp/gorp/blob/2ae7d174a4cf270240c4561092402affba25da5e/select.go#L348-L355 - _, err := c.dbMap.Select( - &holder, - `SELECT DISTINCT c.registrationID AS id - FROM certificates AS c - INNER JOIN issuedNames AS n ON c.serial = n.serial - WHERE c.expires >= :expireCutoff - AND n.reversedName = :reversedName;`, - map[string]interface{}{ - "expireCutoff": c.clk.Now().Add(-c.grace), - "reversedName": sa.ReverseName(hostname), - }, - ) - if err != nil { - if db.IsNoRows(err) { - continue - } - return nil, err - } - } - - return holder, nil -} - -const usageIntro = ` -Introduction: - -The ID exporter exists to retrieve the IDs of all registered -users with currently unexpired certificates. This list of registration IDs can -then be given as input to the notification mailer to send bulk notifications. - -The -grace parameter can be used to allow registrations with certificates that -have already expired to be included in the export. The argument is a Go duration -obeying the usual suffix rules (e.g. 24h). - -Registration IDs are favoured over email addresses as the intermediate format in -order to ensure the most up to date contact information is used at the time of -notification. The notification mailer will resolve the ID to email(s) when the -mailing is underway, ensuring we use the correct address if a user has updated -their contact information between the time of export and the time of -notification. - -By default, the ID exporter's output will be JSON of the form: - [ - { "id": 1 }, - ... - { "id": n } - ] - -Operations that return a hostname will be JSON of the form: - [ - { "id": 1, "hostname": "example-1.com" }, - ... - { "id": n, "hostname": "example-n.com" } - ] - -Examples: - Export all registration IDs with unexpired certificates to "regs.json": - - id-exporter -config test/config/id-exporter.json -outfile regs.json - - Export all registration IDs with certificates that are unexpired or expired - within the last two days to "regs.json": - - id-exporter -config test/config/id-exporter.json -grace 48h -outfile - "regs.json" - -Required arguments: -- config -- outfile` - -// unmarshalHostnames unmarshals a hostnames file and ensures that the file -// contained at least one entry. 
-func unmarshalHostnames(filePath string) ([]string, error) { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - scanner.Split(bufio.ScanLines) - - var hostnames []string - for scanner.Scan() { - line := scanner.Text() - if strings.Contains(line, " ") { - return nil, fmt.Errorf( - "line: %q contains more than one entry, entries must be separated by newlines", line) - } - hostnames = append(hostnames, line) - } - - if len(hostnames) == 0 { - return nil, errors.New("provided file contains 0 hostnames") - } - return hostnames, nil -} - -type Config struct { - ContactExporter struct { - DB cmd.DBConfig - cmd.PasswordConfig - Features map[string]bool - } -} - -func main() { - outFile := flag.String("outfile", "", "File to output results JSON to.") - grace := flag.Duration("grace", 2*24*time.Hour, "Include results with certificates that expired in < grace ago.") - hostnamesFile := flag.String( - "hostnames", "", "Only include results with unexpired certificates that contain hostnames\nlisted (newline separated) in this file.") - withExampleHostnames := flag.Bool( - "with-example-hostnames", false, "Include an example hostname for each registration ID with an unexpired certificate.") - configFile := flag.String("config", "", "File containing a JSON config.") - - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "%s\n\n", usageIntro) - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - flag.PrintDefaults() - } - - // Parse flags and check required. - flag.Parse() - if *outFile == "" || *configFile == "" { - flag.Usage() - os.Exit(1) - } - - log := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7}) - - // Load configuration file. - configData, err := ioutil.ReadFile(*configFile) - cmd.FailOnError(err, fmt.Sprintf("Reading %q", *configFile)) - - // Unmarshal JSON config file. 
- var cfg Config - err = json.Unmarshal(configData, &cfg) - cmd.FailOnError(err, "Unmarshaling config") - - err = features.Set(cfg.ContactExporter.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - dbMap, err := sa.InitWrappedDb(cfg.ContactExporter.DB, nil, log) - cmd.FailOnError(err, "While initializing dbMap") - - exporter := idExporter{ - log: log, - dbMap: dbMap, - clk: cmd.Clock(), - grace: *grace, - } - - var results idExporterResults - if *hostnamesFile != "" { - hostnames, err := unmarshalHostnames(*hostnamesFile) - cmd.FailOnError(err, "Problem unmarshalling hostnames") - - results, err = exporter.findIDsForHostnames(hostnames) - cmd.FailOnError(err, "Could not find IDs for hostnames") - - } else if *withExampleHostnames { - results, err = exporter.findIDsWithExampleHostnames() - cmd.FailOnError(err, "Could not find IDs with hostnames") - - } else { - results, err = exporter.findIDs() - cmd.FailOnError(err, "Could not find IDs") - } - - err = results.writeToFile(*outFile) - cmd.FailOnError(err, fmt.Sprintf("Could not write result to outfile %q", *outFile)) -} - -func init() { - cmd.RegisterCommand("id-exporter", main) -} diff --git a/cmd/id-exporter/main_test.go b/cmd/id-exporter/main_test.go deleted file mode 100644 index afd50b1ce3b..00000000000 --- a/cmd/id-exporter/main_test.go +++ /dev/null @@ -1,488 +0,0 @@ -package notmain - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "fmt" - "io/ioutil" - "math/big" - "net" - "os" - "testing" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/test" - isa "github.com/letsencrypt/boulder/test/inmem/sa" - "github.com/letsencrypt/boulder/test/vars" -) - -var ( - regA *corepb.Registration - regB *corepb.Registration - regC *corepb.Registration - regD *corepb.Registration -) - -const ( - emailARaw = "test@example.com" - emailBRaw = "example@example.com" - emailCRaw = "test-example@example.com" - telNum = "666-666-7777" -) - -func TestFindIDs(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Add some test registrations - testCtx.addRegistrations(t) - - // Run findIDs - since no certificates have been added corresponding to - // the above registrations, no IDs should be found. - results, err := testCtx.c.findIDs() - test.AssertNotError(t, err, "findIDs() produced error") - test.AssertEquals(t, len(results), 0) - - // Now add some certificates - testCtx.addCertificates(t) - - // Run findIDs - since there are three registrations with unexpired certs - // we should get exactly three IDs back: RegA, RegC and RegD. RegB should - // *not* be present since their certificate has already expired. Unlike - // previous versions of this test RegD is not filtered out for having a `tel:` - // contact field anymore - this is the duty of the notify-mailer. 
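(For context on the grace-period check later in this test: findIDs treats a certificate as unexpired when its `expires` is at or after `now - grace`, so widening `grace` to roughly a year pulls RegB's long-expired certificate back into the results. A minimal sketch of that cutoff comparison, using a hypothetical helper name of our own; the real code does this comparison inside the SQL WHERE clause:)

package main

import (
	"fmt"
	"time"
)

// includeInExport mirrors findIDs' WHERE clause: a certificate is kept
// when its expiry is no earlier than now minus the grace window.
// Illustrative only; the real comparison happens in SQL.
func includeInExport(expires, now time.Time, grace time.Duration) bool {
	return !expires.Before(now.Add(-grace))
}

func main() {
	now := time.Now()
	expired30dAgo := now.Add(-30 * 24 * time.Hour)
	// The default 48h grace excludes a certificate that expired 30 days ago...
	fmt.Println(includeInExport(expired30dAgo, now, 48*time.Hour)) // false
	// ...but the ~1 year grace set in this test includes it again.
	fmt.Println(includeInExport(expired30dAgo, now, 360*24*time.Hour)) // true
}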
- results, err = testCtx.c.findIDs() - test.AssertNotError(t, err, "findIDs() produced error") - test.AssertEquals(t, len(results), 3) - for _, entry := range results { - switch entry.ID { - case regA.Id: - case regC.Id: - case regD.Id: - default: - t.Errorf("ID: %d not expected", entry.ID) - } - } - - // Allow a 1 year grace period - testCtx.c.grace = 360 * 24 * time.Hour - results, err = testCtx.c.findIDs() - test.AssertNotError(t, err, "findIDs() produced error") - // Now all four registrations should be returned, including RegB since its - // certificate expired within the grace period - for _, entry := range results { - switch entry.ID { - case regA.Id: - case regB.Id: - case regC.Id: - case regD.Id: - default: - t.Errorf("ID: %d not expected", entry.ID) - } - } -} - -func TestFindIDsWithExampleHostnames(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Add some test registrations - testCtx.addRegistrations(t) - - // Run findIDsWithExampleHostnames - since no certificates have been - // added corresponding to the above registrations, no IDs should be - // found. - results, err := testCtx.c.findIDsWithExampleHostnames() - test.AssertNotError(t, err, "findIDsWithExampleHostnames() produced error") - test.AssertEquals(t, len(results), 0) - - // Now add some certificates - testCtx.addCertificates(t) - - // Run findIDsWithExampleHostnames - since there are three - // registrations with unexpired certs we should get exactly three - // IDs back: RegA, RegC and RegD. RegB should *not* be present since - // their certificate has already expired. - results, err = testCtx.c.findIDsWithExampleHostnames() - test.AssertNotError(t, err, "findIDsWithExampleHostnames() produced error") - test.AssertEquals(t, len(results), 3) - for _, entry := range results { - switch entry.ID { - case regA.Id: - test.AssertEquals(t, entry.Hostname, "example-a.com") - case regC.Id: - test.AssertEquals(t, entry.Hostname, "example-c.com") - case regD.Id: - test.AssertEquals(t, entry.Hostname, "example-d.com") - default: - t.Errorf("ID: %d not expected", entry.ID) - } - } - - // Allow a 1 year grace period - testCtx.c.grace = 360 * 24 * time.Hour - results, err = testCtx.c.findIDsWithExampleHostnames() - test.AssertNotError(t, err, "findIDsWithExampleHostnames() produced error") - - // Now all four registrations should be returned, including RegB - // since it expired within the grace period - test.AssertEquals(t, len(results), 4) - for _, entry := range results { - switch entry.ID { - case regA.Id: - test.AssertEquals(t, entry.Hostname, "example-a.com") - case regB.Id: - test.AssertEquals(t, entry.Hostname, "example-b.com") - case regC.Id: - test.AssertEquals(t, entry.Hostname, "example-c.com") - case regD.Id: - test.AssertEquals(t, entry.Hostname, "example-d.com") - default: - t.Errorf("ID: %d not expected", entry.ID) - } - } -} - -func TestFindIDsForHostnames(t *testing.T) { - testCtx := setup(t) - defer testCtx.cleanUp() - - // Add some test registrations - testCtx.addRegistrations(t) - - // Run findIDsForHostnames - since no certificates have been added corresponding to - // the above registrations, no IDs should be found.
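(The hostname lookups below key off `issuedNames.reversedName`, which stores each name with its dot-separated labels reversed; the fixtures later in this file insert `com.example-a` for `example-a.com`. A minimal sketch of that transformation, not Boulder's actual `sa.ReverseName`, just the idea:)

package main

import (
	"fmt"
	"strings"
)

// reverseName flips the dot-separated labels of a hostname, so names
// that share a registered domain sort and index together in the database.
func reverseName(domain string) string {
	labels := strings.Split(domain, ".")
	for i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 {
		labels[i], labels[j] = labels[j], labels[i]
	}
	return strings.Join(labels, ".")
}

func main() {
	fmt.Println(reverseName("example-a.com")) // com.example-a
	fmt.Println(reverseName("com.example-a")) // example-a.com; the function is its own inverse
}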
- results, err := testCtx.c.findIDsForHostnames([]string{"example-a.com", "example-b.com", "example-c.com", "example-d.com"}) - test.AssertNotError(t, err, "findIDsForHostnames() produced error") - test.AssertEquals(t, len(results), 0) - - // Now add some certificates - testCtx.addCertificates(t) - - results, err = testCtx.c.findIDsForHostnames([]string{"example-a.com", "example-b.com", "example-c.com", "example-d.com"}) - test.AssertNotError(t, err, "findIDsForHostnames() failed") - test.AssertEquals(t, len(results), 3) - for _, entry := range results { - switch entry.ID { - case regA.Id: - case regC.Id: - case regD.Id: - default: - t.Errorf("ID: %d not expected", entry.ID) - } - } -} - -func TestWriteToFile(t *testing.T) { - expected := `[{"id":1},{"id":2},{"id":3}]` - mockResults := idExporterResults{{ID: 1}, {ID: 2}, {ID: 3}} - dir := os.TempDir() - - f, err := ioutil.TempFile(dir, "ids_test") - test.AssertNotError(t, err, "ioutil.TempFile produced an error") - - // Writing the result to an outFile should produce the correct results - err = mockResults.writeToFile(f.Name()) - test.AssertNotError(t, err, fmt.Sprintf("writeToFile produced an error writing to %s", f.Name())) - - contents, err := ioutil.ReadFile(f.Name()) - test.AssertNotError(t, err, fmt.Sprintf("ioutil.ReadFile produced an error reading from %s", f.Name())) - - test.AssertEquals(t, string(contents), expected+"\n") -} - -func Test_unmarshalHostnames(t *testing.T) { - testDir := os.TempDir() - testFile, err := ioutil.TempFile(testDir, "ids_test") - test.AssertNotError(t, err, "ioutil.TempFile produced an error") - - // Non-existent hostnamesFile - _, err = unmarshalHostnames("file_does_not_exist") - test.AssertError(t, err, "expected error for non-existent file") - - // Empty hostnamesFile - err = ioutil.WriteFile(testFile.Name(), []byte(""), 0644) - test.AssertNotError(t, err, "ioutil.WriteFile produced an error") - _, err = unmarshalHostnames(testFile.Name()) - test.AssertError(t, err, "expected error for file containing 0 entries") - - // One hostname present in the hostnamesFile - err = ioutil.WriteFile(testFile.Name(), []byte("example-a.com"), 0644) - test.AssertNotError(t, err, "ioutil.WriteFile produced an error") - results, err := unmarshalHostnames(testFile.Name()) - test.AssertNotError(t, err, "error when unmarshalling hostnamesFile with a single hostname") - test.AssertEquals(t, len(results), 1) - - // Two hostnames present in the hostnamesFile - err = ioutil.WriteFile(testFile.Name(), []byte("example-a.com\nexample-b.com"), 0644) - test.AssertNotError(t, err, "ioutil.WriteFile produced an error") - results, err = unmarshalHostnames(testFile.Name()) - test.AssertNotError(t, err, "error when unmarshalling hostnamesFile with two hostnames") - test.AssertEquals(t, len(results), 2) - - // Three hostnames present in the hostnamesFile but two are separated only by a space - err = ioutil.WriteFile(testFile.Name(), []byte("example-a.com\nexample-b.com example-c.com"), 0644) - test.AssertNotError(t, err, "ioutil.WriteFile produced an error") - _, err = unmarshalHostnames(testFile.Name()) - test.AssertError(t, err, "expected error for hostnamesFile with space separated domains") -} - -type testCtx struct { - c idExporter - ssa sapb.StorageAuthorityClient - cleanUp func() -} - -func (tc testCtx) addRegistrations(t *testing.T) { - emailA := "mailto:" + emailARaw - emailB := "mailto:" + emailBRaw - emailC := "mailto:" + emailCRaw - tel := "tel:" + telNum - - // Every registration needs a unique JOSE key - jsonKeyA =
[]byte(`{ - "kty":"RSA", - "n":"0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw", - "e":"AQAB" -}`) - jsonKeyB := []byte(`{ - "kty":"RSA", - "n":"z8bp-jPtHt4lKBqepeKF28g_QAEOuEsCIou6sZ9ndsQsEjxEOQxQ0xNOQezsKa63eogw8YS3vzjUcPP5BJuVzfPfGd5NVUdT-vSSwxk3wvk_jtNqhrpcoG0elRPQfMVsQWmxCAXCVRz3xbcFI8GTe-syynG3l-g1IzYIIZVNI6jdljCZML1HOMTTW4f7uJJ8mM-08oQCeHbr5ejK7O2yMSSYxW03zY-Tj1iVEebROeMv6IEEJNFSS4yM-hLpNAqVuQxFGetwtwjDMC1Drs1dTWrPuUAAjKGrP151z1_dE74M5evpAhZUmpKv1hY-x85DC6N0hFPgowsanmTNNiV75w", - "e":"AAEAAQ" -}`) - jsonKeyC := []byte(`{ - "kty":"RSA", - "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-sCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ", - "e":"AQAB" -}`) - jsonKeyD := []byte(`{ - "kty":"RSA", - "n":"rFH5kUBZrlPj73epjJjyCxzVzZuV--JjKgapoqm9pOuOt20BUTdHqVfC2oDclqM7HFhkkX9OSJMTHgZ7WaVqZv9u1X2yjdx9oVmMLuspX7EytW_ZKDZSzL-FCOFCuQAuYKkLbsdcA3eHBK_lwc4zwdeHFMKIulNvLqckkqYB9s8GpgNXBDIQ8GjR5HuJke_WUNjYHSd8jY1LU9swKWsLQe2YoQUz_ekQvBvBCoaFEtrtRaSJKNLIVDObXFr2TLIiFiM0Em90kK01-eQ7ZiruZTKomll64bRFPoNo4_uwubddg3xTqur2vdF3NyhTrYdvAgTem4uC0PFjEQ1bK_djBQ", - "e":"AQAB" -}`) - - initialIP, err := net.ParseIP("127.0.0.1").MarshalText() - test.AssertNotError(t, err, "Couldn't create initialIP") - - // Regs A through C have `mailto:` contact ACME URL's - regA = &corepb.Registration{ - Id: 1, - Contact: []string{emailA}, - Key: jsonKeyA, - InitialIP: initialIP, - } - regB = &corepb.Registration{ - Id: 2, - Contact: []string{emailB}, - Key: jsonKeyB, - InitialIP: initialIP, - } - regC = &corepb.Registration{ - Id: 3, - Contact: []string{emailC}, - Key: jsonKeyC, - InitialIP: initialIP, - } - // Reg D has a `tel:` contact ACME URL - regD = &corepb.Registration{ - Id: 4, - Contact: []string{tel}, - Key: jsonKeyD, - InitialIP: initialIP, - } - - // Add the four test registrations - ctx := context.Background() - regA, err = tc.ssa.NewRegistration(ctx, regA) - test.AssertNotError(t, err, "Couldn't store regA") - regB, err = tc.ssa.NewRegistration(ctx, regB) - test.AssertNotError(t, err, "Couldn't store regB") - regC, err = tc.ssa.NewRegistration(ctx, regC) - test.AssertNotError(t, err, "Couldn't store regC") - regD, err = tc.ssa.NewRegistration(ctx, regD) - test.AssertNotError(t, err, "Couldn't store regD") -} - -func (tc testCtx) addCertificates(t *testing.T) { - serial1 := big.NewInt(1336) - serial1String := core.SerialToString(serial1) - serial2 := big.NewInt(1337) - serial2String := core.SerialToString(serial2) - serial3 := big.NewInt(1338) - serial3String := core.SerialToString(serial3) - serial4 := big.NewInt(1339) - serial4String := core.SerialToString(serial4) - n := bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==") - e := intFromB64("AQAB") - d := 
bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==") - p := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - q := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - - testKey := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{N: n, E: e}, - D: d, - Primes: []*big.Int{p, q}, - } - - fc := newFakeClock(t) - - // Add one cert for RegA that expires in 30 days - rawCertA := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy A", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-a.com"}, - SerialNumber: serial1, - } - certDerA, _ := x509.CreateCertificate(rand.Reader, &rawCertA, &rawCertA, &testKey.PublicKey, &testKey) - certA := &core.Certificate{ - RegistrationID: regA.Id, - Serial: serial1String, - Expires: rawCertA.NotAfter, - DER: certDerA, - } - err := tc.c.dbMap.Insert(certA) - test.AssertNotError(t, err, "Couldn't add certA") - _, err = tc.c.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-a", - serial1String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certA") - - // Add one cert for RegB that already expired 30 days ago - rawCertB := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy B", - }, - NotAfter: fc.Now().Add(-30 * 24 * time.Hour), - DNSNames: []string{"example-b.com"}, - SerialNumber: serial2, - } - certDerB, _ := x509.CreateCertificate(rand.Reader, &rawCertB, &rawCertB, &testKey.PublicKey, &testKey) - certB := &core.Certificate{ - RegistrationID: regB.Id, - Serial: serial2String, - Expires: rawCertB.NotAfter, - DER: certDerB, - } - err = tc.c.dbMap.Insert(certB) - test.AssertNotError(t, err, "Couldn't add certB") - _, err = tc.c.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-b", - serial2String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certB") - - // Add one cert for RegC that expires in 30 days - rawCertC := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy C", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-c.com"}, - SerialNumber: serial3, - } - certDerC, _ := x509.CreateCertificate(rand.Reader, &rawCertC, &rawCertC, &testKey.PublicKey, &testKey) - certC := &core.Certificate{ - RegistrationID: regC.Id, - Serial: serial3String, - Expires: rawCertC.NotAfter, - DER: certDerC, - } - err = tc.c.dbMap.Insert(certC) - test.AssertNotError(t, err, "Couldn't add certC") - _, err = tc.c.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-c", - serial3String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certC") - - // Add one cert for RegD that expires in 30 days - rawCertD := x509.Certificate{ - Subject: pkix.Name{ - CommonName: "happy D", - }, - NotAfter: fc.Now().Add(30 * 24 * time.Hour), - DNSNames: []string{"example-d.com"}, - SerialNumber: serial4, - } - certDerD, _ := 
x509.CreateCertificate(rand.Reader, &rawCertD, &rawCertD, &testKey.PublicKey, &testKey) - certD := &core.Certificate{ - RegistrationID: regD.Id, - Serial: serial4String, - Expires: rawCertD.NotAfter, - DER: certDerD, - } - err = tc.c.dbMap.Insert(certD) - test.AssertNotError(t, err, "Couldn't add certD") - _, err = tc.c.dbMap.Exec( - "INSERT INTO issuedNames (reversedName, serial, notBefore) VALUES (?,?,0)", - "com.example-d", - serial4String, - ) - test.AssertNotError(t, err, "Couldn't add issued name for certD") -} - -func setup(t *testing.T) testCtx { - log := blog.UseMock() - - // Using DBConnSAFullPerms to be able to insert registrations and certificates - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - if err != nil { - t.Fatalf("Couldn't connect the database: %s", err) - } - cleanUp := test.ResetSATestDatabase(t) - - fc := newFakeClock(t) - ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, nil, fc, log, metrics.NoopRegisterer, 1) - if err != nil { - t.Fatalf("unable to create SQLStorageAuthority: %s", err) - } - - return testCtx{ - c: idExporter{ - dbMap: dbMap, - log: log, - clk: fc, - }, - ssa: isa.SA{Impl: ssa}, - cleanUp: cleanUp, - } -} - -func bigIntFromB64(b64 string) *big.Int { - bytes, _ := base64.URLEncoding.DecodeString(b64) - x := big.NewInt(0) - x.SetBytes(bytes) - return x -} - -func intFromB64(b64 string) int { - return int(bigIntFromB64(b64).Int64()) -} - -func newFakeClock(t *testing.T) clock.FakeClock { - const fakeTimeFormat = "2006-01-02T15:04:05.999999999Z" - ft, err := time.Parse(fakeTimeFormat, fakeTimeFormat) - if err != nil { - t.Fatal(err) - } - fc := clock.NewFake() - fc.Set(ft.UTC()) - return fc -} diff --git a/cmd/log-validator/main.go b/cmd/log-validator/main.go index 0e62edd1731..a292ddc371e 100644 --- a/cmd/log-validator/main.go +++ b/cmd/log-validator/main.go @@ -1,227 +1,50 @@ package notmain import ( - "encoding/base64" - "encoding/json" - "errors" + "context" "flag" - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/honeycombio/beeline-go" - "github.com/hpcloud/tail" - "github.com/prometheus/client_golang/prometheus" "github.com/letsencrypt/boulder/cmd" - blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/log/validator" ) -var errInvalidChecksum = errors.New("invalid checksum length") - -func lineValid(text string) error { - // Line format should match the following rsyslog omfile template: - // - // template( name="LELogFormat" type="list" ) { - // property(name="timereported" dateFormat="rfc3339") - // constant(value=" ") - // property(name="hostname" field.delimiter="46" field.number="1") - // constant(value=" datacenter ") - // property(name="syslogseverity") - // constant(value=" ") - // property(name="syslogtag") - // property(name="msg" spifno1stsp="on" ) - // property(name="msg" droplastlf="on" ) - // constant(value="\n") - // } - // - // This should result in a log line that looks like this: - // timestamp hostname datacenter syslogseverity binary-name[pid]: checksum msg - - fields := strings.Split(text, " ") - const errorPrefix = "log-validator:" - // Extract checksum from line - if len(fields) < 6 { - return fmt.Errorf("%s line doesn't match expected format", errorPrefix) - } - checksum := fields[5] - _, err := base64.RawURLEncoding.DecodeString(checksum) - if err != nil || len(checksum) != 7 { - return fmt.Errorf( - "%s expected a 7 character base64 raw URL decodable string, got %q: %w", - errorPrefix, - checksum, - errInvalidChecksum, - ) - } - - // Reconstruct just 
the message portion of the line - line := strings.Join(fields[6:], " ") - - // If we are fed our own output, treat it as always valid. This - // prevents runaway scenarios where we generate ever-longer output. - if strings.Contains(text, errorPrefix) { - return nil - } - // Check the extracted checksum against the computed checksum - if computedChecksum := blog.LogLineChecksum(line); checksum != computedChecksum { - return fmt.Errorf("%s invalid checksum (expected %q, got %q)", errorPrefix, computedChecksum, checksum) - } - return nil -} - -func validateFile(filename string) error { - file, err := ioutil.ReadFile(filename) - if err != nil { - return err - } - badFile := false - for i, line := range strings.Split(string(file), "\n") { - if line == "" { - continue - } - err := lineValid(line) - if err != nil { - badFile = true - fmt.Fprintf(os.Stderr, "[line %d] %s: %s\n", i+1, err, line) - } - } - - if badFile { - return errors.New("file contained invalid lines") - } - return nil -} - -// tailLogger is an adapter to the hpcloud/tail module's logging interface. -type tailLogger struct { - blog.Logger -} - -func (tl tailLogger) Fatal(v ...interface{}) { - tl.AuditErr(fmt.Sprint(v...)) -} -func (tl tailLogger) Fatalf(format string, v ...interface{}) { - tl.AuditErrf(format, v...) -} -func (tl tailLogger) Fatalln(v ...interface{}) { - tl.AuditErr(fmt.Sprint(v...) + "\n") -} -func (tl tailLogger) Panic(v ...interface{}) { - tl.AuditErr(fmt.Sprint(v...)) -} -func (tl tailLogger) Panicf(format string, v ...interface{}) { - tl.AuditErrf(format, v...) -} -func (tl tailLogger) Panicln(v ...interface{}) { - tl.AuditErr(fmt.Sprint(v...) + "\n") -} -func (tl tailLogger) Print(v ...interface{}) { - tl.Info(fmt.Sprint(v...)) -} -func (tl tailLogger) Printf(format string, v ...interface{}) { - tl.Infof(format, v...) -} -func (tl tailLogger) Println(v ...interface{}) { - tl.Info(fmt.Sprint(v...) 
+ "\n") -} - type Config struct { - Files []string - - DebugAddr string - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig + Files []string `validate:"min=1,dive,required"` + DebugAddr string `validate:"omitempty,hostname_port"` + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig } func main() { - configPath := flag.String("config", "", "File path to the configuration file for this service") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + configFile := flag.String("config", "", "File path to the configuration file for this service") checkFile := flag.String("check-file", "", "File path to a file to directly validate, if this argument is provided the config will not be parsed and only this file will be inspected") flag.Parse() if *checkFile != "" { - err := validateFile(*checkFile) + err := validator.ValidateFile(*checkFile) cmd.FailOnError(err, "validation failed") return } - configBytes, err := ioutil.ReadFile(*configPath) - cmd.FailOnError(err, "failed to read config file") var config Config - err = json.Unmarshal(configBytes, &config) - cmd.FailOnError(err, "failed to parse config file") - - bc, err := config.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() + err := cmd.ReadConfigFile(*configFile, &config) + cmd.FailOnError(err, "Reading JSON config file into config structure") - stats, logger := cmd.StatsAndLogging(config.Syslog, config.DebugAddr) - lineCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_lines", - Help: "A counter of log lines processed, with status", - }, []string{"filename", "status"}) - stats.MustRegister(lineCounter) - - // Emit no more than 1 error line per second. This prevents consuming large - // amounts of disk space in case there is problem that causes all log lines to - // be invalid. 
- outputLimiter := time.NewTicker(time.Second) - - var tailers []*tail.Tail - for _, filename := range config.Files { - t, err := tail.TailFile(filename, tail.Config{ - ReOpen: true, - MustExist: false, // sometimes files won't exist, so we must tolerate that - Follow: true, - Logger: tailLogger{logger}, - }) - cmd.FailOnError(err, "failed to tail file") + if *debugAddr != "" { + config.DebugAddr = *debugAddr + } - go func() { - for line := range t.Lines { - if line.Err != nil { - logger.Errf("error while tailing %s: %s", t.Filename, line.Err) - continue - } - err := lineValid(line.Text) - if err != nil { - if errors.Is(err, errInvalidChecksum) { - lineCounter.WithLabelValues(t.Filename, "invalid checksum length").Inc() - } else { - lineCounter.WithLabelValues(t.Filename, "bad").Inc() - } - select { - case <-outputLimiter.C: - logger.Errf("%s: %s %q", t.Filename, err, line.Text) - default: - } - } else { - lineCounter.WithLabelValues(t.Filename, "ok").Inc() - } - } - }() + stats, logger, oTelShutdown := cmd.StatsAndLogging(config.Syslog, config.OpenTelemetry, config.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) - tailers = append(tailers, t) - } + v := validator.New(config.Files, logger, stats) + defer v.Shutdown() - cmd.CatchSignals(logger, func() { - for _, t := range tailers { - // The tail module seems to have a race condition that will generate - // errors like this on shutdown: - // failed to stop tailing file: : Failed to detect creation of - // : inotify watcher has been closed - // This is probably related to the module's shutdown logic triggering the - // "reopen" code path for files that are removed and then recreated. - // These errors are harmless so we ignore them to allow clean shutdown. - _ = t.Stop() - t.Cleanup() - } - }) + cmd.WaitForSignal() } func init() { - cmd.RegisterCommand("log-validator", main) + cmd.RegisterCommand("log-validator", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/log-validator/main_test.go b/cmd/log-validator/main_test.go deleted file mode 100644 index fc7806d5c69..00000000000 --- a/cmd/log-validator/main_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package notmain - -import ( - "testing" - - "github.com/letsencrypt/boulder/test" -) - -func TestLineValidAccepts(t *testing.T) { - err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: kKG6cwA Caught SIGTERM") - test.AssertNotError(t, err, "errored on valid checksum") -} - -func TestLineValidRejects(t *testing.T) { - err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxxx Caught SIGTERM") - test.AssertError(t, err, "didn't error on invalid checksum") -} - -func TestLineValidRejectsNotAChecksum(t *testing.T) { - err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxx Caught SIGTERM") - test.AssertError(t, err, "didn't error on invalid checksum") - test.AssertErrorIs(t, err, errInvalidChecksum) -} - -func TestLineValidNonOurobouros(t *testing.T) { - err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxxx Caught SIGTERM") - test.AssertError(t, err, "didn't error on invalid checksum") - - selfOutput := "2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 log-validator[1337]: xxxxxxx " + err.Error() - err2 := lineValid(selfOutput) - test.AssertNotError(t, err2, "expected no error when feeding lineValid's error output into itself") -} diff --git a/cmd/nonce-service/main.go
b/cmd/nonce-service/main.go index d4f00241356..cae29d5cb58 100644 --- a/cmd/nonce-service/main.go +++ b/cmd/nonce-service/main.go @@ -3,11 +3,12 @@ package notmain import ( "context" "flag" + "fmt" + "net" + "net/netip" + "os" - "github.com/honeycombio/beeline-go" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/protobuf/types/known/emptypb" + "github.com/jmhodges/clock" "github.com/letsencrypt/boulder/cmd" bgrpc "github.com/letsencrypt/boulder/grpc" @@ -19,38 +20,52 @@ type Config struct { NonceService struct { cmd.ServiceConfig - MaxUsed int - NoncePrefix string + MaxUsed int - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig - } -} - -type nonceServer struct { - noncepb.UnimplementedNonceServiceServer - inner *nonce.NonceService -} + // NonceHMACKey is a path to a file containing an HMAC key which is a + // secret used for deriving the prefix of each nonce instance. It should + // contain 256 bits (32 bytes) of random data to be suitable as an + // HMAC-SHA256 key (e.g. the output of `openssl rand -hex 32`). In a + // multi-DC deployment this value should be the same across all + // boulder-wfe and nonce-service instances. + NonceHMACKey cmd.HMACKeyConfig `validate:"required"` -func (ns *nonceServer) Redeem(ctx context.Context, msg *noncepb.NonceMessage) (*noncepb.ValidMessage, error) { - return &noncepb.ValidMessage{Valid: ns.inner.Valid(msg.Nonce)}, nil + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig + } } -func (ns *nonceServer) Nonce(_ context.Context, _ *emptypb.Empty) (*noncepb.NonceMessage, error) { - nonce, err := ns.inner.Nonce() +func derivePrefix(key []byte, grpcAddr string) (string, error) { + host, port, err := net.SplitHostPort(grpcAddr) if err != nil { - return nil, err + return "", fmt.Errorf("parsing gRPC listen address: %w", err) + } + if host == "" { + return "", fmt.Errorf("nonce service gRPC address must include an IP address: got %q", grpcAddr) } - return &noncepb.NonceMessage{Nonce: nonce}, nil + if host != "" && port != "" { + hostIP, err := netip.ParseAddr(host) + if err != nil { + return "", fmt.Errorf("gRPC address host part was not an IP address") + } + if hostIP.IsUnspecified() { + return "", fmt.Errorf("nonce service gRPC address must be a specific IP address: got %q", grpcAddr) + } + } + return nonce.DerivePrefix(grpcAddr, key), nil } func main() { - grpcAddr := flag.String("addr", "", "gRPC listen address override") + grpcAddr := flag.String("addr", "", "gRPC listen address override. 
Also used to derive the nonce prefix.") debugAddr := flag.String("debug-addr", "", "Debug server address override") - prefixOverride := flag.String("prefix", "", "Override the configured nonce prefix") configFile := flag.String("config", "", "File path to the configuration file for this service") flag.Parse() + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + var c Config err := cmd.ReadConfigFile(*configFile, &c) cmd.FailOnError(err, "Reading JSON config file into config structure") @@ -61,43 +76,32 @@ func main() { if *debugAddr != "" { c.NonceService.DebugAddr = *debugAddr } - if *prefixOverride != "" { - c.NonceService.NoncePrefix = *prefixOverride - } - bc, err := c.NonceService.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() + key, err := c.NonceService.NonceHMACKey.Load() + cmd.FailOnError(err, "Failed to load nonceHMACKey file.") + + noncePrefix, err := derivePrefix(key, c.NonceService.GRPC.Address) + cmd.FailOnError(err, "Failed to derive nonce prefix") - scope, logger := cmd.StatsAndLogging(c.NonceService.Syslog, c.NonceService.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.NonceService.Syslog, c.NonceService.OpenTelemetry, c.NonceService.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) - ns, err := nonce.NewNonceService(scope, c.NonceService.MaxUsed, c.NonceService.NoncePrefix) + ns, err := nonce.NewNonceService(scope, c.NonceService.MaxUsed, noncePrefix) cmd.FailOnError(err, "Failed to initialize nonce service") - tlsConfig, err := c.NonceService.TLS.Load() + tlsConfig, err := c.NonceService.TLS.Load(scope) cmd.FailOnError(err, "tlsConfig config") - nonceServer := &nonceServer{inner: ns} - - serverMetrics := bgrpc.NewServerMetrics(scope) - grpcSrv, l, err := bgrpc.NewServer(c.NonceService.GRPC, tlsConfig, serverMetrics, cmd.Clock()) + start, err := bgrpc.NewServer(c.NonceService.GRPC, logger).Add( + &noncepb.NonceService_ServiceDesc, ns).Build(tlsConfig, scope, clock.New()) cmd.FailOnError(err, "Unable to setup nonce service gRPC server") - noncepb.RegisterNonceServiceServer(grpcSrv, nonceServer) - hs := health.NewServer() - healthpb.RegisterHealthServer(grpcSrv, hs) - go cmd.CatchSignals(logger, func() { - hs.Shutdown() - grpcSrv.GracefulStop() - }) + logger.Infof("Nonce server listening on %s with prefix %q", c.NonceService.GRPC.Address, noncePrefix) - err = cmd.FilterShutdownErrors(grpcSrv.Serve(l)) - cmd.FailOnError(err, "Nonce service gRPC server failed") + cmd.FailOnError(start(), "Nonce service gRPC server failed") } func init() { - cmd.RegisterCommand("nonce-service", main) + cmd.RegisterCommand("nonce-service", main, &cmd.ConfigValidator{Config: &Config{}}) } diff --git a/cmd/notify-mailer/main.go b/cmd/notify-mailer/main.go deleted file mode 100644 index 8bc97bd82e3..00000000000 --- a/cmd/notify-mailer/main.go +++ /dev/null @@ -1,584 +0,0 @@ -package notmain - -import ( - "encoding/csv" - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "net/mail" - "os" - "sort" - "strconv" - "strings" - "text/template" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/db" - blog "github.com/letsencrypt/boulder/log" - bmail "github.com/letsencrypt/boulder/mail" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/policy" - "github.com/letsencrypt/boulder/sa" -) - -type mailer struct { - clk 
clock.Clock - log blog.Logger - dbMap dbSelector - mailer bmail.Mailer - subject string - emailTemplate *template.Template - recipients []recipient - targetRange interval - sleepInterval time.Duration -} - -// interval defines a range of email addresses to send to in alphabetical order. -// The `start` field is inclusive and the `end` field is exclusive. To include -// everything, set `end` to \xFF. -type interval struct { - start string - end string -} - -// contactQueryResult is a receiver for queries to the `registrations` table. -type contactQueryResult struct { - // ID is exported to receive the value of `id`. - ID int64 - - // Contact is exported to receive the value of `contact`. - Contact []byte -} - -func (i *interval) ok() error { - if i.start > i.end { - return fmt.Errorf("interval start value (%s) is greater than end value (%s)", - i.start, i.end) - } - return nil -} - -func (i *interval) includes(s string) bool { - return s >= i.start && s < i.end -} - -// ok ensures that both the `targetRange` and `sleepInterval` are valid. -func (m *mailer) ok() error { - err := m.targetRange.ok() - if err != nil { - return err - } - - if m.sleepInterval < 0 { - return fmt.Errorf( - "sleep interval (%d) is < 0", m.sleepInterval) - } - return nil -} - -func (m *mailer) logStatus(to string, current, total int, start time.Time) { - // Should never happen. - if total <= 0 || current < 1 || current > total { - m.log.AuditErrf("Invalid current (%d) or total (%d)", current, total) - } - completion := (float32(current) / float32(total)) * 100 - now := m.clk.Now() - elapsed := now.Sub(start) - m.log.Infof("Sending message (%d) of (%d) to address (%s) [%.2f%%] time elapsed (%s)", - current, total, to, completion, elapsed) -} - -func sortAddresses(input addressToRecipientMap) []string { - var addresses []string - for address := range input { - addresses = append(addresses, address) - } - sort.Strings(addresses) - return addresses -} - -// makeMessageBody is a helper for mailer.run() that's split out for the -// purposes of testing. -func (m *mailer) makeMessageBody(recipients []recipient) (string, error) { - var messageBody strings.Builder - - // Ensure that in the event of a missing key, an informative error is - // returned. 
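- // (text/template's Option("missingkey=error") makes Execute return an - // error when the data is missing a referenced key, instead of silently - // rendering "<no value>".)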
- m.emailTemplate.Option("missingkey=error") - err := m.emailTemplate.Execute(&messageBody, recipients) - if err != nil { - return "", err - } - - if messageBody.Len() == 0 { - return "", errors.New("templating resulted in an empty message body") - } - return messageBody.String(), nil -} - -func (m *mailer) run() error { - err := m.ok() - if err != nil { - return err - } - - totalRecipients := len(m.recipients) - m.log.Infof("Resolving addresses for (%d) recipients", totalRecipients) - - addressToRecipient, err := m.resolveAddresses() - if err != nil { - return err - } - - totalAddresses := len(addressToRecipient) - if totalAddresses == 0 { - return errors.New("0 recipients remained after resolving addresses") - } - - m.log.Infof("%d recipients were resolved to %d addresses", totalRecipients, totalAddresses) - - var mostRecipients string - var mostRecipientsLen int - for k, v := range addressToRecipient { - if len(v) > mostRecipientsLen { - mostRecipientsLen = len(v) - mostRecipients = k - } - } - - m.log.Infof("Address %q was associated with the most recipients (%d)", - mostRecipients, mostRecipientsLen) - - err = m.mailer.Connect() - if err != nil { - return err - } - - defer func() { _ = m.mailer.Close() }() - - startTime := m.clk.Now() - sortedAddresses := sortAddresses(addressToRecipient) - - var sent int - for i, address := range sortedAddresses { - if !m.targetRange.includes(address) { - m.log.Debugf("Address %q is outside of target range, skipping", address) - continue - } - - err := policy.ValidEmail(address) - if err != nil { - m.log.Infof("Skipping %q due to policy violation: %s", address, err) - continue - } - - recipients := addressToRecipient[address] - m.logStatus(address, i+1, totalAddresses, startTime) - - messageBody, err := m.makeMessageBody(recipients) - if err != nil { - m.log.Errf("Skipping %q due to templating error: %s", address, err) - continue - } - - err = m.mailer.SendMail([]string{address}, m.subject, messageBody) - if err != nil { - var badAddrErr bmail.BadAddressSMTPError - if errors.As(err, &badAddrErr) { - m.log.Errf("address %q was rejected by server: %s", address, err) - continue - } - return fmt.Errorf("while sending mail (%d) of (%d) to address %q: %s", - i, len(sortedAddresses), address, err) - } - - sent++ - m.clk.Sleep(m.sleepInterval) - } - - if sent == 0 { - return errors.New("0 messages sent, check recipients or configured interval") - } - return nil -} - -// resolveAddresses creates a mapping of email addresses to (a list of) -// `recipient`s that resolve to that email address. -func (m *mailer) resolveAddresses() (addressToRecipientMap, error) { - result := make(addressToRecipientMap, len(m.recipients)) - for _, recipient := range m.recipients { - addresses, err := getAddressForID(recipient.id, m.dbMap) - if err != nil { - return nil, err - } - - for _, address := range addresses { - parsed, err := mail.ParseAddress(address) - if err != nil { - m.log.Errf("Unparsable address %q, skipping ID (%d)", address, recipient.id) - continue - } - result[parsed.Address] = append(result[parsed.Address], recipient) - } - } - return result, nil -} - -// dbSelector abstracts over a subset of methods from `gorp.DbMap` objects to -// facilitate mocking in unit tests. -type dbSelector interface { - SelectOne(holder interface{}, query string, args ...interface{}) error -} - -// getAddressForID queries the database for the email address associated with -// the provided registration ID. 
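- // Rows whose contact column is '[]' or 'null' are excluded by the query - // itself, and a missing row yields an empty address list rather than an - // error.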
-func getAddressForID(id int64, dbMap dbSelector) ([]string, error) { - var result contactQueryResult - err := dbMap.SelectOne(&result, - `SELECT id, - contact - FROM registrations - WHERE contact NOT IN ('[]', 'null') - AND id = :id;`, - map[string]interface{}{"id": id}) - if err != nil { - if db.IsNoRows(err) { - return []string{}, nil - } - return nil, err - } - - var contacts []string - err = json.Unmarshal(result.Contact, &contacts) - if err != nil { - return nil, err - } - - var addresses []string - for _, contact := range contacts { - if strings.HasPrefix(contact, "mailto:") { - addresses = append(addresses, strings.TrimPrefix(contact, "mailto:")) - } - } - return addresses, nil -} - -// recipient represents a single record from the recipient list file. The 'id' -// column is parsed to the 'id' field; all additional data will be parsed to a -// mapping of column name to value in the 'Data' field. Please inform SRE if you -// make any changes to the exported fields of this struct. These fields are -// referenced in operationally critical e-mail templates used to notify -// subscribers during incident response. -type recipient struct { - // id is the subscriber's ID. - id int64 - - // Data is a mapping of column name to value parsed from a single record in - // the provided recipient list file. It's exported so the contents can be - // accessed by the template package. Please inform SRE if you make any - // changes to this field. - Data map[string]string -} - -// addressToRecipientMap maps email addresses to a list of `recipient`s that -// resolve to that email address. -type addressToRecipientMap map[string][]recipient - -// readRecipientsList parses the contents of a recipient list file into a list -// of `recipient` objects. -func readRecipientsList(filename string, delimiter rune) ([]recipient, string, error) { - f, err := os.Open(filename) - if err != nil { - return nil, "", err - } - - reader := csv.NewReader(f) - reader.Comma = delimiter - - // Parse header. - record, err := reader.Read() - if err != nil { - return nil, "", fmt.Errorf("failed to parse header: %w", err) - } - - if record[0] != "id" { - return nil, "", errors.New("header must begin with \"id\"") - } - - // Collect the names of each header column after `id`. - var dataColumns []string - for _, v := range record[1:] { - dataColumns = append(dataColumns, strings.TrimSpace(v)) - if len(v) == 0 { - return nil, "", errors.New("header contains an empty column") - } - } - - var recordsWithEmptyColumns []int64 - var recordsWithDuplicateIDs []int64 - var probsBuff strings.Builder - stringProbs := func() string { - if len(recordsWithEmptyColumns) != 0 { - fmt.Fprintf(&probsBuff, "ID(s) %v contained empty columns and ", - recordsWithEmptyColumns) - } - - if len(recordsWithDuplicateIDs) != 0 { - fmt.Fprintf(&probsBuff, "ID(s) %v were skipped as duplicates", - recordsWithDuplicateIDs) - } - - if probsBuff.Len() == 0 { - return "" - } - return strings.TrimSuffix(probsBuff.String(), " and ") - } - - // Parse records. - recipientIDs := make(map[int64]bool) - var recipients []recipient - for { - record, err := reader.Read() - if errors.Is(err, io.EOF) { - // Finished parsing the file. - if len(recipients) == 0 { - return nil, stringProbs(), errors.New("no records after header") - } - return recipients, stringProbs(), nil - } else if err != nil { - return nil, "", err - } - - // Ensure the first column of each record can be parsed as a valid - // registration ID.
- recordID := record[0] - id, err := strconv.ParseInt(recordID, 10, 64) - if err != nil { - return nil, "", fmt.Errorf( - "%q couldn't be parsed as a registration ID due to: %s", recordID, err) - } - - // Skip records that have the same ID as those read previously. - if recipientIDs[id] { - recordsWithDuplicateIDs = append(recordsWithDuplicateIDs, id) - continue - } - recipientIDs[id] = true - - // Collect the columns of data after `id` into a map. - var emptyColumn bool - data := make(map[string]string) - for i, v := range record[1:] { - if len(v) == 0 { - emptyColumn = true - } - data[dataColumns[i]] = v - } - - // Only used for logging. - if emptyColumn { - recordsWithEmptyColumns = append(recordsWithEmptyColumns, id) - } - - recipients = append(recipients, recipient{id, data}) - } -} - -const usageIntro = ` -Introduction: - -The notification mailer exists to send a message to the contact associated -with a list of registration IDs. The attributes of the message (from address, -subject, and message content) are provided by the command line arguments. The -message content is provided as a path to a template file via the -body argument. - -Provide a list of recipient user IDs in a CSV file passed with the -recipientList -flag. The CSV file must have "id" as the first column and may have additional -fields to be interpolated into the email template: - - id, lastIssuance - 1234, "from example.com 2018-12-01" - 5678, "from example.net 2018-12-13" - -The additional fields will be interpolated with Golang templating, e.g.: - - Your last issuance on each account was: - {{ range . }} {{ .Data.lastIssuance }} - {{ end }} - -To help the operator gain confidence in the mailing run before committing fully, -three safety features are supported: dry runs, intervals, and a sleep between emails. - -The -dryRun=true flag will use a mock mailer that prints message content to -stdout instead of performing an SMTP transaction with a real mailserver. This -can be used when the initial parameters are being tweaked to ensure no real -emails are sent. Using -dryRun=false will send real email. - -Intervals are supported via the -start and -end arguments. Only email addresses that -are alphabetically between the -start and -end strings will be sent to. This can be used -to break up sending into batches, or more likely to resume sending if a batch is killed, -without resending messages that have already been sent. The -start flag is inclusive and -the -end flag is exclusive. - -Notify-mailer de-duplicates email addresses and groups together the resulting recipient -structs, so a person who has multiple accounts using the same address will only receive -one email. - -During mailing the -sleep argument is used to space out individual messages. -This can be used to ensure that the mailing happens at a steady pace with ample -opportunity for the operator to terminate early in the event of error. The --sleep flag honours durations with a unit suffix (e.g. 1m for 1 minute, 10s for -10 seconds, etc). Using -sleep=0 will disable the sleep and send at full speed. - -Examples: - Send an email with subject "Hello!"
from the email "hello@goodbye.com" with - the contents read from "test_msg_body.txt" to every email associated with the - registration IDs listed in "test_msg_recipients.csv", sleeping 10 seconds - between each message: - - notify-mailer -config test/config/notify-mailer.json -body - cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com - -recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!" - -sleep 10s -dryRun=false - - Do the same, but only to example@example.com: - - notify-mailer -config test/config/notify-mailer.json - -body cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com - -recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!" - -start example@example.com -end example@example.comX - - Send the message starting with example@example.com and emailing every address that's - alphabetically higher: - - notify-mailer -config test/config/notify-mailer.json - -body cmd/notify-mailer/testdata/test_msg_body.txt -from hello@goodbye.com - -recipientList cmd/notify-mailer/testdata/test_msg_recipients.csv -subject "Hello!" - -start example@example.com - -Required arguments: -- body -- config -- from -- subject -- recipientList` - -type Config struct { - NotifyMailer struct { - DB cmd.DBConfig - cmd.SMTPConfig - } - Syslog cmd.SyslogConfig -} - -func main() { - from := flag.String("from", "", "From header for emails. Must be a bare email address.") - subject := flag.String("subject", "", "Subject of emails") - recipientListFile := flag.String("recipientList", "", "File containing a CSV list of registration IDs and extra info.") - parseAsTSV := flag.Bool("tsv", false, "Parse the recipient list file as a TSV.") - bodyFile := flag.String("body", "", "File containing the email body in Golang template format.") - dryRun := flag.Bool("dryRun", true, "Whether to do a dry run.") - sleep := flag.Duration("sleep", 500*time.Millisecond, "How long to sleep between emails.") - start := flag.String("start", "", "Alphabetically lowest email address to include.") - end := flag.String("end", "\xFF", "Alphabetically highest email address (exclusive).") - reconnBase := flag.Duration("reconnectBase", 1*time.Second, "Base sleep duration between reconnect attempts") - reconnMax := flag.Duration("reconnectMax", 5*60*time.Second, "Max sleep duration between reconnect attempts after exponential backoff") - configFile := flag.String("config", "", "File containing a JSON config.") - - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "%s\n\n", usageIntro) - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - flag.PrintDefaults() - } - - // Validate required args. - flag.Parse() - if *from == "" || *subject == "" || *bodyFile == "" || *configFile == "" || - *recipientListFile == "" { - flag.Usage() - os.Exit(1) - } - - configData, err := ioutil.ReadFile(*configFile) - cmd.FailOnError(err, "Couldn't load JSON config file") - - // Parse JSON config. - var cfg Config - err = json.Unmarshal(configData, &cfg) - cmd.FailOnError(err, "Couldn't unmarshal JSON config file") - - log := cmd.NewLogger(cfg.Syslog) - defer log.AuditPanic() - - dbMap, err := sa.InitWrappedDb(cfg.NotifyMailer.DB, nil, log) - cmd.FailOnError(err, "While initializing dbMap") - - // Load and parse message body.
- template, err := template.New("email").ParseFiles(*bodyFile) - cmd.FailOnError(err, "Couldn't parse message template") - - address, err := mail.ParseAddress(*from) - cmd.FailOnError(err, fmt.Sprintf("Couldn't parse %q to address", *from)) - - recipientListDelimiter := ',' - if *parseAsTSV { - recipientListDelimiter = '\t' - } - recipients, probs, err := readRecipientsList(*recipientListFile, recipientListDelimiter) - cmd.FailOnError(err, "Couldn't populate recipients") - - if probs != "" { - log.Infof("While reading the recipient list file %s", probs) - } - - var mailClient bmail.Mailer - if *dryRun { - log.Infof("Starting %s in dry-run mode", cmd.VersionString()) - mailClient = bmail.NewDryRun(*address, log) - } else { - log.Infof("Starting %s", cmd.VersionString()) - smtpPassword, err := cfg.NotifyMailer.PasswordConfig.Pass() - cmd.FailOnError(err, "Couldn't load SMTP password from file") - - mailClient = bmail.New( - cfg.NotifyMailer.Server, - cfg.NotifyMailer.Port, - cfg.NotifyMailer.Username, - smtpPassword, - nil, - *address, - log, - metrics.NoopRegisterer, - *reconnBase, - *reconnMax) - } - - m := mailer{ - clk: cmd.Clock(), - log: log, - dbMap: dbMap, - mailer: mailClient, - subject: *subject, - recipients: recipients, - emailTemplate: template, - targetRange: interval{ - start: *start, - end: *end, - }, - sleepInterval: *sleep, - } - - err = m.run() - cmd.FailOnError(err, "Couldn't complete") - - log.Info("Completed successfully") -} - -func init() { - cmd.RegisterCommand("notify-mailer", main) -} diff --git a/cmd/notify-mailer/main_test.go b/cmd/notify-mailer/main_test.go deleted file mode 100644 index 660ca7d997e..00000000000 --- a/cmd/notify-mailer/main_test.go +++ /dev/null @@ -1,745 +0,0 @@ -package notmain - -import ( - "database/sql" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "testing" - "text/template" - "time" - - "github.com/jmhodges/clock" - - "github.com/letsencrypt/boulder/db" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/mocks" - "github.com/letsencrypt/boulder/test" -) - -func TestIntervalOK(t *testing.T) { - // Test a number of intervals known to be OK, ensure that no error is - // produced when calling `ok()`.
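- // (Note that ok() only rejects start > end, so an empty interval with - // start == end is still considered valid.)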
- okCases := []struct { - testInterval interval - }{ - {interval{}}, - {interval{start: "aa", end: "\xFF"}}, - {interval{end: "aa"}}, - {interval{start: "aa", end: "bb"}}, - } - for _, testcase := range okCases { - err := testcase.testInterval.ok() - test.AssertNotError(t, err, "valid interval produced ok() error") - } - - badInterval := interval{start: "bb", end: "aa"} - err := badInterval.ok() - test.AssertError(t, err, "bad interval was considered ok") -} - -func setupMakeRecipientList(t *testing.T, contents string) string { - entryFile, err := ioutil.TempFile("", "") - test.AssertNotError(t, err, "couldn't create temp file") - - _, err = entryFile.WriteString(contents) - test.AssertNotError(t, err, "couldn't write contents to temp file") - - err = entryFile.Close() - test.AssertNotError(t, err, "couldn't close temp file") - return entryFile.Name() -} - -func TestReadRecipientList(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 -23,example.net,2018-11-22` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - list, _, err := readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") - - expected := []recipient{ - {id: 10, Data: map[string]string{"date": "2018-11-21", "domainName": "example.com"}}, - {id: 23, Data: map[string]string{"date": "2018-11-22", "domainName": "example.net"}}, - } - test.AssertDeepEquals(t, list, expected) - - contents = `id domainName date -10 example.com 2018-11-21 -23 example.net 2018-11-22` - - entryFile = setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - list, _, err = readRecipientsList(entryFile, '\t') - test.AssertNotError(t, err, "received an error for a valid TSV file") - test.AssertDeepEquals(t, list, expected) -} - -func TestReadRecipientListNoExtraColumns(t *testing.T) { - contents := `id -10 -23` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") -} - -func TestReadRecipientsListFileNoExist(t *testing.T) { - _, _, err := readRecipientsList("doesNotExist", ',') - test.AssertError(t, err, "expected error for a file that doesn't exist") -} - -func TestReadRecipientListWithEmptyColumnInHeader(t *testing.T) { - contents := `id, domainName,,date -10,example.com,2018-11-21 -23,example.net` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "failed to error on CSV file with trailing delimiter in header") - test.AssertDeepEquals(t, err, errors.New("header contains an empty column")) -} - -func TestReadRecipientListWithProblems(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 -23,example.net, -10,example.com,2018-11-22 -42,example.net, -24,example.com,2018-11-21 -24,example.com,2018-11-21 -` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - recipients, probs, err := readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") - test.AssertEquals(t, probs, "ID(s) [23 42] contained empty columns and ID(s) [10 24] were skipped as duplicates") - test.AssertEquals(t, len(recipients), 4) - - // Ensure trailing " and " is trimmed from single problem. 
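- // (stringProbs writes " and " after the empty-column message and relies on - // its trailing TrimSuffix to drop the separator when no duplicate IDs - // follow.)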
- contents = `id, domainName, date -23,example.net, -10,example.com,2018-11-21 -42,example.net, -` - - entryFile = setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, probs, err = readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") - test.AssertEquals(t, probs, "ID(s) [23 42] contained empty columns") -} - -func TestReadRecipientListWithEmptyLine(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 - -23,example.net,2018-11-22` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") -} - -func TestReadRecipientListWithMismatchedColumns(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 -23,example.net` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "failed to error on CSV file with mismatched columns") -} - -func TestReadRecipientListWithDuplicateIDs(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 -10,example.net,2018-11-22` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertNotError(t, err, "received an error for a valid CSV file") -} - -func TestReadRecipientListWithUnparsableID(t *testing.T) { - contents := `id, domainName, date -10,example.com,2018-11-21 -twenty,example.net,2018-11-22` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "expected error for CSV file that contains an unparsable registration ID") -} - -func TestReadRecipientListWithoutIDHeader(t *testing.T) { - contents := `notId, domainName, date -10,example.com,2018-11-21 -twenty,example.net,2018-11-22` - - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "expected error for CSV file missing header field `id`") -} - -func TestReadRecipientListWithNoRecords(t *testing.T) { - contents := `id, domainName, date -` - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "expected error for CSV file containing only a header") -} - -func TestReadRecipientListWithNoHeaderOrRecords(t *testing.T) { - contents := `` - entryFile := setupMakeRecipientList(t, contents) - defer os.Remove(entryFile) - - _, _, err := readRecipientsList(entryFile, ',') - test.AssertError(t, err, "expected error for CSV file containing only a header") - test.AssertErrorIs(t, err, io.EOF) -} - -func TestMakeMessageBody(t *testing.T) { - emailTemplate := `{{range . 
}} -{{ .Data.date }} -{{ .Data.domainName }} -{{end}}` - - m := &mailer{ - log: blog.UseMock(), - mailer: &mocks.Mailer{}, - emailTemplate: template.Must(template.New("email").Parse(emailTemplate)), - sleepInterval: 0, - targetRange: interval{end: "\xFF"}, - clk: newFakeClock(t), - recipients: nil, - dbMap: mockEmailResolver{}, - } - - recipients := []recipient{ - {id: 10, Data: map[string]string{"date": "2018-11-21", "domainName": "example.com"}}, - {id: 23, Data: map[string]string{"date": "2018-11-22", "domainName": "example.net"}}, - } - - expectedMessageBody := ` -2018-11-21 -example.com - -2018-11-22 -example.net -` - - // Ensure that a very basic template with 2 recipients can be successfully - // executed. - messageBody, err := m.makeMessageBody(recipients) - test.AssertNotError(t, err, "failed to execute a valid template") - test.AssertEquals(t, messageBody, expectedMessageBody) - - // With no recipients we should get an empty body error. - recipients = []recipient{} - _, err = m.makeMessageBody(recipients) - test.AssertError(t, err, "should have errored on empty body") - - // With a missing key we should get an informative templating error. - recipients = []recipient{{id: 10, Data: map[string]string{"domainName": "example.com"}}} - _, err = m.makeMessageBody(recipients) - test.AssertEquals(t, err.Error(), "template: email:2:8: executing \"email\" at <.Data.date>: map has no entry for key \"date\"") -} - -func TestSleepInterval(t *testing.T) { - const sleepLen = 10 - mc := &mocks.Mailer{} - dbMap := mockEmailResolver{} - tmpl := template.Must(template.New("letter").Parse("an email body")) - recipients := []recipient{{id: 1}, {id: 2}, {id: 3}} - // Set up a mock mailer that sleeps for `sleepLen` seconds - m := &mailer{ - log: blog.UseMock(), - mailer: mc, - emailTemplate: tmpl, - sleepInterval: sleepLen * time.Second, - targetRange: interval{start: "", end: "\xFF"}, - clk: newFakeClock(t), - recipients: recipients, - dbMap: dbMap, - } - - // Call run() - this should sleep `sleepLen` per destination address - // After it returns, we expect (sleepLen * number of destinations) seconds has - // elapsed - err := m.run() - test.AssertNotError(t, err, "error calling mailer run()") - expectedEnd := newFakeClock(t) - expectedEnd.Add(time.Second * time.Duration(sleepLen*len(recipients))) - test.AssertEquals(t, m.clk.Now(), expectedEnd.Now()) - - // Set up a mock mailer that doesn't sleep at all - m = &mailer{ - log: blog.UseMock(), - mailer: mc, - emailTemplate: tmpl, - sleepInterval: 0, - targetRange: interval{end: "\xFF"}, - clk: newFakeClock(t), - recipients: recipients, - dbMap: dbMap, - } - - // Call run() - this should blast through all destinations without sleep - // After it returns, we expect no clock time to have elapsed on the fake clock - err = m.run() - test.AssertNotError(t, err, "error calling mailer run()") - expectedEnd = newFakeClock(t) - test.AssertEquals(t, m.clk.Now(), expectedEnd.Now()) -} - -func TestMailIntervals(t *testing.T) { - const testSubject = "Test Subject" - dbMap := mockEmailResolver{} - - tmpl := template.Must(template.New("letter").Parse("an email body")) - recipients := []recipient{{id: 1}, {id: 2}, {id: 3}} - - mc := &mocks.Mailer{} - - // Create a mailer with a checkpoint interval larger than any of the - // destination email addresses. 
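- // (Every address in the mock resolver sorts below "\xFF", so all of them - // fall outside this target range.)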
- m := &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: testSubject, - recipients: recipients, - emailTemplate: tmpl, - targetRange: interval{start: "\xFF", end: "\xFF\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer. It should produce an error about the interval start - mc.Clear() - err := m.run() - test.AssertError(t, err, "expected error") - test.AssertEquals(t, len(mc.Messages), 0) - - // Create a mailer with a negative sleep interval - m = &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: testSubject, - recipients: recipients, - emailTemplate: tmpl, - targetRange: interval{}, - sleepInterval: -10, - clk: newFakeClock(t), - } - - // Run the mailer. It should produce an error about the sleep interval - mc.Clear() - err = m.run() - test.AssertEquals(t, len(mc.Messages), 0) - test.AssertEquals(t, err.Error(), "sleep interval (-10) is < 0") - - // Create a mailer with an interval starting with a specific email address. - // It should send email to that address and others alphabetically higher. - m = &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: testSubject, - recipients: []recipient{{id: 1}, {id: 2}, {id: 3}, {id: 4}}, - emailTemplate: tmpl, - targetRange: interval{start: "test-example-updated@letsencrypt.org", end: "\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer. Two messages should have been produced, one to - // test-example-updated@letsencrypt.org (beginning of the range), - // and one to test-test-test@letsencrypt.org. - mc.Clear() - err = m.run() - test.AssertNotError(t, err, "run() produced an error") - test.AssertEquals(t, len(mc.Messages), 2) - test.AssertEquals(t, mocks.MailerMessage{ - To: "test-example-updated@letsencrypt.org", - Subject: testSubject, - Body: "an email body", - }, mc.Messages[0]) - test.AssertEquals(t, mocks.MailerMessage{ - To: "test-test-test@letsencrypt.org", - Subject: testSubject, - Body: "an email body", - }, mc.Messages[1]) - - // Create a mailer with a checkpoint interval ending before - // "test-example-updated@letsencrypt.org" - m = &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: testSubject, - recipients: []recipient{{id: 1}, {id: 2}, {id: 3}, {id: 4}}, - emailTemplate: tmpl, - targetRange: interval{end: "test-example-updated@letsencrypt.org"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer. 
Two messages should have been produced, one to - // example@letsencrypt.org (ID 1), one to example-example-example@letsencrypt.org (ID 4) - mc.Clear() - err = m.run() - test.AssertNotError(t, err, "run() produced an error") - test.AssertEquals(t, len(mc.Messages), 2) - test.AssertEquals(t, mocks.MailerMessage{ - To: "example-example-example@letsencrypt.org", - Subject: testSubject, - Body: "an email body", - }, mc.Messages[0]) - test.AssertEquals(t, mocks.MailerMessage{ - To: "example@letsencrypt.org", - Subject: testSubject, - Body: "an email body", - }, mc.Messages[1]) -} - -func TestMessageContentStatic(t *testing.T) { - // Create a mailer with fixed content - const ( - testSubject = "Test Subject" - ) - dbMap := mockEmailResolver{} - mc := &mocks.Mailer{} - m := &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: testSubject, - recipients: []recipient{{id: 1}}, - emailTemplate: template.Must(template.New("letter").Parse("an email body")), - targetRange: interval{end: "\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer, one message should have been created with the content - // expected - err := m.run() - test.AssertNotError(t, err, "error calling mailer run()") - test.AssertEquals(t, len(mc.Messages), 1) - test.AssertEquals(t, mocks.MailerMessage{ - To: "example@letsencrypt.org", - Subject: testSubject, - Body: "an email body", - }, mc.Messages[0]) -} - -// Send mail with a variable interpolated. -func TestMessageContentInterpolated(t *testing.T) { - recipients := []recipient{ - { - id: 1, - Data: map[string]string{ - "validationMethod": "eyeballing it", - }, - }, - } - dbMap := mockEmailResolver{} - mc := &mocks.Mailer{} - m := &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: "Test Subject", - recipients: recipients, - emailTemplate: template.Must(template.New("letter").Parse( - `issued by {{range .}}{{ .Data.validationMethod }}{{end}}`)), - targetRange: interval{end: "\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer, one message should have been created with the content - // expected - err := m.run() - test.AssertNotError(t, err, "error calling mailer run()") - test.AssertEquals(t, len(mc.Messages), 1) - test.AssertEquals(t, mocks.MailerMessage{ - To: "example@letsencrypt.org", - Subject: "Test Subject", - Body: "issued by eyeballing it", - }, mc.Messages[0]) -} - -// Send mail with a variable interpolated multiple times for accounts that share -// an email address.
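- // (IDs 200-203 all resolve to the same address in the mock resolver, so - // the template body lists every domain but only a single message is sent.)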
-func TestMessageContentInterpolatedMultiple(t *testing.T) { - recipients := []recipient{ - { - id: 200, - Data: map[string]string{ - "domain": "blog.example.com", - }, - }, - { - id: 201, - Data: map[string]string{ - "domain": "nas.example.net", - }, - }, - { - id: 202, - Data: map[string]string{ - "domain": "mail.example.org", - }, - }, - { - id: 203, - Data: map[string]string{ - "domain": "panel.example.net", - }, - }, - } - dbMap := mockEmailResolver{} - mc := &mocks.Mailer{} - m := &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: "Test Subject", - recipients: recipients, - emailTemplate: template.Must(template.New("letter").Parse( - `issued for: -{{range .}}{{ .Data.domain }} -{{end}}Thanks`)), - targetRange: interval{end: "\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - // Run the mailer, one message should have been created with the content - // expected - err := m.run() - test.AssertNotError(t, err, "error calling mailer run()") - test.AssertEquals(t, len(mc.Messages), 1) - test.AssertEquals(t, mocks.MailerMessage{ - To: "gotta.lotta.accounts@letsencrypt.org", - Subject: "Test Subject", - Body: `issued for: -blog.example.com -nas.example.net -mail.example.org -panel.example.net -Thanks`, - }, mc.Messages[0]) -} - -// the `mockEmailResolver` implements the `dbSelector` interface from -// `notify-mailer/main.go` to allow unit testing without using a backing -// database -type mockEmailResolver struct{} - -// the `mockEmailResolver` select method treats the requested reg ID as an index -// into a list of anonymous structs -func (bs mockEmailResolver) SelectOne(output interface{}, _ string, args ...interface{}) error { - // The "dbList" is just a list of contact records in memory - dbList := []contactQueryResult{ - { - ID: 1, - Contact: []byte(`["mailto:example@letsencrypt.org"]`), - }, - { - ID: 2, - Contact: []byte(`["mailto:test-example-updated@letsencrypt.org"]`), - }, - { - ID: 3, - Contact: []byte(`["mailto:test-test-test@letsencrypt.org"]`), - }, - { - ID: 4, - Contact: []byte(`["mailto:example-example-example@letsencrypt.org"]`), - }, - { - ID: 5, - Contact: []byte(`["mailto:youve.got.mail@letsencrypt.org"]`), - }, - { - ID: 6, - Contact: []byte(`["mailto:mail@letsencrypt.org"]`), - }, - { - ID: 7, - Contact: []byte(`["mailto:***********"]`), - }, - { - ID: 200, - Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`), - }, - { - ID: 201, - Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`), - }, - { - ID: 202, - Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`), - }, - { - ID: 203, - Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`), - }, - { - ID: 204, - Contact: []byte(`["mailto:gotta.lotta.accounts@letsencrypt.org"]`), - }, - } - - // Play the type cast game so that we can dig into the arguments map and get - // out an int64 `id` parameter. - argsRaw := args[0] - argsMap, ok := argsRaw.(map[string]interface{}) - if !ok { - return fmt.Errorf("incorrect args type %T", args) - } - idRaw := argsMap["id"] - id, ok := idRaw.(int64) - if !ok { - return fmt.Errorf("incorrect args ID type %T", id) - } - - // Play the type cast game to get a `*contactQueryResult` so we can write - // the result from the db list. 
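- // (SelectOne receives its destination as an interface{}, mirroring the - // gorp signature, so the mock must type-assert it before copying the - // matching row out.)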
- outputPtr, ok := output.(*contactQueryResult) - if !ok { - return fmt.Errorf("incorrect output type %T", output) - } - - for _, v := range dbList { - if v.ID == id { - *outputPtr = v - } - } - if outputPtr.ID == 0 { - return db.ErrDatabaseOp{ - Op: "select one", - Table: "registrations", - Err: sql.ErrNoRows, - } - } - return nil -} - -func TestResolveEmails(t *testing.T) { - // Start with three reg. IDs. Note: the IDs have been matched with fake - // results in the `db` slice in `mockEmailResolver`'s `SelectOne`. If you add - // more test cases here you must also add the corresponding DB result in the - // mock. - recipients := []recipient{ - { - id: 1, - }, - { - id: 2, - }, - { - id: 3, - }, - // This registration ID deliberately doesn't exist in the mock data to make - // sure this case is handled gracefully - { - id: 999, - }, - // This registration ID deliberately returns an invalid email to make sure any - // invalid contact info that slipped into the DB once upon a time will be ignored - { - id: 7, - }, - { - id: 200, - }, - { - id: 201, - }, - { - id: 202, - }, - { - id: 203, - }, - { - id: 204, - }, - } - - tmpl := template.Must(template.New("letter").Parse("an email body")) - - dbMap := mockEmailResolver{} - mc := &mocks.Mailer{} - m := &mailer{ - log: blog.UseMock(), - mailer: mc, - dbMap: dbMap, - subject: "Test", - recipients: recipients, - emailTemplate: tmpl, - targetRange: interval{end: "\xFF"}, - sleepInterval: 0, - clk: newFakeClock(t), - } - - addressesToRecipients, err := m.resolveAddresses() - test.AssertNotError(t, err, "failed to resolveEmailAddresses") - - expected := []string{ - "example@letsencrypt.org", - "test-example-updated@letsencrypt.org", - "test-test-test@letsencrypt.org", - "gotta.lotta.accounts@letsencrypt.org", - } - - test.AssertEquals(t, len(addressesToRecipients), len(expected)) - for _, address := range expected { - if _, ok := addressesToRecipients[address]; !ok { - t.Errorf("missing entry in addressesToRecipients: %q", address) - } - } -} - -func newFakeClock(t *testing.T) clock.FakeClock { - const fakeTimeFormat = "2006-01-02T15:04:05.999999999Z" - ft, err := time.Parse(fakeTimeFormat, fakeTimeFormat) - if err != nil { - t.Fatal(err) - } - fc := clock.NewFake() - fc.Set(ft.UTC()) - return fc -} diff --git a/cmd/notify-mailer/testdata/test_msg_body.txt b/cmd/notify-mailer/testdata/test_msg_body.txt deleted file mode 100644 index 16417d92c7c..00000000000 --- a/cmd/notify-mailer/testdata/test_msg_body.txt +++ /dev/null @@ -1,3 +0,0 @@ -This is a test message body regarding these domains: -{{ range . 
}} {{ .Extra.domainName }} -{{ end }} diff --git a/cmd/notify-mailer/testdata/test_msg_recipients.csv b/cmd/notify-mailer/testdata/test_msg_recipients.csv deleted file mode 100644 index ce3b9f86aeb..00000000000 --- a/cmd/notify-mailer/testdata/test_msg_recipients.csv +++ /dev/null @@ -1,4 +0,0 @@ -id,domainName -1,one.example.com -2,two.example.net -3,three.example.org diff --git a/cmd/ocsp-responder/main.go b/cmd/ocsp-responder/main.go deleted file mode 100644 index 078c930441b..00000000000 --- a/cmd/ocsp-responder/main.go +++ /dev/null @@ -1,206 +0,0 @@ -package notmain - -import ( - "context" - "flag" - "fmt" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/honeycombio/beeline-go" - "github.com/honeycombio/beeline-go/wrappers/hnynethttp" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/features" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics/measured_http" - "github.com/letsencrypt/boulder/ocsp/responder" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" -) - -type Config struct { - OCSPResponder struct { - cmd.ServiceConfig - DB cmd.DBConfig - - // Source indicates the source of pre-signed OCSP responses to be used. It - // can be a DBConnect string or a file URL. The file URL style is used - // when responding from a static file for intermediates and roots. - // If DBConfig has non-empty fields, it takes precedence over this. - Source string - - // The list of issuer certificates, against which OCSP requests/responses - // are checked to ensure we're not responding for anyone else's certs. - IssuerCerts []string - - Path string - ListenAddress string - // MaxAge is the max-age to set in the Cache-Control response - // header. It is a time.Duration formatted string. - MaxAge cmd.ConfigDuration - - // When to timeout a request. This should be slightly lower than the - // upstream's timeout when making request to ocsp-responder. - Timeout cmd.ConfigDuration - - ShutdownStopTimeout cmd.ConfigDuration - - RequiredSerialPrefixes []string - - Features map[string]bool - - Redis rocsp_config.RedisConfig - } - - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -func main() { - configFile := flag.String("config", "", "File path to the configuration file for this service") - flag.Parse() - if *configFile == "" { - fmt.Fprintf(os.Stderr, `Usage of %s: -Config JSON should contain either a DBConnectFile or a Source value containing a file: URL. -If Source is a file: URL, the file should contain a list of OCSP responses in base64-encoded DER, -as generated by Boulder's ceremony command. 
-`, os.Args[0]) - flag.PrintDefaults() - os.Exit(1) - } - - var c Config - err := cmd.ReadConfigFile(*configFile, &c) - cmd.FailOnError(err, "Reading JSON config file into config structure") - err = features.Set(c.OCSPResponder.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - clk := cmd.Clock() - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - stats, logger := cmd.StatsAndLogging(c.Syslog, c.OCSPResponder.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - config := c.OCSPResponder - var source responder.Source - - if strings.HasPrefix(config.Source, "file:") { - url, err := url.Parse(config.Source) - cmd.FailOnError(err, "Source was not a URL") - filename := url.Path - // Go interprets cwd-relative file urls (file:test/foo.txt) as having the - // relative part of the path in the 'Opaque' field. - if filename == "" { - filename = url.Opaque - } - source, err = responder.NewMemorySourceFromFile(filename, logger) - cmd.FailOnError(err, fmt.Sprintf("Couldn't read file: %s", url.Path)) - } else { - // Set DB.DBConnect as a fallback if DB.DBConnectFile isn't present. - config.DB.DBConnect = config.Source - - dbMap, err := sa.InitWrappedDb(config.DB, stats, logger) - cmd.FailOnError(err, "While initializing dbMap") - - source, err = responder.NewDbSource(dbMap, stats, logger) - cmd.FailOnError(err, "Could not create database source") - - // Set up the redis source and the combined multiplex source if there is a - // config for it. Otherwise just pass through the existing mysql source. - if c.OCSPResponder.Redis.Addrs != nil { - rocspReader, err := rocsp_config.MakeReadClient(&c.OCSPResponder.Redis, clk, stats) - cmd.FailOnError(err, "Could not make redis client") - - rocspSource, err := responder.NewRedisSource(rocspReader, stats, logger) - cmd.FailOnError(err, "Could not create redis source") - - source, err = responder.NewMultiSource(source, rocspSource, stats, logger) - cmd.FailOnError(err, "Could not create multiplex source") - } - - // Load the certificate from the file path. - issuerCerts := make([]*issuance.Certificate, len(c.OCSPResponder.IssuerCerts)) - for i, issuerFile := range c.OCSPResponder.IssuerCerts { - issuerCert, err := issuance.LoadCertificate(issuerFile) - cmd.FailOnError(err, "Could not load issuer cert") - issuerCerts[i] = issuerCert - } - - source, err = responder.NewFilterSource( - issuerCerts, - c.OCSPResponder.RequiredSerialPrefixes, - source, - stats, - logger, - ) - cmd.FailOnError(err, "Could not create filtered source") - } - - m := mux(c.OCSPResponder.Path, source, c.OCSPResponder.Timeout.Duration, stats, logger) - srv := &http.Server{ - Addr: c.OCSPResponder.ListenAddress, - Handler: m, - } - - done := make(chan bool) - go cmd.CatchSignals(logger, func() { - ctx, cancel := context.WithTimeout(context.Background(), - c.OCSPResponder.ShutdownStopTimeout.Duration) - defer cancel() - _ = srv.Shutdown(ctx) - done <- true - }) - - err = srv.ListenAndServe() - if err != nil && err != http.ErrServerClosed { - cmd.FailOnError(err, "Running HTTP server") - } - - // https://godoc.org/net/http#Server.Shutdown: - // When Shutdown is called, Serve, ListenAndServe, and ListenAndServeTLS - // immediately return ErrServerClosed. Make sure the program doesn't exit and - // waits instead for Shutdown to return. 
- <-done -} - -// ocspMux partially implements the interface defined for http.ServeMux but doesn't implement -// the path cleaning its Handler method does. Notably http.ServeMux will collapse repeated -// slashes into a single slash which breaks the base64 encoding that is used in OCSP GET -// requests. ocsp.Responder explicitly recommends against using http.ServeMux -// for this reason. -type ocspMux struct { - handler http.Handler -} - -func (om *ocspMux) Handler(_ *http.Request) (http.Handler, string) { - return om.handler, "/" -} - -func mux(responderPath string, source responder.Source, timeout time.Duration, stats prometheus.Registerer, logger blog.Logger) http.Handler { - stripPrefix := http.StripPrefix(responderPath, responder.NewResponder(source, timeout, stats, logger)) - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "GET" && r.URL.Path == "/" { - w.Header().Set("Cache-Control", "max-age=43200") // Cache for 12 hours - w.WriteHeader(200) - return - } - stripPrefix.ServeHTTP(w, r) - }) - return hnynethttp.WrapHandler(measured_http.New(&ocspMux{h}, cmd.Clock(), stats)) -} - -func init() { - cmd.RegisterCommand("ocsp-responder", main) -} diff --git a/cmd/ocsp-responder/main_test.go b/cmd/ocsp-responder/main_test.go deleted file mode 100644 index 68cf3f4b134..00000000000 --- a/cmd/ocsp-responder/main_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package notmain - -import ( - "bytes" - "encoding/base64" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "golang.org/x/crypto/ocsp" - - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/ocsp/responder" - "github.com/letsencrypt/boulder/test" -) - -func TestMux(t *testing.T) { - reqBytes, err := ioutil.ReadFile("./testdata/ocsp.req") - test.AssertNotError(t, err, "failed to read OCSP request") - req, err := ocsp.ParseRequest(reqBytes) - test.AssertNotError(t, err, "failed to parse OCSP request") - - doubleSlashBytes, err := base64.StdEncoding.DecodeString("MFMwUTBPME0wSzAJBgUrDgMCGgUABBR+5mrncpqz/PiiIGRsFqEtYHEIXQQUqEpqYwR93brm0Tm3pkVl7/Oo7KECEgO/AC2R1FW8hePAj4xp//8Jhw==") - test.AssertNotError(t, err, "failed to decode double slash OCSP request") - doubleSlashReq, err := ocsp.ParseRequest(doubleSlashBytes) - test.AssertNotError(t, err, "failed to parse double slash OCSP request") - - respBytes, err := ioutil.ReadFile("./testdata/ocsp.resp") - test.AssertNotError(t, err, "failed to read OCSP response") - resp, err := ocsp.ParseResponse(respBytes, nil) - test.AssertNotError(t, err, "failed to parse OCSP response") - - responses := map[string]*responder.Response{ - req.SerialNumber.String(): {Response: resp, Raw: respBytes}, - doubleSlashReq.SerialNumber.String(): {Response: resp, Raw: respBytes}, - } - src, err := responder.NewMemorySource(responses, blog.NewMock()) - test.AssertNotError(t, err, "failed to create inMemorySource") - - h := mux("/foobar/", src, time.Second, metrics.NoopRegisterer, blog.NewMock()) - - type muxTest struct { - method string - path string - reqBody []byte - respBody []byte - expectedType string - } - mts := []muxTest{ - {"POST", "/foobar/", reqBytes, respBytes, "Success"}, - {"GET", "/", nil, nil, ""}, - {"GET", "/foobar/MFMwUTBPME0wSzAJBgUrDgMCGgUABBR+5mrncpqz/PiiIGRsFqEtYHEIXQQUqEpqYwR93brm0Tm3pkVl7/Oo7KECEgO/AC2R1FW8hePAj4xp//8Jhw==", nil, respBytes, "Success"}, - } - for i, mt := range mts { - w := httptest.NewRecorder() - r, err := http.NewRequest(mt.method, mt.path, 
bytes.NewReader(mt.reqBody)) - if err != nil { - t.Fatalf("#%d, NewRequest: %s", i, err) - } - h.ServeHTTP(w, r) - if w.Code != http.StatusOK { - t.Errorf("Code: want %d, got %d", http.StatusOK, w.Code) - } - if !bytes.Equal(w.Body.Bytes(), mt.respBody) { - t.Errorf("Mismatched body: want %#v, got %#v", mt.respBody, w.Body.Bytes()) - } - } -} diff --git a/cmd/ocsp-responder/testdata/ocsp.req b/cmd/ocsp-responder/testdata/ocsp.req deleted file mode 100644 index 5878715020d..00000000000 Binary files a/cmd/ocsp-responder/testdata/ocsp.req and /dev/null differ diff --git a/cmd/ocsp-responder/testdata/ocsp.resp b/cmd/ocsp-responder/testdata/ocsp.resp deleted file mode 100644 index a35f0bb9fb8..00000000000 Binary files a/cmd/ocsp-responder/testdata/ocsp.resp and /dev/null differ diff --git a/cmd/ocsp-responder/testdata/test-ca.der.pem b/cmd/ocsp-responder/testdata/test-ca.der.pem deleted file mode 100644 index 760417fe943..00000000000 --- a/cmd/ocsp-responder/testdata/test-ca.der.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDETCCAfmgAwIBAgIJAJzxkS6o1QkIMA0GCSqGSIb3DQEBCwUAMB8xHTAbBgNV -BAMMFGhhcHB5IGhhY2tlciBmYWtlIENBMB4XDTE1MDQwNzIzNTAzOFoXDTI1MDQw -NDIzNTAzOFowHzEdMBsGA1UEAwwUaGFwcHkgaGFja2VyIGZha2UgQ0EwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCCkd5mgXFErJ3F2M0E9dw+Ta/md5i -8TDId01HberAApqmydG7UZYF3zLTSzNjlNSOmtybvrSGUnZ9r9tSQcL8VM6WUOM8 -tnIpiIjEA2QkBycMwvRmZ/B2ltPdYs/R9BqNwO1g18GDZrHSzUYtNKNeFI6Glamj -7GK2Vr0SmiEamlNIR5ktAFsEErzf/d4jCF7sosMsJpMCm1p58QkP4LHLShVLXDa8 -BMfVoI+ipYcA08iNUFkgW8VWDclIDxcysa0psDDtMjX3+4aPkE/cefmP+1xOfUuD -HOGV8XFynsP4EpTfVOZr0/g9gYQ7ZArqXX7GTQkFqduwPm/w5qxSPTarAgMBAAGj -UDBOMB0GA1UdDgQWBBT7eE8S+WAVgyyfF380GbMuNupBiTAfBgNVHSMEGDAWgBT7 -eE8S+WAVgyyfF380GbMuNupBiTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQAd9Da+Zv+TjMv7NTAmliqnWHY6d3UxEZN3hFEJ58IQVHbBZVZdW7zhRktB -vR05Kweac0HJeK91TKmzvXl21IXLvh0gcNLU/uweD3no/snfdB4OoFompljThmgl -zBqiqWoKBJQrLCA8w5UB+ReomRYd/EYXF/6TAfzm6hr//Xt5mPiUHPdvYt75lMAo -vRxLSbF8TSQ6b7BYxISWjPgFASNNqJNHEItWsmQMtAjjwzb9cs01XH9pChVAWn9L -oeMKa+SlHSYrWG93+EcrIH/dGU76uNOiaDzBSKvaehG53h25MHuO1anNICJvZovW -rFo4Uv1EnkKJm3vJFe50eJGhEKlx ------END CERTIFICATE----- diff --git a/cmd/ocsp-responder/testdata/test-ca.key b/cmd/ocsp-responder/testdata/test-ca.key deleted file mode 100644 index e3b5697be66..00000000000 --- a/cmd/ocsp-responder/testdata/test-ca.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDCCkd5mgXFErJ3 -F2M0E9dw+Ta/md5i8TDId01HberAApqmydG7UZYF3zLTSzNjlNSOmtybvrSGUnZ9 -r9tSQcL8VM6WUOM8tnIpiIjEA2QkBycMwvRmZ/B2ltPdYs/R9BqNwO1g18GDZrHS -zUYtNKNeFI6Glamj7GK2Vr0SmiEamlNIR5ktAFsEErzf/d4jCF7sosMsJpMCm1p5 -8QkP4LHLShVLXDa8BMfVoI+ipYcA08iNUFkgW8VWDclIDxcysa0psDDtMjX3+4aP -kE/cefmP+1xOfUuDHOGV8XFynsP4EpTfVOZr0/g9gYQ7ZArqXX7GTQkFqduwPm/w -5qxSPTarAgMBAAECggEAZh00uhjFOo35X1TufwSGF0z/c9uMvfMB4i1ufM2qgXud -WXLSLcrksZhhTfLAS4KSTa3PtSKqLBoPg1tdhy9WZqZWxaIxw8ybzaGtn8HNHGyr -LzsVlSLT2ATN4C7VAT9+DeVext0kWHtdz3r5mGagJq2Yx9jRGpQW6rBA9h4ol699 -BM09UPCcdlGmpdrb0jDjyfohG139EBSmEeB+Jim+oLO1sXe/LvWllU0UL527CExp -ykiIjASd4s7tFErV9sVJ+bDI97GOyBUGcVMiQ+TRPKFr0kfLgbJz24l8ycPI4odp -IGY+6igicg67n5BktAH+UfCQlUIpWbF2SwRAMht0AQKBgQD8gocy2VuCPj285hBY -8g/1GFd58HkCh54bOhAOb2PK+NE4mRuHCBlBj/tQOmgYz2Pna2k5ldJSUwXsUKkx -9R7hutnwXbcQTSQIRcjhYDLeGetJYXR96ylDig+6XjdW3A5SIc2JzlbVThP39TTm -gRqE/rj9G4ARMfHxffp7YT5AqwKBgQDEuN0pYMKjaW0xvc7WYUOqGHqt2di/BwMr -Ur438MtePArELY35P6kDcrfnlacDToA3Tebk9Rw18y1kl3BFO7VdJbQJSa6RWbp5 -aK7E5lq1pCrdyhGwiaI1f5VgzeY8ywS3TqGqU9GOqpENiZqgs1ly9l8gZSaw8/yF 
-uDWGg7jiAQKBgQCyLtGEmkiuoYkjUR1cBoQoKeMgkwZxOI3jHJfT99ptkiLhU3lP -UfGwiA+JT43BZCdVWEBKeGSP3zIgzdJ3BEekdhvwN9FEWYsBo2zbTOzYOWYExBZV -/KmDlVr/4hge3O3mGyBVDBvOLWh94rRPq+6wxqZ3RP6cI6hdBs7IXZh2PQKBgQDB -rav4kA4xKpvaDCC2yj3/Gmi1/zO5J2NEZQtoMgdXeM+0w5Dy4204Otq7A4jR5Ziw -Wl9H7dZfe1Kmpb5gO1/dHEC7oDJhYjEIVTs0GgMWsFGP2OE/qNHtz/W2wCC8m7jB -7IWYFzvLNTzoUiDNtKYNXGjdkRjdwOlOkcUI8Wi2AQKBgQC9EJsMz/ySt58IvwWy -fQJyg742j21pXHqlMnmHygnSgNa7f3yPQK3FxjvhIPmgu7x8+sSUtXHOjKhZML3p -SdTm/yN487hOYp03jy/wVXLcCDp9XhBeIt/z/TZMPMjAHOLG9xG6cF8AOVq7mLBc -tsDWUHoXPZj/YciXZLq3fPuXyw== ------END PRIVATE KEY----- diff --git a/cmd/ocsp-updater/main.go b/cmd/ocsp-updater/main.go deleted file mode 100644 index c57df30a151..00000000000 --- a/cmd/ocsp-updater/main.go +++ /dev/null @@ -1,147 +0,0 @@ -package notmain - -import ( - "database/sql" - "flag" - "os" - "strings" - "time" - - "github.com/honeycombio/beeline-go" - - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/features" - bgrpc "github.com/letsencrypt/boulder/grpc" - ocsp_updater "github.com/letsencrypt/boulder/ocsp/updater" - "github.com/letsencrypt/boulder/rocsp" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" -) - -type Config struct { - OCSPUpdater struct { - cmd.ServiceConfig - DB cmd.DBConfig - ReadOnlyDB cmd.DBConfig - Redis *rocsp_config.RedisConfig - - // Issuers is a map from filenames to short issuer IDs. - // Each filename must contain an issuer certificate. The short issuer - // IDs are arbitrarily assigned and must be consistent across OCSP - // components. For production we'll use the number part of the CN, e.g. - // E1 -> 1, R3 -> 3, etc. - Issuers map[string]int - - OldOCSPWindow cmd.ConfigDuration - OldOCSPBatchSize int - - OCSPMinTimeToExpiry cmd.ConfigDuration - ParallelGenerateOCSPRequests int - - // TODO(#5933): Replace this with a unified RetryBackoffConfig - SignFailureBackoffFactor float64 - SignFailureBackoffMax cmd.ConfigDuration - - SerialSuffixShards string - - OCSPGeneratorService *cmd.GRPCClientConfig - - Features map[string]bool - } - - Syslog cmd.SyslogConfig - Beeline cmd.BeelineConfig -} - -func main() { - configFile := flag.String("config", "", "File path to the configuration file for this service") - flag.Parse() - if *configFile == "" { - flag.Usage() - os.Exit(1) - } - - var c Config - err := cmd.ReadConfigFile(*configFile, &c) - cmd.FailOnError(err, "Reading JSON config file into config structure") - - conf := c.OCSPUpdater - err = features.Set(conf.Features) - cmd.FailOnError(err, "Failed to set feature flags") - - bc, err := c.Beeline.Load() - cmd.FailOnError(err, "Failed to load Beeline config") - beeline.Init(bc) - defer beeline.Close() - - stats, logger := cmd.StatsAndLogging(c.Syslog, conf.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - db, err := sa.InitSqlDb(conf.DB, stats) - cmd.FailOnError(err, "Failed to initialize database client") - - var readOnlyDb *sql.DB - readOnlyDbDSN, _ := conf.ReadOnlyDB.URL() - if readOnlyDbDSN == "" { - readOnlyDb = db - } else { - readOnlyDb, err = sa.InitSqlDb(conf.ReadOnlyDB, stats) - cmd.FailOnError(err, "Failed to initialize read-only database client") - } - - clk := cmd.Clock() - - redisConf := c.OCSPUpdater.Redis - var rocspClient *rocsp.WritingClient - var redisTimeout time.Duration - if redisConf != nil { - rocspClient, err = rocsp_config.MakeClient(redisConf, clk, stats) - redisTimeout = redisConf.Timeout.Duration -
cmd.FailOnError(err, "making Redis client") - } - issuers, err := rocsp_config.LoadIssuers(c.OCSPUpdater.Issuers) - cmd.FailOnError(err, "loading issuers") - - tlsConfig, err := c.OCSPUpdater.TLS.Load() - cmd.FailOnError(err, "TLS config") - clientMetrics := bgrpc.NewClientMetrics(stats) - caConn, err := bgrpc.ClientSetup(c.OCSPUpdater.OCSPGeneratorService, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CA") - ogc := capb.NewOCSPGeneratorClient(caConn) - - var serialSuffixes []string - if c.OCSPUpdater.SerialSuffixShards != "" { - serialSuffixes = strings.Fields(c.OCSPUpdater.SerialSuffixShards) - } - - updater, err := ocsp_updater.New( - stats, - clk, - db, - readOnlyDb, - rocspClient, - issuers, - serialSuffixes, - ogc, - conf.OldOCSPBatchSize, - conf.OldOCSPWindow.Duration, - conf.SignFailureBackoffMax.Duration, - conf.SignFailureBackoffFactor, - conf.OCSPMinTimeToExpiry.Duration, - conf.ParallelGenerateOCSPRequests, - redisTimeout, - logger, - ) - cmd.FailOnError(err, "Failed to create updater") - - go cmd.CatchSignals(logger, nil) - for { - updater.Tick() - } -} - -func init() { - cmd.RegisterCommand("ocsp-updater", main) -} diff --git a/cmd/orphan-finder/main.go b/cmd/orphan-finder/main.go deleted file mode 100644 index e98e9083f95..00000000000 --- a/cmd/orphan-finder/main.go +++ /dev/null @@ -1,446 +0,0 @@ -package notmain - -import ( - "context" - "crypto/x509" - "encoding/asn1" - "encoding/hex" - "encoding/json" - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strconv" - "strings" - "time" - - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/features" - bgrpc "github.com/letsencrypt/boulder/grpc" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - sapb "github.com/letsencrypt/boulder/sa/proto" - "google.golang.org/grpc" -) - -var usageString = ` -name: - orphan-finder - Reads orphaned certificates from a boulder-ca log or a der file and adds them to the database - -usage: - orphan-finder parse-ca-log --config --log-file - orphan-finder parse-der --config --der-file --regID - -command descriptions: - parse-ca-log Parses boulder-ca logs to add multiple orphaned certificates - parse-der Parses a single orphaned DER certificate file and adds it to the database -` - -type Config struct { - TLS cmd.TLSConfig - SAService *cmd.GRPCClientConfig - OCSPGeneratorService *cmd.GRPCClientConfig - Syslog cmd.SyslogConfig - // Backdate specifies how to adjust a certificate's NotBefore date to get back - // to the original issued date. It should match the value used in - // `test/config/ca.json` for the CA "backdate" value. - Backdate cmd.ConfigDuration - // IssuerCerts is a list of paths to all intermediate certificates which may - // have been used to issue certificates in the last 90 days. These are used - // to form OCSP generation requests. - IssuerCerts []string - Features map[string]bool -} - -type ocspGenerator interface { - GenerateOCSP(context.Context, *capb.GenerateOCSPRequest, ...grpc.CallOption) (*capb.OCSPResponse, error) -} - -// orphanType is a numeric identifier for the type of orphan being processed. 
-type orphanType int - -const ( - // unknownOrphan indicates an orphan of an unknown type - unknownOrphan orphanType = iota - // certOrphan indicates an orphaned final certificate type - certOrphan - // precertOrphan indicates an orphaned precertificate type - precertOrphan -) - -// String returns a human representation of the orphanType and the expected -// label in the orphaning message for that type, or "unknown" if it isn't -// a known orphan type. -func (t orphanType) String() string { - switch t { - case certOrphan: - return "certificate" - case precertOrphan: - return "precertificate" - default: - return "unknown" - } -} - -// An orphaned cert log line must contain at least the following tokens: -// "orphaning", "(pre)?certificate", "cert=[\w+]", "issuerID=[\d+]", and "regID=[\d]". -// For example: -// `[AUDIT] Failed RPC to store at SA, orphaning precertificate: serial=[04asdf1234], cert=[MIIdeafbeef], issuerID=[112358], regID=[1001], orderID=[1002], err=[Timed out]` -// The orphan-finder does not care about the serial, error, or orderID. -type parsedLine struct { - certDER []byte - issuerID int64 - regID int64 -} - -var ( - derOrphan = regexp.MustCompile(`cert=\[([0-9a-f]+)\]`) - regOrphan = regexp.MustCompile(`regID=\[(\d+)\]`) - issuerOrphan = regexp.MustCompile(`issuerID=\[(\d+)\]`) - errAlreadyExists = fmt.Errorf("Certificate already exists in DB") -) - -// orphanTypeForCert returns precertOrphan if the certificate has the RFC 6962 -// CT poison extension, or certOrphan if it does not. If the certificate is nil -// unknownOrphan is returned. -func orphanTypeForCert(cert *x509.Certificate) orphanType { - if cert == nil { - return unknownOrphan - } - // RFC 6962 Section 3.1 - https://tools.ietf.org/html/rfc6962#section-3.1 - poisonExt := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} - for _, ext := range cert.Extensions { - if ext.Id.Equal(poisonExt) { - return precertOrphan - } - } - return certOrphan -} - -// checkDER parses the provided DER bytes and uses the resulting certificate's -// serial to check if there is an existing precertificate or certificate for the -// provided DER. If there is a matching precert/cert serial then -// errAlreadyExists and the orphanType are returned. If there is no matching -// precert/cert serial then the parsed certificate and orphanType are returned. 
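Those three regexes carry the whole log-line contract, so it is worth watching them fire. A minimal, self-contained sketch (not part of orphan-finder); the sample line is adapted from the comment above, with a lowercase-hex cert value, since `derOrphan` only matches `[0-9a-f]+` and the literal `MIIdeafbeef` quoted in the comment would not:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Adapted from the audit-log example above; cert is lowercase hex so
	// that derOrphan can match it.
	line := `[AUDIT] Failed RPC to store at SA, orphaning precertificate: ` +
		`serial=[04asdf1234], cert=[deadbeef], issuerID=[112358], ` +
		`regID=[1001], orderID=[1002], err=[Timed out]`

	derOrphan := regexp.MustCompile(`cert=\[([0-9a-f]+)\]`)
	regOrphan := regexp.MustCompile(`regID=\[(\d+)\]`)
	issuerOrphan := regexp.MustCompile(`issuerID=\[(\d+)\]`)

	fmt.Println(derOrphan.FindStringSubmatch(line)[1])    // deadbeef
	fmt.Println(regOrphan.FindStringSubmatch(line)[1])    // 1001
	fmt.Println(issuerOrphan.FindStringSubmatch(line)[1]) // 112358
}
```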
-func checkDER(sai sapb.StorageAuthorityCertificateClient, der []byte) (*x509.Certificate, orphanType, error) { - ctx := context.Background() - orphan, err := x509.ParseCertificate(der) - if err != nil { - return nil, unknownOrphan, fmt.Errorf("Failed to parse orphan DER: %s", err) - } - orphanSerial := core.SerialToString(orphan.SerialNumber) - orphanTyp := orphanTypeForCert(orphan) - - switch orphanTyp { - case certOrphan: - _, err = sai.GetCertificate(ctx, &sapb.Serial{Serial: orphanSerial}) - case precertOrphan: - _, err = sai.GetPrecertificate(ctx, &sapb.Serial{Serial: orphanSerial}) - default: - err = errors.New("unknown orphan type") - } - if err == nil { - return nil, orphanTyp, errAlreadyExists - } - if errors.Is(err, berrors.NotFound) { - return orphan, orphanTyp, nil - } - return nil, orphanTyp, fmt.Errorf("Existing %s lookup failed: %s", orphanTyp, err) -} - -func parseLogLine(line string, logger blog.Logger) (parsedLine, error) { - derStr := derOrphan.FindStringSubmatch(line) - if len(derStr) <= 1 { - return parsedLine{}, fmt.Errorf("unable to find cert der: %s", line) - } - der, err := hex.DecodeString(derStr[1]) - if err != nil { - return parsedLine{}, fmt.Errorf("unable to decode hex der from [%s]: %s", line, err) - } - - regStr := regOrphan.FindStringSubmatch(line) - if len(regStr) <= 1 { - return parsedLine{}, fmt.Errorf("unable to find regID: %s", line) - } - regID, err := strconv.ParseInt(regStr[1], 10, 64) - if err != nil { - return parsedLine{}, fmt.Errorf("unable to parse regID from [%s]: %s", line, err) - } - - issuerStr := issuerOrphan.FindStringSubmatch(line) - if len(issuerStr) <= 1 { - return parsedLine{}, fmt.Errorf("unable to find issuerID: %s", line) - } - issuerID, err := strconv.ParseInt(issuerStr[1], 10, 64) - if err != nil { - return parsedLine{}, fmt.Errorf("unable to parse issuerID from [%s]: %s", line, err) - } - - return parsedLine{ - certDER: der, - regID: regID, - issuerID: issuerID, - }, nil -} - -type orphanFinder struct { - sa sapb.StorageAuthorityCertificateClient - ca ocspGenerator - logger blog.Logger - issuers map[issuance.IssuerNameID]*issuance.Certificate - backdate time.Duration -} - -func newOrphanFinder(configFile string) *orphanFinder { - configJSON, err := ioutil.ReadFile(configFile) - cmd.FailOnError(err, "Failed to read config file") - var conf Config - err = json.Unmarshal(configJSON, &conf) - cmd.FailOnError(err, "Failed to parse config file") - err = features.Set(conf.Features) - cmd.FailOnError(err, "Failed to set feature flags") - logger := cmd.NewLogger(conf.Syslog) - - tlsConfig, err := conf.TLS.Load() - cmd.FailOnError(err, "TLS config") - - clientMetrics := bgrpc.NewClientMetrics(metrics.NoopRegisterer) - saConn, err := bgrpc.ClientSetup(conf.SAService, tlsConfig, clientMetrics, cmd.Clock()) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") - sac := sapb.NewStorageAuthorityClient(saConn) - - caConn, err := bgrpc.ClientSetup(conf.OCSPGeneratorService, tlsConfig, clientMetrics, cmd.Clock()) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CA") - cac := capb.NewOCSPGeneratorClient(caConn) - - issuers := make(map[issuance.IssuerNameID]*issuance.Certificate) - for _, issuerCertPath := range conf.IssuerCerts { - c, err := issuance.LoadCertificate(issuerCertPath) - cmd.FailOnError(err, "Failed to load issuer certificate") - issuers[c.NameID()] = c - } - - return &orphanFinder{ - sa: sac, - ca: cac, - logger: logger, - issuers: issuers, - backdate: 
conf.Backdate.Duration, - } -} - -// parseCALog reads a log file, and attempts to parse and store any orphans from -// each line of it. It outputs stats about how many cert and precert orphans it -// found, and how many it successfully stored. -func (opf *orphanFinder) parseCALog(logPath string) { - ctx := context.Background() - logData, err := ioutil.ReadFile(logPath) - cmd.FailOnError(err, "Failed to read log file") - - var certOrphansFound, certOrphansAdded, precertOrphansFound, precertOrphansAdded int64 - for _, line := range strings.Split(string(logData), "\n") { - if line == "" { - continue - } - found, added, typ := opf.storeLogLine(ctx, line) - var foundStat, addStat *int64 - switch typ { - case certOrphan: - foundStat = &certOrphansFound - addStat = &certOrphansAdded - case precertOrphan: - foundStat = &precertOrphansFound - addStat = &precertOrphansAdded - default: - opf.logger.Errf("Found orphan type %s", typ) - continue - } - if found { - *foundStat++ - if added { - *addStat++ - } - } - } - opf.logger.Infof("Found %d certificate orphans and added %d to the database", certOrphansFound, certOrphansAdded) - opf.logger.Infof("Found %d precertificate orphans and added %d to the database", precertOrphansFound, precertOrphansAdded) -} - -// storeLogLine attempts to parse one log line according to the format used when -// orphaning certificates and precertificates. It returns two booleans and the -// orphanType: The first boolean is true if the line was a match, and the second -// is true if the orphan was successfully added to the DB. As part of adding an -// orphan to the DB, it requests a fresh OCSP response from the CA to store -// alongside the precertificate/certificate. -func (opf *orphanFinder) storeLogLine(ctx context.Context, line string) (found bool, added bool, typ orphanType) { - // At a minimum, the log line should contain the word "orphaning" and the token - // "cert=". If it doesn't have those, short-circuit. - if (!strings.Contains(line, fmt.Sprintf("orphaning %s", certOrphan)) && - !strings.Contains(line, fmt.Sprintf("orphaning %s", precertOrphan))) || - !strings.Contains(line, "cert=") { - return false, false, unknownOrphan - } - - parsed, err := parseLogLine(line, opf.logger) - if err != nil { - opf.logger.AuditErr(fmt.Sprintf("Couldn't parse log line: %s", err)) - return true, false, unknownOrphan - } - - // Parse the DER, determine the orphan type, and ensure it doesn't already - // exist in the DB - cert, typ, err := checkDER(opf.sa, parsed.certDER) - if err != nil { - logFunc := opf.logger.Errf - if err == errAlreadyExists { - logFunc = opf.logger.Infof - } - logFunc("%s, [%s]", err, line) - return true, false, typ - } - - // generate an OCSP response - response, err := opf.generateOCSP(ctx, cert) - if err != nil { - opf.logger.AuditErrf("Couldn't generate OCSP: %s, [%s]", err, line) - return true, false, typ - } - - // We use `cert.NotBefore` as the issued date to avoid the SA tagging this - // certificate with an issued date of the current time when we know it was an - // orphan issued in the past. Because certificates are backdated we need to - // add the backdate duration to find the true issued time. 
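The backdate correction is easy to get backwards, so here is a worked sketch with invented values (the tests later in this file use a one-hour backdate):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	backdate := time.Hour // assumed; matches the test fixture below
	// The CA sets NotBefore one backdate interval before the real issuance
	// time, so recovering the true issued date means adding it back.
	notBefore := time.Date(2022, 3, 1, 11, 0, 0, 0, time.UTC) // invented
	fmt.Println(notBefore.Add(backdate)) // 2022-03-01 12:00:00 +0000 UTC
}
```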
- issuedDate := cert.NotBefore.Add(opf.backdate) - switch typ { - case certOrphan: - _, err = opf.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsed.certDER, - RegID: parsed.regID, - Ocsp: response, - Issued: issuedDate.UnixNano(), - }) - case precertOrphan: - _, err = opf.sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsed.certDER, - RegID: parsed.regID, - Ocsp: response, - Issued: issuedDate.UnixNano(), - IssuerID: parsed.issuerID, - }) - default: - // Shouldn't happen but be defensive anyway - err = errors.New("unknown orphan type") - } - if err != nil { - opf.logger.AuditErrf("Failed to store certificate: %s, [%s]", err, line) - return true, false, typ - } - return true, true, typ -} - -// parseDER loads and attempts to store a single orphan from a single DER file. -func (opf *orphanFinder) parseDER(derPath string, regID int64) { - ctx := context.Background() - der, err := ioutil.ReadFile(derPath) - cmd.FailOnError(err, "Failed to read DER file") - cert, typ, err := checkDER(opf.sa, der) - cmd.FailOnError(err, "Pre-AddCertificate checks failed") - // Because certificates are backdated we need to add the backdate duration - // to find the true issued time. - issuedDate := cert.NotBefore.Add(1 * opf.backdate) - response, err := opf.generateOCSP(ctx, cert) - cmd.FailOnError(err, "Generating OCSP") - - switch typ { - case certOrphan: - _, err = opf.sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: der, - RegID: regID, - Ocsp: response, - Issued: issuedDate.UnixNano(), - }) - case precertOrphan: - _, err = opf.sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: der, - RegID: regID, - Ocsp: response, - Issued: issuedDate.UnixNano(), - }) - default: - err = errors.New("unknown orphan type") - } - cmd.FailOnError(err, "Failed to add certificate to database") -} - -// generateOCSP asks the CA to generate a new OCSP response for the given cert. 
-func (opf *orphanFinder) generateOCSP(ctx context.Context, cert *x509.Certificate) ([]byte, error) { - issuerID := issuance.GetIssuerNameID(cert) - _, ok := opf.issuers[issuerID] - if !ok { - return nil, errors.New("unrecognized issuer for orphan") - } - ocspResponse, err := opf.ca.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{ - Serial: core.SerialToString(cert.SerialNumber), - IssuerID: int64(issuerID), - Status: string(core.OCSPStatusGood), - Reason: 0, - RevokedAt: 0, - }) - if err != nil { - return nil, err - } - return ocspResponse.Response, nil -} - -func main() { - if len(os.Args) <= 2 { - fmt.Fprint(os.Stderr, usageString) - os.Exit(1) - } - - command := os.Args[1] - flagSet := flag.NewFlagSet(command, flag.ContinueOnError) - configFile := flagSet.String("config", "", "File path to the configuration file for this service") - logPath := flagSet.String("log-file", "", "Path to boulder-ca log file to parse") - derPath := flagSet.String("der-file", "", "Path to DER certificate file") - regID := flagSet.Int64("regID", 0, "Registration ID of user who requested the certificate") - err := flagSet.Parse(os.Args[2:]) - cmd.FailOnError(err, "Error parsing flagset") - - usage := func() { - fmt.Fprintf(os.Stderr, "%s\nargs:", usageString) - flagSet.PrintDefaults() - os.Exit(1) - } - - if *configFile == "" { - usage() - } - - opf := newOrphanFinder(*configFile) - - switch command { - case "parse-ca-log": - if *logPath == "" { - usage() - } - opf.parseCALog(*logPath) - case "parse-der": - if *derPath == "" || *regID == 0 { - usage() - } - opf.parseDER(*derPath, *regID) - default: - usage() - } -} - -func init() { - cmd.RegisterCommand("orphan-finder", main) -} diff --git a/cmd/orphan-finder/main_test.go b/cmd/orphan-finder/main_test.go deleted file mode 100644 index 8324b4a1637..00000000000 --- a/cmd/orphan-finder/main_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package notmain - -import ( - "context" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "fmt" - "math/big" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/jmhodges/clock" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - berrors "github.com/letsencrypt/boulder/errors" - bgrpc "github.com/letsencrypt/boulder/grpc" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/test" -) - -type mockSA struct { - certificates []*corepb.Certificate - precertificates []core.Certificate - clk clock.FakeClock -} - -func (m *mockSA) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*sapb.AddCertificateResponse, error) { - parsed, err := x509.ParseCertificate(req.Der) - if err != nil { - return nil, err - } - cert := &corepb.Certificate{ - Der: req.Der, - RegistrationID: req.RegID, - Serial: core.SerialToString(parsed.SerialNumber), - Issued: req.Issued, - } - m.certificates = append(m.certificates, cert) - return nil, nil -} - -func (m *mockSA) GetCertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - if len(m.certificates) == 0 { - return nil, berrors.NotFoundError("no certs stored") - } - for _, cert := range m.certificates { - if cert.Serial == req.Serial { - return cert, nil - } - } - return nil, berrors.NotFoundError("no cert stored for requested 
serial") -} - -func (m *mockSA) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - if core.IsAnyNilOrZero(req.Der, req.Issued, req.RegID, req.IssuerID) { - return nil, berrors.InternalServerError("Incomplete request") - } - parsed, err := x509.ParseCertificate(req.Der) - if err != nil { - return nil, err - } - precert := core.Certificate{ - DER: req.Der, - RegistrationID: req.RegID, - Serial: core.SerialToString(parsed.SerialNumber), - } - if req.Issued == 0 { - precert.Issued = m.clk.Now() - } else { - precert.Issued = time.Unix(0, req.Issued) - } - m.precertificates = append(m.precertificates, precert) - return &emptypb.Empty{}, nil -} - -func (m *mockSA) GetPrecertificate(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - if len(m.precertificates) == 0 { - return nil, berrors.NotFoundError("no precerts stored") - } - for _, precert := range m.precertificates { - if precert.Serial == req.Serial { - return bgrpc.CertToPB(precert), nil - } - } - return nil, berrors.NotFoundError("no precert stored for requested serial") -} - -func (m *mockSA) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -type mockCA struct{} - -func (ca *mockCA) GenerateOCSP(context.Context, *capb.GenerateOCSPRequest, ...grpc.CallOption) (*capb.OCSPResponse, error) { - return &capb.OCSPResponse{ - Response: []byte("HI"), - }, nil -} - -func TestParseLine(t *testing.T) { - issuer, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem") - test.AssertNotError(t, err, "failed to load test issuer") - signer, err := test.LoadSigner("../../test/hierarchy/int-e1.key.pem") - test.AssertNotError(t, err, "failed to load test signer") - cert, err := core.LoadCert("../../test/hierarchy/ee-e1.cert.pem") - test.AssertNotError(t, err, "failed to load test cert") - certStr := hex.EncodeToString(cert.Raw) - precertTmpl := x509.Certificate{ - SerialNumber: big.NewInt(0), - NotBefore: time.Now(), - ExtraExtensions: []pkix.Extension{ - {Id: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}, Critical: true, Value: []byte{0x05, 0x00}}, - }, - } - precertDER, err := x509.CreateCertificate(rand.Reader, &precertTmpl, issuer.Certificate, signer.Public(), signer) - test.AssertNotError(t, err, "failed to generate test precert") - precertStr := hex.EncodeToString(precertDER) - - opf := &orphanFinder{ - sa: &mockSA{}, - ca: &mockCA{}, - logger: blog.UseMock(), - issuers: map[issuance.IssuerNameID]*issuance.Certificate{issuer.NameID(): issuer}, - backdate: time.Hour, - } - - logLine := func(typ orphanType, der, issuerID, regID, orderID string) string { - return fmt.Sprintf( - "0000-00-00T00:00:00+00:00 hostname boulder-ca[pid]: "+ - "[AUDIT] Failed RPC to store at SA, orphaning %s: "+ - "serial=[unused], cert=[%s], issuerID=[%s], regID=[%s], orderID=[%s], err=[context deadline exceeded]", - typ, der, issuerID, regID, orderID) - } - - testCases := []struct { - Name string - LogLine string - ExpectFound bool - ExpectAdded bool - ExpectNoErrors bool - ExpectAddedDER string - ExpectRegID int - }{ - { - Name: "Empty line", - LogLine: "", - ExpectFound: false, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Empty cert in line", - LogLine: logLine(certOrphan, "", "1", "1337", "0"), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Invalid cert in line", - LogLine: 
logLine(certOrphan, "deadbeef", "", "", ""), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Valid cert in line", - LogLine: logLine(certOrphan, certStr, "1", "1001", "0"), - ExpectFound: true, - ExpectAdded: true, - ExpectAddedDER: certStr, - ExpectRegID: 1001, - ExpectNoErrors: true, - }, - { - Name: "Already inserted cert in line", - LogLine: logLine(certOrphan, certStr, "1", "1001", "0"), - ExpectFound: true, - // ExpectAdded is false because we have already added this cert in the - // previous "Valid cert in line" test case. - ExpectAdded: false, - ExpectNoErrors: true, - }, - { - Name: "Empty precert in line", - LogLine: logLine(precertOrphan, "", "1", "1337", "0"), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Invalid precert in line", - LogLine: logLine(precertOrphan, "deadbeef", "", "", ""), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Valid precert in line", - LogLine: logLine(precertOrphan, precertStr, "1", "9999", "0"), - ExpectFound: true, - ExpectAdded: true, - ExpectAddedDER: precertStr, - ExpectRegID: 9999, - ExpectNoErrors: true, - }, - { - Name: "Already inserted precert in line", - LogLine: logLine(precertOrphan, precertStr, "1", "1001", "0"), - ExpectFound: true, - // ExpectAdded is false because we have already added this cert in the - // previous "Valid cert in line" test case. - ExpectAdded: false, - ExpectNoErrors: true, - }, - { - Name: "Unknown orphan type", - LogLine: logLine(unknownOrphan, precertStr, "1", "1001", "0"), - ExpectFound: false, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Empty issuerID in line", - LogLine: logLine(precertOrphan, precertStr, "", "1001", "0"), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - { - Name: "Zero issuerID in line", - LogLine: logLine(precertOrphan, precertStr, "0", "1001", "0"), - ExpectFound: true, - ExpectAdded: false, - ExpectNoErrors: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - ctx := context.Background() - opf.logger.(*blog.Mock).Clear() - found, added, typ := opf.storeLogLine(ctx, tc.LogLine) - test.AssertEquals(t, found, tc.ExpectFound) - test.AssertEquals(t, added, tc.ExpectAdded) - logs := opf.logger.(*blog.Mock).GetAllMatching("ERR:") - if tc.ExpectNoErrors { - test.AssertEquals(t, len(logs), 0) - } - - if tc.ExpectAdded { - // Decode the precert/cert DER we expect the testcase added to get the - // certificate serial - der, _ := hex.DecodeString(tc.ExpectAddedDER) - testCert, _ := x509.ParseCertificate(der) - testCertSerial := core.SerialToString(testCert.SerialNumber) - - // Fetch the precert/cert using the correct mock SA function - var storedCert *corepb.Certificate - switch typ { - case precertOrphan: - storedCert, err = opf.sa.GetPrecertificate(ctx, &sapb.Serial{Serial: testCertSerial}) - test.AssertNotError(t, err, "Error getting test precert serial from SA") - case certOrphan: - storedCert, err = opf.sa.GetCertificate(ctx, &sapb.Serial{Serial: testCertSerial}) - test.AssertNotError(t, err, "Error getting test cert serial from SA") - default: - t.Fatalf("unknown orphan type returned: %s", typ) - } - // The orphan should have been added with the correct registration ID from the log line - test.AssertEquals(t, storedCert.RegistrationID, int64(tc.ExpectRegID)) - // The Issued timestamp should be the certificate's NotBefore timestamp offset by the backdate - expectedIssued := 
testCert.NotBefore.Add(opf.backdate).UnixNano() - test.AssertEquals(t, storedCert.Issued, expectedIssued) - } - }) - } -} - -func TestNotOrphan(t *testing.T) { - ctx := context.Background() - opf := &orphanFinder{ - sa: &mockSA{}, - ca: &mockCA{}, - logger: blog.UseMock(), - backdate: time.Hour, - } - - found, added, typ := opf.storeLogLine(ctx, "cert=fakeout") - test.AssertEquals(t, found, false) - test.AssertEquals(t, added, false) - test.AssertEquals(t, typ, unknownOrphan) - logs := opf.logger.(*blog.Mock).GetAllMatching("ERR:") - if len(logs) != 0 { - t.Error("Found error logs:") - for _, ll := range logs { - t.Error(ll) - } - } -} diff --git a/cmd/registry.go b/cmd/registry.go index 329b1d49959..1f6d4860e11 100644 --- a/cmd/registry.go +++ b/cmd/registry.go @@ -2,17 +2,29 @@ package cmd import ( "fmt" + "reflect" "sort" "sync" + + "github.com/letsencrypt/validator/v10" ) +type ConfigValidator struct { + Config any + Validators map[string]validator.Func +} + var registry struct { sync.Mutex commands map[string]func() + configs map[string]*ConfigValidator } -// Register a boulder subcommand to be run when the binary name matches `name`. -func RegisterCommand(name string, f func()) { +// RegisterCommand registers a subcommand and its corresponding config +// validator. The provided func() is called when the subcommand is invoked on +// the command line. The ConfigValidator is optional and used to validate the +// config file for the subcommand. +func RegisterCommand(name string, f func(), cv *ConfigValidator) { registry.Lock() defer registry.Unlock() @@ -24,6 +36,19 @@ func RegisterCommand(name string, f func()) { panic(fmt.Sprintf("command %q was registered twice", name)) } registry.commands[name] = f + + if cv == nil { + return + } + + if registry.configs == nil { + registry.configs = make(map[string]*ConfigValidator) + } + + if registry.configs[name] != nil { + panic(fmt.Sprintf("config validator for command %q was registered twice", name)) + } + registry.configs[name] = cv } func LookupCommand(name string) func() { @@ -42,3 +67,38 @@ func AvailableCommands() []string { sort.Strings(avail) return avail } + +// LookupConfigValidator constructs an instance of the *ConfigValidator for the +// given Boulder component name. If no *ConfigValidator was registered, nil is +// returned. +func LookupConfigValidator(name string) *ConfigValidator { + registry.Lock() + defer registry.Unlock() + if registry.configs[name] == nil { + return nil + } + + // Create a new copy of the config struct so that we can validate it + // multiple times without mutating the registry's copy. + copy := reflect.New(reflect.ValueOf( + registry.configs[name].Config).Elem().Type(), + ).Interface() + + return &ConfigValidator{ + Config: copy, + Validators: registry.configs[name].Validators, + } +} + +// AvailableConfigValidators returns a list of Boulder component names for which +// a *ConfigValidator has been registered. 
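The registration API above reads most clearly from the caller's side. A sketch of a component opting in to config validation; the command name and Config type here are invented, while cmd/remoteva below shows the real opt-in form and reversed-hostname-checker the nil opt-out:

```go
package notmain

import "github.com/letsencrypt/boulder/cmd"

// Config is a hypothetical component config. RegisterCommand retains its
// type, and LookupConfigValidator hands back fresh copies so a config file
// can be validated without mutating the registered instance.
type Config struct {
	MyService struct {
		DebugAddr string `validate:"omitempty,hostname_port"`
	}
}

func main() {
	// Read the config file, dial dependencies, start serving.
}

func init() {
	cmd.RegisterCommand("my-service", main, &cmd.ConfigValidator{Config: &Config{}})
}
```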
+func AvailableConfigValidators() []string { + registry.Lock() + defer registry.Unlock() + var avail []string + for name := range registry.configs { + avail = append(avail, name) + } + sort.Strings(avail) + return avail +} diff --git a/cmd/remoteva/main.go b/cmd/remoteva/main.go new file mode 100644 index 00000000000..7309146427e --- /dev/null +++ b/cmd/remoteva/main.go @@ -0,0 +1,145 @@ +package notmain + +import ( + "context" + "crypto/tls" + "flag" + "os" + "time" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/bdns" + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/features" + bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/iana" + "github.com/letsencrypt/boulder/va" + vaConfig "github.com/letsencrypt/boulder/va/config" + vapb "github.com/letsencrypt/boulder/va/proto" +) + +type Config struct { + RVA struct { + vaConfig.Common + + // Perspective uniquely identifies the Network Perspective used to + // perform the validation, as specified in BRs Section 5.4.1, + // Requirement 2.7 ("Multi-Perspective Issuance Corroboration attempts + // from each Network Perspective"). It should uniquely identify a group + // of RVAs deployed in the same datacenter. + Perspective string `validate:"required"` + + // RIR indicates the Regional Internet Registry where this RVA is + // located. This field is used to identify the RIR region from which a + // given validation was performed, as specified in the "Phased + // Implementation Timeline" in BRs Section 3.2.2.9. It must be one of + // the following values: + // - ARIN + // - RIPE + // - APNIC + // - LACNIC + // - AFRINIC + RIR string `validate:"required,oneof=ARIN RIPE APNIC LACNIC AFRINIC"` + + // SkipGRPCClientCertVerification, when disabled as it should typically + // be, will cause the remoteva server (which receives gRPCs from a + // boulder-va client) to use our default RequireAndVerifyClientCert + // policy. When enabled, the remoteva server will instead use the less + // secure VerifyClientCertIfGiven policy. It should typically be used in + // conjunction with the boulder-va "RVATLSClient" configuration object. + // + // An operator may choose to enable this if the remoteva server is + // logically behind an OSI layer-7 loadbalancer/reverse proxy which + // decrypts traffic and does not/cannot re-encrypt its own client + // connection to the remoteva server. + // + // Use with caution. 
+ // + // For more information, see: https://pkg.go.dev/crypto/tls#ClientAuthType + SkipGRPCClientCertVerification bool + + Features features.Config + } + + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig +} + +func main() { + grpcAddr := flag.String("addr", "", "gRPC listen address override") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + configFile := flag.String("config", "", "File path to the configuration file for this service") + flag.Parse() + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + var c Config + err := cmd.ReadConfigFile(*configFile, &c) + cmd.FailOnError(err, "Reading JSON config file into config structure") + err = c.RVA.SetDefaultsAndValidate(grpcAddr, debugAddr) + cmd.FailOnError(err, "Setting and validating default config values") + features.Set(c.RVA.Features) + + scope, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.RVA.DebugAddr) + defer oTelShutdown(context.Background()) + cmd.LogStartup(logger) + clk := clock.New() + + var servers bdns.ServerProvider + + if len(c.RVA.DNSStaticResolvers) != 0 { + servers, err = bdns.NewStaticProvider(c.RVA.DNSStaticResolvers) + cmd.FailOnError(err, "Couldn't start static DNS server resolver") + } else { + servers, err = bdns.StartDynamicProvider(c.RVA.DNSProvider, 60*time.Second, "tcp") + cmd.FailOnError(err, "Couldn't start dynamic DNS server resolver") + } + defer servers.Stop() + + tlsConfig, err := c.RVA.TLS.Load(scope) + cmd.FailOnError(err, "tlsConfig config") + + if c.RVA.SkipGRPCClientCertVerification { + tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven + } + + resolver := bdns.New( + c.RVA.DNSTimeout.Duration, + servers, + scope, + clk, + c.RVA.DNSTries, + c.RVA.UserAgent, + logger, + tlsConfig) + + vai, err := va.NewValidationAuthorityImpl( + resolver, + nil, // Our RVAs will never have RVAs of their own. + c.RVA.UserAgent, + c.RVA.IssuerDomain, + scope, + clk, + logger, + c.RVA.AccountURIPrefixes, + c.RVA.Perspective, + c.RVA.RIR, + iana.IsReservedAddr, + 0, + c.RVA.DNSAllowLoopbackAddresses, + ) + cmd.FailOnError(err, "Unable to create Remote-VA server") + + start, err := bgrpc.NewServer(c.RVA.GRPC, logger).Add( + &vapb.VA_ServiceDesc, vai).Add( + &vapb.CAA_ServiceDesc, vai).Build(tlsConfig, scope, clk) + cmd.FailOnError(err, "Unable to setup Remote-VA gRPC server") + cmd.FailOnError(start(), "Remote-VA gRPC service failed") +} + +func init() { + cmd.RegisterCommand("remoteva", main, &cmd.ConfigValidator{Config: &Config{}}) +} diff --git a/cmd/reversed-hostname-checker/main.go b/cmd/reversed-hostname-checker/main.go index 52985e8c5b3..e3869ffe288 100644 --- a/cmd/reversed-hostname-checker/main.go +++ b/cmd/reversed-hostname-checker/main.go @@ -1,5 +1,5 @@ -// Read a list of reversed hostnames, separated by newlines. Print only those -// that are rejected by the current policy. +// Read a list of reversed FQDNs and/or normal IP addresses, separated by +// newlines. Print only those that are rejected by the current policy. package notmain @@ -9,6 +9,7 @@ import ( "fmt" "io" "log" + "net/netip" "os" "github.com/letsencrypt/boulder/cmd" @@ -18,12 +19,12 @@ import ( ) func init() { - cmd.RegisterCommand("reversed-hostname-checker", main) + cmd.RegisterCommand("reversed-hostname-checker", main, nil) } func main() { inputFilename := flag.String("input", "", "File containing a list of reversed hostnames to check, newline separated. 
Defaults to stdin") - policyFile := flag.String("policy", "test/hostname-policy.yaml", "File containing a hostname policy in yaml.") + policyFile := flag.String("policy", "test/ident-policy.yaml", "File containing an identifier policy in YAML.") flag.Parse() var input io.Reader @@ -38,18 +39,27 @@ func main() { } scanner := bufio.NewScanner(input) - pa, err := policy.New(nil) + logger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 7}) + cmd.LogStartup(logger) + pa, err := policy.New(nil, nil, logger) if err != nil { log.Fatal(err) } - err = pa.SetHostnamePolicyFile(*policyFile) + err = pa.LoadIdentPolicyFile(*policyFile) if err != nil { log.Fatalf("reading %s: %s", *policyFile, err) } var errors bool for scanner.Scan() { - n := sa.ReverseName(scanner.Text()) - err := pa.WillingToIssueWildcards([]identifier.ACMEIdentifier{identifier.DNSIdentifier(n)}) + n := sa.EncodeIssuedName(scanner.Text()) + var ident identifier.ACMEIdentifier + ip, err := netip.ParseAddr(n) + if err == nil { + ident = identifier.NewIP(ip) + } else { + ident = identifier.NewDNS(n) + } + err = pa.WillingToIssue(identifier.ACMEIdentifiers{ident}) if err != nil { errors = true fmt.Printf("%s: %s\n", n, err) diff --git a/cmd/rocsp-tool/client.go b/cmd/rocsp-tool/client.go deleted file mode 100644 index ac0c1f41265..00000000000 --- a/cmd/rocsp-tool/client.go +++ /dev/null @@ -1,318 +0,0 @@ -package notmain - -import ( - "context" - "database/sql" - "fmt" - "io/ioutil" - "math/rand" - "strings" - "sync/atomic" - "time" - - "github.com/jmhodges/clock" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/rocsp" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" - "github.com/letsencrypt/boulder/test/ocsp/helper" - "golang.org/x/crypto/ocsp" -) - -type client struct { - issuers []rocsp_config.ShortIDIssuer - redis *rocsp.WritingClient - db *sql.DB // optional - ocspGenerator capb.OCSPGeneratorClient - clk clock.Clock - scanBatchSize int - logger blog.Logger -} - -// processResult represents the result of attempting to sign and store status -// for a single certificateStatus ID. If `err` is non-nil, it indicates the -// attempt failed. -type processResult struct { - id uint64 - err error -} - -func getStartingID(ctx context.Context, clk clock.Clock, db *sql.DB) (int64, error) { - // To scan the DB efficiently, we want to select only currently-valid certificates. There's a - // handy expires index, but for selecting a large set of rows, using the primary key will be - // more efficient. So first we find a good id to start with, then scan from there. Note: since - // AUTO_INCREMENT can skip around a bit, we add padding to ensure we get all currently-valid - // certificates. 
- startTime := clk.Now().Add(-24 * time.Hour) - var minID *int64 - err := db.QueryRowContext( - ctx, - "SELECT MIN(id) FROM certificateStatus WHERE notAfter >= ?", - startTime, - ).Scan(&minID) - if err != nil { - return 0, fmt.Errorf("selecting minID: %w", err) - } - if minID == nil { - return 0, fmt.Errorf("no entries in certificateStatus (where notAfter >= %s)", startTime) - } - return *minID, nil -} - -func (cl *client) loadFromDB(ctx context.Context, speed ProcessingSpeed, startFromID int64) error { - prevID := startFromID - var err error - if prevID == 0 { - prevID, err = getStartingID(ctx, cl.clk, cl.db) - if err != nil { - return fmt.Errorf("getting starting ID: %w", err) - } - } - - // Find the current maximum id in certificateStatus. We do this because the table is always - // growing. If we scanned until we saw a batch with no rows, we would scan forever. - var maxID *int64 - err = cl.db.QueryRowContext( - ctx, - "SELECT MAX(id) FROM certificateStatus", - ).Scan(&maxID) - if err != nil { - return fmt.Errorf("selecting maxID: %w", err) - } - if maxID == nil { - return fmt.Errorf("no entries in certificateStatus") - } - - // Limit the rate of reading rows. - frequency := time.Duration(float64(time.Second) / float64(time.Duration(speed.RowsPerSecond))) - // a set of all inflight certificate statuses, indexed by their `ID`. - inflightIDs := newInflight() - statusesToSign := cl.scanFromDB(ctx, prevID, *maxID, frequency, inflightIDs) - - results := make(chan processResult, speed.ParallelSigns) - var runningSigners int32 - for i := 0; i < speed.ParallelSigns; i++ { - atomic.AddInt32(&runningSigners, 1) - go cl.signAndStoreResponses(ctx, statusesToSign, results, &runningSigners) - } - - var successCount, errorCount int64 - - for result := range results { - inflightIDs.remove(result.id) - if result.err != nil { - errorCount++ - if errorCount < 10 || - (errorCount < 1000 && rand.Intn(1000) < 100) || - (errorCount < 100000 && rand.Intn(1000) < 10) || - (rand.Intn(1000) < 1) { - cl.logger.Errf("error: %s", result.err) - } - } else { - successCount++ - } - - total := successCount + errorCount - if total < 10 || - (total < 1000 && rand.Intn(1000) < 100) || - (total < 100000 && rand.Intn(1000) < 10) || - (rand.Intn(1000) < 1) { - cl.logger.Infof("stored %d responses, %d errors", successCount, errorCount) - } - } - - cl.logger.Infof("done. processed %d successes and %d errors\n", successCount, errorCount) - if inflightIDs.len() != 0 { - return fmt.Errorf("inflightIDs non-empty! has %d items, lowest %d", inflightIDs.len(), inflightIDs.min()) - } - - return nil -} - -// scanFromDB scans certificateStatus rows from the DB, starting with `minID`, and writes them to -// its output channel at a maximum frequency of `frequency`. When it's read all available rows, it -// closes its output channel and exits. -// If there is an error, it logs the error, closes its output channel, and exits. 
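The frequency computed in loadFromDB above is simply the tick interval for a target row rate; consuming one tick per row caps throughput. A standalone sketch of the same throttle, using the simpler integer form of the arithmetic (the rate is the tool's documented default; everything else is invented):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	rowsPerSecond := 2000 // the load-from-db default
	frequency := time.Second / time.Duration(rowsPerSecond)
	fmt.Println(frequency) // 500µs

	ticker := time.NewTicker(frequency)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		<-ticker.C // one tick consumed per row read
	}
}
```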
-func (cl *client) scanFromDB(ctx context.Context, prevID int64, maxID int64, frequency time.Duration, inflightIDs *inflight) <-chan *sa.CertStatusMetadata { - statusesToSign := make(chan *sa.CertStatusMetadata) - go func() { - defer close(statusesToSign) - - var err error - currentMin := prevID - for currentMin < maxID { - currentMin, err = cl.scanFromDBOneBatch(ctx, currentMin, frequency, statusesToSign, inflightIDs) - if err != nil { - cl.logger.Infof("error scanning rows: %s", err) - } - } - }() - return statusesToSign -} - -// scanFromDBOneBatch scans up to `cl.scanBatchSize` rows from certificateStatus, in order, and -// writes them to `output`. When done, it returns the highest `id` it saw during the scan. -// We do this in batches because if we tried to scan the whole table in a single query, MariaDB -// would terminate the query after a certain amount of data transferred. -func (cl *client) scanFromDBOneBatch(ctx context.Context, prevID int64, frequency time.Duration, output chan<- *sa.CertStatusMetadata, inflightIDs *inflight) (int64, error) { - rowTicker := time.NewTicker(frequency) - - query := fmt.Sprintf("SELECT %s FROM certificateStatus WHERE id > ? ORDER BY id LIMIT ?", - strings.Join(sa.CertStatusMetadataFields(), ", ")) - rows, err := cl.db.QueryContext(ctx, query, prevID, cl.scanBatchSize) - if err != nil { - return -1, fmt.Errorf("scanning certificateStatus: %w", err) - } - defer func() { - rerr := rows.Close() - if rerr != nil { - cl.logger.Infof("closing rows: %s", rerr) - } - }() - - var scanned int - var previousID int64 - for rows.Next() { - <-rowTicker.C - - status := new(sa.CertStatusMetadata) - err := sa.ScanCertStatusMetadataRow(rows, status) - if err != nil { - return -1, fmt.Errorf("scanning row %d (previous ID %d): %w", scanned, previousID, err) - } - scanned++ - inflightIDs.add(uint64(status.ID)) - // Emit a log line every 100000 rows. For our current ~215M rows, that - // will emit about 2150 log lines. This probably strikes a good balance - // between too spammy and having a reasonably frequent checkpoint. - if scanned%100000 == 0 { - cl.logger.Infof("scanned %d certificateStatus rows. minimum inflight ID %d", scanned, inflightIDs.min()) - } - output <- status - previousID = status.ID - } - return previousID, nil -} - -// signAndStoreResponses consumes cert statuses on its input channel and writes them to its output -// channel. Before returning, it atomically decrements the provided runningSigners int. If the -// result is 0, indicating this was the last running signer, it closes its output channel. 
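The query shape in scanFromDBOneBatch (`WHERE id > ? ORDER BY id LIMIT ?`, then feed the last id seen back in) is keyset pagination: no OFFSET, so each batch is a bounded index-range scan, and no single result set can hit MariaDB's transfer cutoff. A minimal sketch of the same loop with the rate limiting, metadata, and inflight tracking stripped away (compiles as a standalone package; error handling abbreviated):

```go
package scan

import (
	"context"
	"database/sql"
)

// scanIDs walks certificateStatus in primary-key order, batchSize rows at a
// time, feeding the last id seen back into the next query.
func scanIDs(ctx context.Context, db *sql.DB, maxID int64, batchSize int) error {
	var prevID int64
	for prevID < maxID {
		rows, err := db.QueryContext(ctx,
			"SELECT id FROM certificateStatus WHERE id > ? ORDER BY id LIMIT ?",
			prevID, batchSize)
		if err != nil {
			return err
		}
		n := 0
		for rows.Next() {
			if err := rows.Scan(&prevID); err != nil {
				rows.Close()
				return err
			}
			n++ // prevID now holds the highest id seen so far
		}
		if err := rows.Close(); err != nil {
			return err
		}
		if n == 0 {
			break // nothing left below maxID; avoid spinning
		}
	}
	return nil
}
```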
-func (cl *client) signAndStoreResponses(ctx context.Context, input <-chan *sa.CertStatusMetadata, output chan processResult, runningSigners *int32) { - defer func() { - if atomic.AddInt32(runningSigners, -1) <= 0 { - close(output) - } - }() - for status := range input { - ocspReq := &capb.GenerateOCSPRequest{ - Serial: status.Serial, - IssuerID: status.IssuerID, - Status: string(status.Status), - Reason: int32(status.RevokedReason), - RevokedAt: status.RevokedDate.UnixNano(), - } - result, err := cl.ocspGenerator.GenerateOCSP(ctx, ocspReq) - if err != nil { - output <- processResult{id: uint64(status.ID), err: err} - continue - } - // ttl is the remaining lifetime of the certificate - ttl := status.NotAfter.Sub(cl.clk.Now()) - issuer, err := rocsp_config.FindIssuerByID(status.IssuerID, cl.issuers) - if err != nil { - output <- processResult{id: uint64(status.ID), err: err} - continue - } - - err = cl.redis.StoreResponse(ctx, result.Response, issuer.ShortID(), ttl) - if err != nil { - output <- processResult{id: uint64(status.ID), err: err} - } else { - output <- processResult{id: uint64(status.ID), err: nil} - } - } -} - -type expiredError struct { - serial string - ago time.Duration -} - -func (e expiredError) Error() string { - return fmt.Sprintf("response for %s expired %s ago", e.serial, e.ago) -} - -func (cl *client) storeResponsesFromFiles(ctx context.Context, files []string) error { - for _, respFile := range files { - respBytes, err := ioutil.ReadFile(respFile) - if err != nil { - return fmt.Errorf("reading response file %q: %w", respFile, err) - } - err = cl.storeResponse(ctx, respBytes, nil) - if err != nil { - return err - } - } - return nil -} - -func (cl *client) storeResponse(ctx context.Context, respBytes []byte, ttl *time.Duration) error { - resp, err := ocsp.ParseResponse(respBytes, nil) - if err != nil { - return fmt.Errorf("parsing response: %w", err) - } - issuer, err := rocsp_config.FindIssuerByName(resp, cl.issuers) - if err != nil { - return fmt.Errorf("finding issuer for response: %w", err) - } - - // Re-parse the response, this time verifying with the appropriate issuer - resp, err = ocsp.ParseResponse(respBytes, issuer.Certificate.Certificate) - if err != nil { - return fmt.Errorf("parsing response: %w", err) - } - - serial := core.SerialToString(resp.SerialNumber) - - if resp.NextUpdate.Before(cl.clk.Now()) { - return expiredError{ - serial: serial, - ago: cl.clk.Now().Sub(resp.NextUpdate), - } - } - - // Note: Here we set the TTL to slightly more than the lifetime of the - // OCSP response. In ocsp-updater we'll want to set it to the lifetime - // of the certificate, so that the metadata field doesn't fall out of - // storage even if we are down for days. However, in this tool we don't - // have the full certificate, so this will do. 
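Concretely, with invented times: if `resp.NextUpdate` is 72 hours away when the fallback below runs, the stored TTL is 72h + 1h = 73h, so the Redis entry outlives the response it holds by exactly one hour.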
- if ttl == nil { - ttl_temp := resp.NextUpdate.Sub(cl.clk.Now()) + time.Hour - ttl = &ttl_temp - } - - cl.logger.Infof("storing response for %s, generated %s, ttl %g hours", - serial, - resp.ThisUpdate, - ttl.Hours(), - ) - - err = cl.redis.StoreResponse(ctx, respBytes, issuer.ShortID(), *ttl) - if err != nil { - return fmt.Errorf("storing response: %w", err) - } - - retrievedResponse, err := cl.redis.GetResponse(ctx, serial) - if err != nil { - return fmt.Errorf("getting response: %w", err) - } - - parsedRetrievedResponse, err := ocsp.ParseResponse(retrievedResponse, issuer.Certificate.Certificate) - if err != nil { - return fmt.Errorf("parsing retrieved response: %w", err) - } - cl.logger.Infof("retrieved %s", helper.PrettyResponse(parsedRetrievedResponse)) - return nil -} diff --git a/cmd/rocsp-tool/client_test.go b/cmd/rocsp-tool/client_test.go deleted file mode 100644 index 0d0cfed1a6d..00000000000 --- a/cmd/rocsp-tool/client_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package notmain - -import ( - "context" - "fmt" - "math/big" - "testing" - "time" - - "github.com/go-redis/redis/v8" - "github.com/jmhodges/clock" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/rocsp" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" - "github.com/letsencrypt/boulder/test" - "github.com/letsencrypt/boulder/test/vars" - "golang.org/x/crypto/ocsp" - "google.golang.org/grpc" -) - -func makeClient() (*rocsp.WritingClient, clock.Clock) { - CACertFile := "../../test/redis-tls/minica.pem" - CertFile := "../../test/redis-tls/boulder/cert.pem" - KeyFile := "../../test/redis-tls/boulder/key.pem" - tlsConfig := cmd.TLSConfig{ - CACertFile: &CACertFile, - CertFile: &CertFile, - KeyFile: &KeyFile, - } - tlsConfig2, err := tlsConfig.Load() - if err != nil { - panic(err) - } - - rdb := redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: []string{"10.33.33.2:4218"}, - Username: "unittest-rw", - Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d", - TLSConfig: tlsConfig2, - }) - clk := clock.NewFake() - return rocsp.NewWritingClient(rdb, 500*time.Millisecond, clk, metrics.NoopRegisterer), clk -} - -func TestGetStartingID(t *testing.T) { - clk := clock.NewFake() - dbMap, err := sa.NewDbMap(vars.DBConnSAFullPerms, sa.DbSettings{}) - test.AssertNotError(t, err, "failed setting up db client") - defer test.ResetSATestDatabase(t)() - sa.SetSQLDebug(dbMap, blog.Get()) - - cs := core.CertificateStatus{ - Serial: "1337", - NotAfter: clk.Now().Add(12 * time.Hour), - } - err = dbMap.Insert(&cs) - test.AssertNotError(t, err, "inserting certificate status") - firstID := cs.ID - - cs = core.CertificateStatus{ - Serial: "1338", - NotAfter: clk.Now().Add(36 * time.Hour), - } - err = dbMap.Insert(&cs) - test.AssertNotError(t, err, "inserting certificate status") - secondID := cs.ID - t.Logf("first ID %d, second ID %d", firstID, secondID) - - clk.Sleep(48 * time.Hour) - - startingID, err := getStartingID(context.Background(), clk, dbMap.Db) - test.AssertNotError(t, err, "getting starting ID") - - test.AssertEquals(t, startingID, secondID) -} - -func TestStoreResponse(t *testing.T) { - redisClient, clk := makeClient() - - issuer, err := core.LoadCert("../../test/hierarchy/int-e1.cert.pem") - test.AssertNotError(t, err, "loading int-e1") - - issuerKey, err := 
test.LoadSigner("../../test/hierarchy/int-e1.key.pem") - test.AssertNotError(t, err, "loading int-e1 key ") - response, err := ocsp.CreateResponse(issuer, issuer, ocsp.Response{ - SerialNumber: big.NewInt(1337), - Status: 0, - ThisUpdate: clk.Now(), - NextUpdate: clk.Now().Add(time.Hour), - }, issuerKey) - test.AssertNotError(t, err, "creating OCSP response") - - issuers, err := rocsp_config.LoadIssuers(map[string]int{ - "../../test/hierarchy/int-e1.cert.pem": 23, - }) - if err != nil { - t.Fatal(err) - } - - cl := client{ - issuers: issuers, - redis: redisClient, - db: nil, - ocspGenerator: nil, - clk: clk, - logger: blog.NewMock(), - } - - ttl := time.Hour - err = cl.storeResponse(context.Background(), response, &ttl) - test.AssertNotError(t, err, "storing response") -} - -type mockOCSPGenerator struct{} - -func (mog mockOCSPGenerator) GenerateOCSP(ctx context.Context, in *capb.GenerateOCSPRequest, opts ...grpc.CallOption) (*capb.OCSPResponse, error) { - return &capb.OCSPResponse{ - Response: []byte("phthpbt"), - }, nil - -} - -func TestLoadFromDB(t *testing.T) { - redisClient, clk := makeClient() - - dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) - if err != nil { - t.Fatalf("Failed to create dbMap: %s", err) - } - - defer test.ResetSATestDatabase(t) - - for i := 0; i < 100; i++ { - err = dbMap.Insert(&core.CertificateStatus{ - Serial: fmt.Sprintf("%036x", i), - OCSPResponse: []byte("phthpbt"), - NotAfter: clk.Now().Add(200 * time.Hour), - OCSPLastUpdated: clk.Now(), - }) - if err != nil { - t.Fatalf("Failed to insert certificateStatus: %s", err) - } - } - - rocspToolClient := client{ - issuers: nil, - redis: redisClient, - db: dbMap.Db, - ocspGenerator: mockOCSPGenerator{}, - clk: clk, - scanBatchSize: 10, - logger: blog.NewMock(), - } - - speed := ProcessingSpeed{ - RowsPerSecond: 10000, - ParallelSigns: 100, - } - - err = rocspToolClient.loadFromDB(context.Background(), speed, 0) - if err != nil { - t.Fatalf("loading from DB: %s", err) - } -} diff --git a/cmd/rocsp-tool/inflight.go b/cmd/rocsp-tool/inflight.go deleted file mode 100644 index 5a0ca5ba669..00000000000 --- a/cmd/rocsp-tool/inflight.go +++ /dev/null @@ -1,53 +0,0 @@ -package notmain - -import "sync" - -type inflight struct { - sync.RWMutex - items map[uint64]struct{} -} - -func newInflight() *inflight { - return &inflight{ - items: make(map[uint64]struct{}), - } -} - -func (i *inflight) add(n uint64) { - i.Lock() - defer i.Unlock() - i.items[n] = struct{}{} -} - -func (i *inflight) remove(n uint64) { - i.Lock() - defer i.Unlock() - delete(i.items, n) -} - -func (i *inflight) len() int { - i.RLock() - defer i.RUnlock() - return len(i.items) -} - -// min returns the numerically smallest key inflight. If nothing is inflight, -// it returns 0. Note: this takes O(n) time in the number of keys and should -// be called rarely. 
-func (i *inflight) min() uint64 { - i.RLock() - defer i.RUnlock() - if len(i.items) == 0 { - return 0 - } - var min uint64 - for k := range i.items { - if min == 0 { - min = k - } - if k < min { - min = k - } - } - return min -} diff --git a/cmd/rocsp-tool/inflight_test.go b/cmd/rocsp-tool/inflight_test.go deleted file mode 100644 index 9ce52ee03a7..00000000000 --- a/cmd/rocsp-tool/inflight_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package notmain - -import ( - "testing" - - "github.com/letsencrypt/boulder/test" -) - -func TestInflight(t *testing.T) { - ifl := newInflight() - test.AssertEquals(t, ifl.len(), 0) - test.AssertEquals(t, ifl.min(), uint64(0)) - - ifl.add(1337) - test.AssertEquals(t, ifl.len(), 1) - test.AssertEquals(t, ifl.min(), uint64(1337)) - - ifl.remove(1337) - test.AssertEquals(t, ifl.len(), 0) - test.AssertEquals(t, ifl.min(), uint64(0)) - - ifl.add(7341) - ifl.add(3317) - ifl.add(1337) - test.AssertEquals(t, ifl.len(), 3) - test.AssertEquals(t, ifl.min(), uint64(1337)) - - ifl.remove(3317) - ifl.remove(1337) - ifl.remove(7341) - test.AssertEquals(t, ifl.len(), 0) - test.AssertEquals(t, ifl.min(), uint64(0)) -} diff --git a/cmd/rocsp-tool/main.go b/cmd/rocsp-tool/main.go deleted file mode 100644 index 4a7e090831d..00000000000 --- a/cmd/rocsp-tool/main.go +++ /dev/null @@ -1,229 +0,0 @@ -package notmain - -import ( - "context" - "database/sql" - "encoding/base64" - "flag" - "fmt" - "math/rand" - "os" - "time" - - "github.com/jmhodges/clock" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" - bgrpc "github.com/letsencrypt/boulder/grpc" - "github.com/letsencrypt/boulder/metrics" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" - "github.com/letsencrypt/boulder/test/ocsp/helper" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -type Config struct { - ROCSPTool struct { - cmd.ServiceConfig - Redis rocsp_config.RedisConfig - // Issuers is a map from filenames to short issuer IDs. - // Each filename must contain an issuer certificate. The short issuer - // IDs are arbitrarily assigned and must be consistent across OCSP - // components. For production we'll use the number part of the CN, i.e. - // E1 -> 1, R3 -> 3, etc. - Issuers map[string]int - - // If using load-from-db, this provides credentials to connect to the DB - // and the CA. Otherwise, it's optional. - LoadFromDB *LoadFromDBConfig - } - Syslog cmd.SyslogConfig -} - -// LoadFromDBConfig provides the credentials and configuration needed to load -// data from the certificateStatuses table in the DB and get it signed. -type LoadFromDBConfig struct { - // Credentials to connect to the DB. - DB cmd.DBConfig - // Credentials to request OCSP signatures from the CA. - GRPCTLS cmd.TLSConfig - // Timeouts and hostnames for the CA. - OCSPGeneratorService cmd.GRPCClientConfig - // How fast to process rows. - Speed ProcessingSpeed -} - -type ProcessingSpeed struct { - // If using load-from-db, this limits how many items per second we - // scan from the DB. We might go slower than this depending on how fast - // we read rows from the DB, but we won't go faster. Defaults to 2000. - RowsPerSecond int - // If using load-from-db, this controls how many parallel requests to - // boulder-ca for OCSP signing we can make. Defaults to 100. - ParallelSigns int - // If using load-from-db, the LIMIT on our scanning queries. 
We have to - // apply a limit because MariaDB will cut off our response at some - // threshold of total bytes transferred (1 GB by default). Defaults to 10000. - ScanBatchSize int -} - -func init() { - cmd.RegisterCommand("rocsp-tool", main) -} - -func main() { - err := main2() - if err != nil { - cmd.FailOnError(err, "") - } -} - -func main2() error { - configFile := flag.String("config", "", "File path to the configuration file for this service") - startFromID := flag.Int64("start-from-id", 0, "For load-from-db, the first ID in the certificateStatus table to scan") - flag.Parse() - if *configFile == "" { - flag.Usage() - os.Exit(1) - } - rand.Seed(time.Now().UnixNano()) - - var c Config - err := cmd.ReadConfigFile(*configFile, &c) - if err != nil { - return fmt.Errorf("reading JSON config file: %w", err) - } - - _, logger := cmd.StatsAndLogging(c.Syslog, c.ROCSPTool.DebugAddr) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) - - issuers, err := rocsp_config.LoadIssuers(c.ROCSPTool.Issuers) - if err != nil { - return fmt.Errorf("loading issuers: %w", err) - } - if len(issuers) == 0 { - return fmt.Errorf("'issuers' section of config JSON is required") - } - clk := cmd.Clock() - redisClient, err := rocsp_config.MakeClient(&c.ROCSPTool.Redis, clk, metrics.NoopRegisterer) - if err != nil { - return fmt.Errorf("making client: %w", err) - } - - var db *sql.DB - var ocspGenerator capb.OCSPGeneratorClient - var scanBatchSize int - if c.ROCSPTool.LoadFromDB != nil { - lfd := c.ROCSPTool.LoadFromDB - db, err = sa.InitSqlDb(lfd.DB, nil) - if err != nil { - return fmt.Errorf("connecting to DB: %w", err) - } - - ocspGenerator, err = configureOCSPGenerator(lfd.GRPCTLS, - lfd.OCSPGeneratorService, clk, metrics.NoopRegisterer) - if err != nil { - return fmt.Errorf("configuring gRPC to CA: %w", err) - } - setDefault(&lfd.Speed.RowsPerSecond, 2000) - setDefault(&lfd.Speed.ParallelSigns, 100) - setDefault(&lfd.Speed.ScanBatchSize, 10000) - scanBatchSize = lfd.Speed.ScanBatchSize - } - - if len(flag.Args()) < 1 { - helpExit() - } - - ctx := context.Background() - cl := client{ - issuers: issuers, - redis: redisClient, - db: db, - ocspGenerator: ocspGenerator, - clk: clk, - scanBatchSize: scanBatchSize, - logger: logger, - } - switch flag.Arg(0) { - case "get": - for _, serial := range flag.Args()[1:] { - resp, err := cl.redis.GetResponse(ctx, serial) - if err != nil { - return err - } - parsed, err := ocsp.ParseResponse(resp, nil) - if err != nil { - logger.Infof("parsing error on %x: %s", resp, err) - continue - } else { - logger.Infof("%s", helper.PrettyResponse(parsed)) - } - } - case "store": - err := cl.storeResponsesFromFiles(ctx, flag.Args()[1:]) - if err != nil { - return err - } - case "load-from-db": - if c.ROCSPTool.LoadFromDB == nil { - return fmt.Errorf("config field LoadFromDB was missing") - } - err = cl.loadFromDB(ctx, c.ROCSPTool.LoadFromDB.Speed, *startFromID) - if err != nil { - return fmt.Errorf("loading OCSP responses from DB: %w", err) - } - case "scan-metadata": - results := cl.redis.ScanMetadata(ctx, "*") - for r := range results { - if r.Err != nil { - cmd.FailOnError(r.Err, "while scanning") - } - age := clk.Now().Sub(r.Metadata.ThisUpdate) - logger.Infof("%s: %g\n", r.Serial, age.Hours()) - } - case "scan-responses": - results := cl.redis.ScanResponses(ctx, "*") - for r := range results { - if r.Err != nil { - cmd.FailOnError(r.Err, "while scanning") - } - logger.Infof("%s: %s\n", r.Serial, base64.StdEncoding.EncodeToString(r.Body)) - } - default: 
logger.Errf("unrecognized subcommand %q\n", flag.Arg(0)) - helpExit() - } - return nil -} - -func helpExit() { - fmt.Fprintf(os.Stderr, "Usage: %s [store|copy-from-db|scan-metadata|scan-responses] --config path/to/config.json\n", os.Args[0]) - fmt.Fprintln(os.Stderr, " store -- for each filename on command line, read the file as an OCSP response and store it in Redis") - fmt.Fprintln(os.Stderr, " get -- for each serial on command line, fetch that serial's response and pretty-print it") - fmt.Fprintln(os.Stderr, " load-from-db -- scan the database for all OCSP entries for unexpired certificates, and store in Redis") - fmt.Fprintln(os.Stderr, " scan-metadata -- scan Redis for metadata entries. For each entry, print the serial and the age in hours") - fmt.Fprintln(os.Stderr, " scan-responses -- scan Redis for OCSP response entries. For each entry, print the serial and base64-encoded response") - fmt.Fprintln(os.Stderr) - flag.PrintDefaults() - os.Exit(1) -} - -func configureOCSPGenerator(tlsConf cmd.TLSConfig, grpcConf cmd.GRPCClientConfig, clk clock.Clock, stats prometheus.Registerer) (capb.OCSPGeneratorClient, error) { - tlsConfig, err := tlsConf.Load() - if err != nil { - return nil, fmt.Errorf("loading TLS config: %w", err) - } - clientMetrics := bgrpc.NewClientMetrics(stats) - caConn, err := bgrpc.ClientSetup(&grpcConf, tlsConfig, clientMetrics, clk) - cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to CA") - return capb.NewOCSPGeneratorClient(caConn), nil -} - -// setDefault sets the target to a default value, if it is zero. -func setDefault(target *int, def int) { - if *target == 0 { - *target = def - } -} diff --git a/cmd/rocsp-tool/testdata/ocsp.response b/cmd/rocsp-tool/testdata/ocsp.response deleted file mode 100644 index c52cbbc1eb4..00000000000 Binary files a/cmd/rocsp-tool/testdata/ocsp.response and /dev/null differ diff --git a/cmd/sfe/main.go b/cmd/sfe/main.go new file mode 100644 index 00000000000..cfc5e5f0a2d --- /dev/null +++ b/cmd/sfe/main.go @@ -0,0 +1,277 @@ +package notmain + +import ( + "context" + "flag" + "net/http" + "os" + "sync" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/features" + bgrpc "github.com/letsencrypt/boulder/grpc" + rapb "github.com/letsencrypt/boulder/ra/proto" + "github.com/letsencrypt/boulder/ratelimits" + bredis "github.com/letsencrypt/boulder/redis" + sapb "github.com/letsencrypt/boulder/sa/proto" + salesforcepb "github.com/letsencrypt/boulder/salesforce/proto" + "github.com/letsencrypt/boulder/sfe" + "github.com/letsencrypt/boulder/sfe/zendesk" + "github.com/letsencrypt/boulder/web" +) + +type Config struct { + SFE struct { + DebugAddr string `validate:"omitempty,hostname_port"` + + // ListenAddress is the address:port on which to listen for incoming + // HTTP requests. Defaults to ":80". + ListenAddress string `validate:"omitempty,hostname_port"` + + // Timeout is the per-request overall timeout. This should be slightly + // lower than the upstream's timeout when making requests to this service. + Timeout config.Duration `validate:"-"` + + // ShutdownStopTimeout determines the maximum amount of time to wait + // for extant request handlers to complete before exiting. It should be + // greater than Timeout. 
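+		// As an illustration only (these values are examples, not shipped
+		// defaults), a config might pair the two timeouts like so:
+		//
+		//	"timeout": "29s",
+		//	"shutdownStopTimeout": "60s",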
+ ShutdownStopTimeout config.Duration + + TLS cmd.TLSConfig + + RAService *cmd.GRPCClientConfig + SAService *cmd.GRPCClientConfig + EmailExporter *cmd.GRPCClientConfig + + // UnpauseHMACKey validates incoming JWT signatures at the unpause + // endpoint. This key must be the same as the one configured for all + // WFEs. This field is required to enable the pausing feature. + UnpauseHMACKey cmd.HMACKeyConfig + + Zendesk *struct { + BaseURL string `validate:"required,url"` + TokenEmail string `validate:"required,email"` + Token cmd.PasswordConfig `validate:"required,dive"` + CustomFields struct { + Organization int64 `validate:"required"` + Tier int64 `validate:"required"` + RateLimit int64 `validate:"required"` + ReviewStatus int64 `validate:"required"` + AccountURI int64 `validate:"required"` + RegisteredDomain int64 `validate:"required"` + IPAddress int64 `validate:"required"` + } `validate:"required,dive"` + } `validate:"omitempty,dive"` + + Limiter struct { + // Redis contains the configuration necessary to connect to Redis + // for rate limiting. This field is required to enable rate + // limiting. + Redis *bredis.Config `validate:"required_with=Defaults"` + + // Defaults is a path to a YAML file containing default rate limits. + // See: ratelimits/README.md for details. This field is required to + // enable rate limiting. If any individual rate limit is not set, + // that limit will be disabled. Failed Authorizations limits passed + // in this file must be identical to those in the RA. + Defaults string `validate:"required_with=Redis"` + } + + // OverridesImporter configures the periodic import of approved rate + // limit override requests from Zendesk. + OverridesImporter struct { + // Mode controls which tickets are processed. Valid values are: + // - "all": process all tickets + // - "even": process only tickets with even IDs + // - "odd": process only tickets with odd IDs + // If unspecified or empty, defaults to "all". + Mode string `validate:"omitempty,required_with=Interval,oneof=all even odd"` + // Interval is the amount of time between runs of the importer. If + // zero or unspecified, the importer is disabled. Minimum value is + // 20 minutes. 
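+			// For example (an illustrative setting that satisfies the
+			// 20-minute minimum), to process only odd-numbered tickets
+			// every 30 minutes:
+			//
+			//	"overridesImporter": {"mode": "odd", "interval": "30m"}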
+ Interval config.Duration `validate:"omitempty,required_with=Mode,min=1200s"` + } `validate:"omitempty,dive"` + + // AutoApproveOverrides enables automatic approval of override requests + // for the following limits and tiers: + // - NewOrdersPerAccount: 1000 + // - CertificatesPerDomain: 300 + // - CertificatesPerDomainPerAccount: 300 + AutoApproveOverrides bool `validate:"-"` + Features features.Config + } + + Syslog cmd.SyslogConfig + OpenTelemetry cmd.OpenTelemetryConfig + + // OpenTelemetryHTTPConfig configures tracing on incoming HTTP requests + OpenTelemetryHTTPConfig cmd.OpenTelemetryHTTPConfig +} + +func main() { + listenAddr := flag.String("addr", "", "HTTP listen address override") + debugAddr := flag.String("debug-addr", "", "Debug server address override") + configFile := flag.String("config", "", "File path to the configuration file for this service") + flag.Parse() + if *configFile == "" { + flag.Usage() + os.Exit(1) + } + + var c Config + err := cmd.ReadConfigFile(*configFile, &c) + cmd.FailOnError(err, "Reading JSON config file into config structure") + + features.Set(c.SFE.Features) + + if *listenAddr != "" { + c.SFE.ListenAddress = *listenAddr + } + if c.SFE.ListenAddress == "" { + cmd.Fail("HTTP listen address is not configured") + } + if *debugAddr != "" { + c.SFE.DebugAddr = *debugAddr + } + + stats, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.SFE.DebugAddr) + cmd.LogStartup(logger) + + clk := clock.New() + + unpauseHMACKey, err := c.SFE.UnpauseHMACKey.Load() + cmd.FailOnError(err, "Failed to load unpauseHMACKey") + + tlsConfig, err := c.SFE.TLS.Load(stats) + cmd.FailOnError(err, "TLS config") + + raConn, err := bgrpc.ClientSetup(c.SFE.RAService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA") + rac := rapb.NewRegistrationAuthorityClient(raConn) + + saConn, err := bgrpc.ClientSetup(c.SFE.SAService, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA") + sac := sapb.NewStorageAuthorityReadOnlyClient(saConn) + + var eec salesforcepb.ExporterClient + if c.SFE.EmailExporter != nil { + emailExporterConn, err := bgrpc.ClientSetup(c.SFE.EmailExporter, tlsConfig, stats, clk) + cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to email-exporter") + eec = salesforcepb.NewExporterClient(emailExporterConn) + } + + var zendeskClient *zendesk.Client + var overridesImporterShutdown func() + var overridesImporterWG sync.WaitGroup + if c.SFE.Zendesk != nil { + zendeskToken, err := c.SFE.Zendesk.Token.Pass() + cmd.FailOnError(err, "Failed to load Zendesk token") + + zendeskClient, err = zendesk.NewClient( + c.SFE.Zendesk.BaseURL, + c.SFE.Zendesk.TokenEmail, + zendeskToken, + map[string]int64{ + sfe.OrganizationFieldName: c.SFE.Zendesk.CustomFields.Organization, + sfe.TierFieldName: c.SFE.Zendesk.CustomFields.Tier, + sfe.RateLimitFieldName: c.SFE.Zendesk.CustomFields.RateLimit, + sfe.ReviewStatusFieldName: c.SFE.Zendesk.CustomFields.ReviewStatus, + sfe.AccountURIFieldName: c.SFE.Zendesk.CustomFields.AccountURI, + sfe.RegisteredDomainFieldName: c.SFE.Zendesk.CustomFields.RegisteredDomain, + sfe.IPAddressFieldName: c.SFE.Zendesk.CustomFields.IPAddress, + }, + ) + if err != nil { + cmd.FailOnError(err, "Failed to create Zendesk client") + } + + if c.SFE.OverridesImporter.Interval.Duration > 0 { + mode := sfe.ProcessMode(c.SFE.OverridesImporter.Mode) + if mode == "" { + mode = sfe.ProcessAll + } + + importer, err := 
sfe.NewOverridesImporter( + mode, + c.SFE.OverridesImporter.Interval.Duration, + zendeskClient, + rac, + clk, + logger, + ) + cmd.FailOnError(err, "Creating overrides importer") + + var ctx context.Context + ctx, overridesImporterShutdown = context.WithCancel(context.Background()) + overridesImporterWG.Go(func() { + importer.Start(ctx) + }) + logger.Infof("Overrides importer started with mode=%s interval=%s", mode, c.SFE.OverridesImporter.Interval.Duration) + } + } + + var limiter *ratelimits.Limiter + var txnBuilder *ratelimits.TransactionBuilder + var limiterRedis *bredis.Ring + if c.SFE.Limiter.Defaults != "" { + limiterRedis, err = bredis.NewRingFromConfig(*c.SFE.Limiter.Redis, stats, logger) + cmd.FailOnError(err, "Failed to create Redis ring") + + source := ratelimits.NewRedisSource(limiterRedis.Ring, clk, stats) + limiter, err = ratelimits.NewLimiter(clk, source, stats) + cmd.FailOnError(err, "Failed to create rate limiter") + txnBuilder, err = ratelimits.NewTransactionBuilderFromFiles(c.SFE.Limiter.Defaults, "", stats, logger) + cmd.FailOnError(err, "Failed to create rate limits transaction builder") + } + + sfei, err := sfe.NewSelfServiceFrontEndImpl( + stats, + clk, + logger, + c.SFE.Timeout.Duration, + rac, + sac, + eec, + unpauseHMACKey, + zendeskClient, + limiter, + txnBuilder, + c.SFE.AutoApproveOverrides, + ) + cmd.FailOnError(err, "Unable to create SFE") + + logger.Infof("Server running, listening on %s....", c.SFE.ListenAddress) + handler := sfei.Handler(stats, c.OpenTelemetryHTTPConfig.Options()...) + + srv := web.NewServer(c.SFE.ListenAddress, handler, logger) + go func() { + err := srv.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + cmd.FailOnError(err, "Running HTTP server") + } + }() + + // When main is ready to exit (because it has received a shutdown signal), + // gracefully shutdown the servers. Calling these shutdown functions causes + // ListenAndServe() and ListenAndServeTLS() to immediately return, then waits + // for any lingering connection-handling goroutines to finish their work. + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), c.SFE.ShutdownStopTimeout.Duration) + defer cancel() + if overridesImporterShutdown != nil { + overridesImporterShutdown() + overridesImporterWG.Wait() + } + _ = srv.Shutdown(ctx) + oTelShutdown(ctx) + }() + + cmd.WaitForSignal() +} + +func init() { + cmd.RegisterCommand("sfe", main, &cmd.ConfigValidator{Config: &Config{}}) +} diff --git a/cmd/shell.go b/cmd/shell.go index 7937c24eb3d..2b3509f5aaa 100644 --- a/cmd/shell.go +++ b/cmd/shell.go @@ -1,31 +1,45 @@ -// This package provides utilities that underlie the specific commands. +// Package cmd provides utilities that underlie the specific commands. 
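+//
+// As a sketch (not a verbatim excerpt from any one command), a typical Boulder
+// binary wires these helpers together like so:
+//
+//	defer cmd.AuditPanic()
+//
+//	var c Config
+//	err := cmd.ReadConfigFile(*configFile, &c)
+//	cmd.FailOnError(err, "Reading JSON config file into config structure")
+//
+//	stats, logger, oTelShutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.DebugAddr)
+//	cmd.LogStartup(logger)
+//
+//	// ... start gRPC or HTTP servers in background goroutines, then block:
+//	cmd.WaitForSignal()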
package cmd import ( + "context" "encoding/json" + "errors" "expvar" "fmt" + "io" "log" "log/syslog" "net/http" "net/http/pprof" "os" "os/signal" - "path" "runtime" + "runtime/debug" "strings" "syscall" "time" - "google.golang.org/grpc/grpclog" - + "github.com/go-logr/stdr" "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/redis/go-redis/v9" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.30.0" + "google.golang.org/grpc/grpclog" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/strictyaml" + "github.com/letsencrypt/validator/v10" ) // Because we don't know when this init will be called with respect to @@ -34,7 +48,7 @@ import ( func init() { for _, v := range os.Args { if v == "--version" || v == "-version" { - fmt.Println(VersionString()) + fmt.Printf("%+v", info()) os.Exit(0) } } @@ -45,8 +59,8 @@ type mysqlLogger struct { blog.Logger } -func (m mysqlLogger) Print(v ...interface{}) { - m.AuditErrf("[mysql] %s", fmt.Sprint(v...)) +func (m mysqlLogger) Print(v ...any) { + m.Errf("[mysql] %s", fmt.Sprint(v...)) } // grpcLogger implements the grpclog.LoggerV2 interface. @@ -56,38 +70,38 @@ type grpcLogger struct { // Ensure that fatal logs exit, because we use neither the gRPC default logger // nor the stdlib default logger, both of which would call os.Exit(1) for us. -func (log grpcLogger) Fatal(args ...interface{}) { +func (log grpcLogger) Fatal(args ...any) { log.Error(args...) os.Exit(1) } -func (log grpcLogger) Fatalf(format string, args ...interface{}) { +func (log grpcLogger) Fatalf(format string, args ...any) { log.Errorf(format, args...) os.Exit(1) } -func (log grpcLogger) Fatalln(args ...interface{}) { +func (log grpcLogger) Fatalln(args ...any) { log.Errorln(args...) os.Exit(1) } -// Treat all gRPC error logs as potential audit events. -func (log grpcLogger) Error(args ...interface{}) { - log.Logger.AuditErr(fmt.Sprint(args...)) +// Pass through all Error level logs. +func (log grpcLogger) Error(args ...any) { + log.Logger.Errf("%s", fmt.Sprint(args...)) } -func (log grpcLogger) Errorf(format string, args ...interface{}) { - log.Logger.AuditErrf(format, args...) +func (log grpcLogger) Errorf(format string, args ...any) { + log.Logger.Errf(format, args...) } -func (log grpcLogger) Errorln(args ...interface{}) { - log.Logger.AuditErr(fmt.Sprintln(args...)) +func (log grpcLogger) Errorln(args ...any) { + log.Logger.Errf("%s", fmt.Sprintln(args...)) } // Pass through most Warnings, but filter out a few noisy ones. -func (log grpcLogger) Warning(args ...interface{}) { +func (log grpcLogger) Warning(args ...any) { log.Logger.Warning(fmt.Sprint(args...)) } -func (log grpcLogger) Warningf(format string, args ...interface{}) { +func (log grpcLogger) Warningf(format string, args ...any) { log.Logger.Warningf(format, args...) } -func (log grpcLogger) Warningln(args ...interface{}) { +func (log grpcLogger) Warningln(args ...any) { msg := fmt.Sprintln(args...) 
	// See https://github.com/letsencrypt/boulder/issues/4628
	if strings.Contains(msg, `ccResolverWrapper: error parsing service config: no JSON service config provided`) {
@@ -103,9 +117,9 @@ func (log grpcLogger) Warningln(args ...interface{}) {

 // Don't log any INFO-level gRPC stuff. In practice this is all noise, like
 // failed TXT lookups for service discovery (we only use A records).
-func (log grpcLogger) Info(args ...interface{}) {}
-func (log grpcLogger) Infof(format string, args ...interface{}) {}
-func (log grpcLogger) Infoln(args ...interface{}) {}
+func (log grpcLogger) Info(args ...any) {}
+func (log grpcLogger) Infof(format string, args ...any) {}
+func (log grpcLogger) Infoln(args ...any) {}

 // V returns true if the verbosity level l is less than the verbosity we want to
 // log at.
@@ -122,8 +136,16 @@ type promLogger struct {
 	blog.Logger
 }

-func (log promLogger) Println(args ...interface{}) {
-	log.AuditErr(fmt.Sprint(args...))
+func (log promLogger) Println(args ...any) {
+	log.Errf("%s", fmt.Sprint(args...))
+}
+
+type redisLogger struct {
+	blog.Logger
+}
+
+func (rl redisLogger) Printf(ctx context.Context, format string, v ...any) {
+	rl.Infof(format, v...)
 }

 // logWriter implements the io.Writer interface.
@@ -137,55 +159,120 @@ func (lw logWriter) Write(p []byte) (n int, err error) {
 	return
 }

-// StatsAndLogging constructs a prometheus registerer and an AuditLogger based
-// on its config parameters, and return them both. It also spawns off an HTTP
-// server on the provided port to report the stats and provide pprof profiling
-// handlers. NewLogger and newStatsRegistry will call os.Exit on errors.
-// Also sets the constructed AuditLogger as the default logger, and configures
-// the mysql and grpc packages to use our logger.
-// This must be called before any gRPC code is called, because gRPC's SetLogger
-// doesn't use any locking.
-func StatsAndLogging(logConf SyslogConfig, addr string) (prometheus.Registerer, blog.Logger) {
+// logOutput implements the log.Logger interface's Output method for use with logr
+type logOutput struct {
+	blog.Logger
+}
+
+func (l logOutput) Output(calldepth int, logline string) error {
+	l.Logger.Info(logline)
+	return nil
+}
+
+// StatsAndLogging sets up an AuditLogger, Prometheus Registerer, and
+// OpenTelemetry tracing. It returns the Registerer and AuditLogger, along
+// with a graceful shutdown function to be deferred.
+//
+// It spawns off an HTTP server on the provided port to report the stats and
+// provide pprof profiling handlers.
+//
+// It sets the constructed AuditLogger as the default logger, and configures the
+// mysql and grpc packages to use our logger. This must be called before any
+// gRPC code is called, because gRPC's SetLogger doesn't use any locking.
+//
+// This function does not return an error, and will panic on problems.
+func StatsAndLogging(logConf SyslogConfig, otConf OpenTelemetryConfig, addr string) (prometheus.Registerer, blog.Logger, func(context.Context)) {
 	logger := NewLogger(logConf)
-	return newStatsRegistry(addr, logger), logger
+
+	shutdown := NewOpenTelemetry(otConf, logger)
+
+	return newStatsRegistry(addr, logger), logger, shutdown
 }

+// NewLogger creates a logger object with the provided settings, sets it as
+// the global logger, and returns it.
+//
+// It also sets the logging systems for various packages we use to go through
+// the created logger, and sets up a periodic log event for the current timestamp.
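+//
+// For example (a sketch grounded in the SyslogLevel handling below; the level
+// values shown are illustrative), a caller that wants stdout-only logging can
+// disable syslog by passing a negative SyslogLevel:
+//
+//	logger := NewLogger(SyslogConfig{StdoutLevel: 6, SyslogLevel: -1})
+//	logger.Info("now logging to stdout only")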
func NewLogger(logConf SyslogConfig) blog.Logger { - tag := path.Base(os.Args[0]) - syslogger, err := syslog.Dial( - "", - "", - syslog.LOG_INFO, // default, not actually used - tag) - FailOnError(err, "Could not connect to Syslog") - syslogLevel := int(syslog.LOG_INFO) - if logConf.SyslogLevel != 0 { - syslogLevel = logConf.SyslogLevel + var logger blog.Logger + if logConf.SyslogLevel >= 0 { + syslogger, err := syslog.Dial( + "", + "", + syslog.LOG_INFO, // default, not actually used + core.Command()) + FailOnError(err, "Could not connect to Syslog") + syslogLevel := int(syslog.LOG_INFO) + if logConf.SyslogLevel != 0 { + syslogLevel = logConf.SyslogLevel + } + logger, err = blog.New(syslogger, logConf.StdoutLevel, syslogLevel) + FailOnError(err, "Could not connect to Syslog") + } else { + logger = blog.StdoutLogger(logConf.StdoutLevel) } - logger, err := blog.New(syslogger, logConf.StdoutLevel, syslogLevel) - FailOnError(err, "Could not connect to Syslog") _ = blog.Set(logger) _ = mysql.SetLogger(mysqlLogger{logger}) grpclog.SetLoggerV2(grpcLogger{logger}) log.SetOutput(logWriter{logger}) + redis.SetLogger(redisLogger{logger}) // Periodically log the current timestamp, to ensure syslog timestamps match // Boulder's conception of time. go func() { for { - time.Sleep(time.Minute) + time.Sleep(time.Hour) logger.Info(fmt.Sprintf("time=%s", time.Now().Format(time.RFC3339Nano))) } }() return logger } +func newVersionCollector() prometheus.Collector { + buildTime := core.Unspecified + if core.GetBuildTime() != core.Unspecified { + // core.BuildTime is set by our Makefile using the shell command 'date + // -u' which outputs in a consistent format across all POSIX systems. + bt, err := time.Parse(time.UnixDate, core.BuildTime) + if err != nil { + // Should never happen unless the Makefile is changed. + buildTime = "Unparsable" + } else { + buildTime = bt.Format(time.RFC3339) + } + } + return prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Name: "version", + Help: fmt.Sprintf( + "A metric with a constant value of '1' labeled by the short commit-id (buildId), build timestamp in RFC3339 format (buildTime), and Go release tag like 'go1.3' (goVersion) from which %s was built.", + core.Command(), + ), + ConstLabels: prometheus.Labels{ + "buildId": core.GetBuildID(), + "buildTime": buildTime, + "goVersion": runtime.Version(), + }, + }, + func() float64 { return 1 }, + ) +} + func newStatsRegistry(addr string, logger blog.Logger) prometheus.Registerer { registry := prometheus.NewRegistry() + + if addr == "" { + logger.Info("No debug listen address specified") + return registry + } + registry.MustRegister(collectors.NewGoCollector()) registry.MustRegister(collectors.NewProcessCollector( collectors.ProcessCollectorOpts{})) + registry.MustRegister(newVersionCollector()) + registry.MustRegister(version.NewCollector("boulder")) mux := http.NewServeMux() // Register the available pprof handlers. 
These are all registered on @@ -208,29 +295,113 @@ func newStatsRegistry(addr string, logger blog.Logger) prometheus.Registerer { ErrorLog: promLogger{logger}, })) + logger.Infof("Debug server listening on %s", addr) + server := http.Server{ - Addr: addr, - Handler: mux, + Addr: addr, + Handler: mux, + ReadTimeout: time.Minute, } go func() { err := server.ListenAndServe() - if err != nil { - logger.Errf("unable to boot debug server on %s: %v", addr, err) - os.Exit(1) - } + FailOnError(err, "Unable to boot debug server") }() return registry } -// Fail exits and prints an error message to stderr and the logger audit log. -func Fail(msg string) { - logger := blog.Get() - logger.AuditErr(msg) +// NewOpenTelemetry sets up our OpenTelemetry tracing +// It returns a graceful shutdown function to be deferred. +func NewOpenTelemetry(config OpenTelemetryConfig, logger blog.Logger) func(ctx context.Context) { + otel.SetLogger(stdr.New(logOutput{logger})) + otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { logger.Errf("OpenTelemetry error: %v", err) })) + + resources := resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceName(core.Command()), + semconv.ServiceVersion(core.GetBuildID()), + semconv.ProcessPID(os.Getpid()), + ) + + opts := []trace.TracerProviderOption{ + trace.WithResource(resources), + // Use a ParentBased sampler to respect the sample decisions on incoming + // traces, and TraceIDRatioBased to randomly sample new traces. + trace.WithSampler(trace.ParentBased(trace.TraceIDRatioBased(config.SampleRatio))), + } + + if config.Endpoint != "" { + exporter, err := otlptracegrpc.New( + context.Background(), + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(config.Endpoint)) + if err != nil { + FailOnError(err, "Could not create OpenTelemetry OTLP exporter") + } + + opts = append(opts, trace.WithBatcher(exporter)) + } + + tracerProvider := trace.NewTracerProvider(opts...) + otel.SetTracerProvider(tracerProvider) + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) + + return func(ctx context.Context) { + err := tracerProvider.Shutdown(ctx) + if err != nil { + logger.Errf("Error while shutting down OpenTelemetry: %v", err) + } + } +} + +// AuditPanic catches and logs panics, then exits with exit code 1. +// This method should be called in a defer statement as early as possible. +func AuditPanic() { + err := recover() + // No panic, no problem + if err == nil { + blog.Get().AuditInfo("Process exiting normally", info()) + return + } + // Get the global logger if it's initialized, or create a default one if not. + // We could wind up creating a default logger if we panic so early in a process' + // lifetime that we haven't yet parsed the config and created a logger. + log := blog.Get() + // For the special type `failure`, audit log the message and exit quietly + fail, ok := err.(failure) + if ok { + log.AuditErr(fail.msg, nil, nil) + } else { + // For all other values (which might not be an error) passed to `panic`, log + // them and a stack trace + log.AuditErr("Panic", nil, map[string]any{ + "panic": fmt.Sprintf("%#v", err), + "stack": string(debug.Stack()), + }) + } + // Because this function is deferred as early as possible, there's no further defers to run after this one + // So it is safe to os.Exit to set the exit code and exit without losing any defers we haven't executed. 
+	os.Exit(1)
 }

-// FailOnError exits and prints an error message, but only if we encountered
-// a problem and err != nil. err is required but msg can be "".
+// failure is a sentinel type that `Fail` passes to `panic` so `AuditPanic` can exit
+// quietly and print the msg.
+type failure struct {
+	msg string
+}
+
+func (f failure) String() string {
+	return f.msg
+}
+
+// Fail raises a panic with a special type that causes `AuditPanic` to audit log the provided message
+// and then exit nonzero (without printing a stack trace).
+func Fail(msg string) {
+	panic(failure{msg})
+}
+
+// FailOnError calls Fail if the provided error is non-nil.
+// This is useful for one-line error handling in top-level executables,
+// but should generally be avoided in libraries. The message argument is optional.
 func FailOnError(err error, msg string) {
 	if err == nil {
 		return
@@ -242,57 +413,165 @@
 	}
 }

+func decodeJSONStrict(in io.Reader, out any) error {
+	decoder := json.NewDecoder(in)
+	decoder.DisallowUnknownFields()
+
+	return decoder.Decode(out)
+}
+
 // ReadConfigFile takes a file path as an argument and attempts to
 // unmarshal the content of the file into a struct containing a
 // configuration of a boulder component. Any config keys in the JSON
 // file which do not correspond to expected keys in the config struct
 // will result in errors.
-func ReadConfigFile(filename string, out interface{}) error {
+func ReadConfigFile(filename string, out any) error {
 	file, err := os.Open(filename)
 	if err != nil {
 		return err
 	}
 	defer file.Close()
-	decoder := json.NewDecoder(file)
-	decoder.DisallowUnknownFields()
-	return decoder.Decode(out)
+	return decodeJSONStrict(file, out)
 }

-// VersionString produces a friendly Application version string.
-func VersionString() string {
-	name := path.Base(os.Args[0])
-	return fmt.Sprintf("Versions: %s=(%s %s) Golang=(%s) BuildHost=(%s)", name, core.GetBuildID(), core.GetBuildTime(), runtime.Version(), core.GetBuildHost())
+// ValidateJSONConfig takes a *ConfigValidator and an io.Reader containing a
+// JSON representation of a config. The JSON data is unmarshaled into the
+// *ConfigValidator's inner Config and then validated according to the
+// 'validate' tags on each field. Callers can use cmd.LookupConfigValidator
+// to get a *ConfigValidator for a given Boulder component. This is exported for
+// use in SRE CI tooling.
+func ValidateJSONConfig(cv *ConfigValidator, in io.Reader) error {
+	if cv == nil {
+		return errors.New("config validator cannot be nil")
+	}
+
+	// Initialize the validator and load any custom tags.
+	validate := validator.New()
+	for tag, v := range cv.Validators {
+		err := validate.RegisterValidation(tag, v)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Register custom types for use with existing validation tags.
+	validate.RegisterCustomTypeFunc(config.DurationCustomTypeFunc, config.Duration{})
+
+	err := decodeJSONStrict(in, cv.Config)
+	if err != nil {
+		return err
+	}
+	err = validate.Struct(cv.Config)
+	if err != nil {
+		errs, ok := err.(validator.ValidationErrors)
+		if !ok {
+			// This should never happen.
+			return err
+		}
+		if len(errs) > 0 {
+			allErrs := []string{}
+			for _, e := range errs {
+				allErrs = append(allErrs, e.Error())
+			}
+			return errors.New(strings.Join(allErrs, ", "))
+		}
+	}
+	return nil
 }

-// CatchSignals catches SIGTERM, SIGINT, SIGHUP and executes a callback
-// method before exiting
-func CatchSignals(logger blog.Logger, callback func()) {
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, syscall.SIGTERM)
-	signal.Notify(sigChan, syscall.SIGINT)
-	signal.Notify(sigChan, syscall.SIGHUP)
+// ValidateYAMLConfig takes a *ConfigValidator and an io.Reader containing a
+// YAML representation of a config. The YAML data is unmarshaled into the
+// *ConfigValidator's inner Config and then validated according to the
+// 'validate' tags on each field. Callers can use cmd.LookupConfigValidator
+// to get a *ConfigValidator for a given Boulder component. This is exported for
+// use in SRE CI tooling.
+func ValidateYAMLConfig(cv *ConfigValidator, in io.Reader) error {
+	if cv == nil {
+		return errors.New("config validator cannot be nil")
+	}

-	<-sigChan
-	if callback != nil {
-		callback()
+	// Initialize the validator and load any custom tags.
+	validate := validator.New()
+	for tag, v := range cv.Validators {
+		err := validate.RegisterValidation(tag, v)
+		if err != nil {
+			return err
+		}
 	}
-	os.Exit(0)
-}

+	// Register custom types for use with existing validation tags.
+	validate.RegisterCustomTypeFunc(config.DurationCustomTypeFunc, config.Duration{})

-// FilterShutdownErrors returns the input error, with the exception of "use of
-// closed network connection," on which it returns nil
-// Per https://github.com/grpc/grpc-go/issues/1017, a gRPC server's `Serve()`
-// will always return an error, even when GracefulStop() is called. We don't
-// want to log graceful stops as errors, so we filter out the meaningless
-// error we get in that situation.
-func FilterShutdownErrors(err error) error {
-	if err == nil {
-		return nil
+	inBytes, err := io.ReadAll(in)
+	if err != nil {
+		return err
+	}
+	err = strictyaml.Unmarshal(inBytes, cv.Config)
+	if err != nil {
+		return err
 	}
-	if strings.Contains(err.Error(), "use of closed network connection") {
-		return nil
+	err = validate.Struct(cv.Config)
+	if err != nil {
+		errs, ok := err.(validator.ValidationErrors)
+		if !ok {
+			// This should never happen.
+			return err
+		}
+		if len(errs) > 0 {
+			allErrs := []string{}
+			for _, e := range errs {
+				allErrs = append(allErrs, e.Error())
+			}
+			return errors.New(strings.Join(allErrs, ", "))
+		}
 	}
-	return err
+	return nil
+}
+
+type buildInfo struct {
+	Command   string
+	BuildID   string
+	BuildTime string
+	GoVersion string
+	BuildHost string
+}
+
+// info produces build information about this binary.
+func info() buildInfo {
+	return buildInfo{
+		Command:   core.Command(),
+		BuildID:   core.GetBuildID(),
+		BuildTime: core.GetBuildTime(),
+		GoVersion: runtime.Version(),
+		BuildHost: core.GetBuildHost(),
+	}
+}
+
+func LogStartup(logger blog.Logger) {
+	logger.AuditInfo("Process starting", info())
+}
+
+// CatchSignals blocks until a SIGTERM, SIGINT, or SIGHUP is received, then
+// executes the given callback. The callback should not block; it should simply
+// signal other goroutines (particularly the main goroutine) to clean themselves
+// up and exit. This function is intended to be called in its own goroutine,
+// while the main goroutine waits for an indication that the other goroutines
+// have exited cleanly.
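+//
+// A sketch of the intended usage (hypothetical caller code):
+//
+//	done := make(chan struct{})
+//	go cmd.CatchSignals(func() { close(done) })
+//	<-done // the main goroutine resumes here after SIGTERM, SIGINT, or SIGHUP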
+func CatchSignals(callback func()) {
+	WaitForSignal()
+	callback()
+}
+
+// WaitForSignal blocks until a SIGTERM, SIGINT, or SIGHUP is received. It then
+// returns, allowing execution to resume, generally allowing a main() function
+// to return and trigger any deferred cleanup functions. This function is
+// intended to be called directly from the main goroutine, while a gRPC or HTTP
+// server runs in a background goroutine.
+func WaitForSignal() {
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, syscall.SIGTERM)
+	signal.Notify(sigChan, syscall.SIGINT)
+	signal.Notify(sigChan, syscall.SIGHUP)
+	<-sigChan
 }
diff --git a/cmd/shell_test.go b/cmd/shell_test.go
index 852168a7634..16cc8c114a3 100644
--- a/cmd/shell_test.go
+++ b/cmd/shell_test.go
@@ -4,10 +4,16 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"os"
+	"os/exec"
 	"runtime"
 	"strings"
 	"testing"
+	"time"

+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/letsencrypt/boulder/config"
 	"github.com/letsencrypt/boulder/core"
 	blog "github.com/letsencrypt/boulder/log"
 	"github.com/letsencrypt/boulder/test"
@@ -17,22 +23,24 @@ var (
 	validPAConfig = []byte(`{
   "dbConnect": "dummyDBConnect",
   "enforcePolicyWhitelist": false,
-  "challenges": { "http-01": true }
+  "challenges": { "http-01": true },
+  "identifiers": { "dns": true, "ip": true }
 }`)
 	invalidPAConfig = []byte(`{
   "dbConnect": "dummyDBConnect",
   "enforcePolicyWhitelist": false,
-  "challenges": { "nonsense": true }
+  "challenges": { "nonsense": true },
+  "identifiers": { "openpgp": true }
 }`)
-	noChallengesPAConfig = []byte(`{
+	noChallengesIdentsPAConfig = []byte(`{
   "dbConnect": "dummyDBConnect",
   "enforcePolicyWhitelist": false
 }`)
-
-	emptyChallengesPAConfig = []byte(`{
+	emptyChallengesIdentsPAConfig = []byte(`{
   "dbConnect": "dummyDBConnect",
   "enforcePolicyWhitelist": false,
-  "challenges": {}
+  "challenges": {},
+  "identifiers": {}
 }`)
 )

@@ -41,21 +49,25 @@ func TestPAConfigUnmarshal(t *testing.T) {
 	err := json.Unmarshal(validPAConfig, &pc1)
 	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
 	test.AssertNotError(t, pc1.CheckChallenges(), "Flagged valid challenges as bad")
+	test.AssertNotError(t, pc1.CheckIdentifiers(), "Flagged valid identifiers as bad")

 	var pc2 PAConfig
 	err = json.Unmarshal(invalidPAConfig, &pc2)
 	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
 	test.AssertError(t, pc2.CheckChallenges(), "Considered invalid challenges as good")
+	test.AssertError(t, pc2.CheckIdentifiers(), "Considered invalid identifiers as good")

 	var pc3 PAConfig
-	err = json.Unmarshal(noChallengesPAConfig, &pc3)
+	err = json.Unmarshal(noChallengesIdentsPAConfig, &pc3)
 	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
 	test.AssertError(t, pc3.CheckChallenges(), "Disallow empty challenges map")
+	test.AssertNotError(t, pc3.CheckIdentifiers(), "Disallowed empty identifiers map")

 	var pc4 PAConfig
-	err = json.Unmarshal(emptyChallengesPAConfig, &pc4)
+	err = json.Unmarshal(emptyChallengesIdentsPAConfig, &pc4)
 	test.AssertNotError(t, err, "Failed to unmarshal PAConfig")
 	test.AssertError(t, pc4.CheckChallenges(), "Disallow empty challenges map")
+	test.AssertNotError(t, pc4.CheckIdentifiers(), "Disallowed empty identifiers map")
 }

 func TestMysqlLogger(t *testing.T) {
@@ -63,20 +75,20 @@ func TestMysqlLogger(t *testing.T) {
 	mLog := mysqlLogger{log}

 	testCases := []struct {
-		args     []interface{}
+		args     []any
 		expected string
 	}{
 		{
-			[]interface{}{nil},
-			`ERR: [AUDIT] [mysql] `,
+			[]any{nil},
+			`ERR: [mysql] `,
 		},
 		{
-			[]interface{}{""},
-			`ERR: [AUDIT] [mysql] `,
+			[]any{""},
+			`ERR: [mysql] `,
 		},
 		{
-			[]interface{}{"Sup ", 12345, " Sup sup"},
-			`ERR: [AUDIT] [mysql] Sup 12345 Sup sup`,
+			[]any{"Sup ", 12345, " Sup sup"},
+			`ERR: [mysql] Sup 12345 Sup sup`,
 		},
 	}

@@ -106,14 +118,16 @@ func TestCaptureStdlibLog(t *testing.T) {
 	}
 }

-func TestVersionString(t *testing.T) {
+func TestLogStartup(t *testing.T) {
 	core.BuildID = "TestBuildID"
 	core.BuildTime = "RightNow!"
 	core.BuildHost = "Localhost"
-	versionStr := VersionString()
-	expected := fmt.Sprintf("Versions: cmd.test=(TestBuildID RightNow!) Golang=(%s) BuildHost=(Localhost)", runtime.Version())
-	test.AssertEquals(t, versionStr, expected)
+	log := blog.NewMock()
+	LogStartup(log)
+	logged := strings.Join(log.GetAll(), "\n")
+	expected := fmt.Sprintf(`INFO: [AUDIT] Process starting JSON={"Command":"cmd.test","BuildID":"TestBuildID","BuildTime":"RightNow!","GoVersion":"%s","BuildHost":"Localhost"}`, runtime.Version())
+	test.AssertEquals(t, logged, expected)
 }

 func TestReadConfigFile(t *testing.T) {
@@ -121,16 +135,13 @@
 	test.AssertError(t, err, "ReadConfigFile('') did not error")

 	type config struct {
-		NotifyMailer struct {
-			DB DBConfig
-			SMTPConfig
-		}
-		Syslog SyslogConfig
+		GRPC *GRPCClientConfig
+		TLS  *TLSConfig
 	}
 	var c config
-	err = ReadConfigFile("../test/config/notify-mailer.json", &c)
-	test.AssertNotError(t, err, "ReadConfigFile(../test/config/notify-mailer.json) errored")
-	test.AssertEquals(t, c.NotifyMailer.SMTPConfig.Server, "localhost")
+	err = ReadConfigFile("../test/config/health-checker.json", &c)
+	test.AssertNotError(t, err, "ReadConfigFile(../test/config/health-checker.json) errored")
+	test.AssertEquals(t, c.GRPC.Timeout.Duration, 1*time.Second)
 }

 func TestLogWriter(t *testing.T) {
@@ -155,3 +166,149 @@ func TestGRPCLoggerWarningFilter(t *testing.T) {
 	lines = m.GetAllMatching(".*")
 	test.AssertEquals(t, len(lines), 0)
 }
+
+func Test_newVersionCollector(t *testing.T) {
+	// 'buildTime'
+	core.BuildTime = core.Unspecified
+	version := newVersionCollector()
+	// Default 'Unspecified' should emit 'Unspecified'.
+	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": core.Unspecified}, 1)
+	// Parsable UnixDate should emit UnixTime.
+	now := time.Now().UTC()
+	core.BuildTime = now.Format(time.UnixDate)
+	version = newVersionCollector()
+	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": now.Format(time.RFC3339)}, 1)
+	// An unparsable timestamp should emit 'Unparsable'.
+	core.BuildTime = "outta time"
+	version = newVersionCollector()
+	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildTime": "Unparsable"}, 1)
+
+	// 'buildId'
+	expectedBuildID := "TestBuildId"
+	core.BuildID = expectedBuildID
+	version = newVersionCollector()
+	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"buildId": expectedBuildID}, 1)
+
+	// 'goVersion'
+	test.AssertMetricWithLabelsEquals(t, version, prometheus.Labels{"goVersion": runtime.Version()}, 1)
+}
+
+func loadConfigFile(t *testing.T, path string) *os.File {
+	cf, err := os.Open(path)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return cf
+}
+
+func TestFailedConfigValidation(t *testing.T) {
+	type FooConfig struct {
+		VitalValue       string          `yaml:"vitalValue" validate:"required"`
+		VoluntarilyVoid  string          `yaml:"voluntarilyVoid"`
+		VisciouslyVetted string          `yaml:"visciouslyVetted" validate:"omitempty,endswith=baz"`
+		VolatileVagary   config.Duration `yaml:"volatileVagary" validate:"required,lte=120s"`
+		VernalVeil       config.Duration `yaml:"vernalVeil" validate:"required"`
+	}
+
+	// Violates 'endswith' tag JSON.
+	cf := loadConfigFile(t, "testdata/1_missing_endswith.json")
+	defer cf.Close()
+	err := ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected validation error")
+	test.AssertContains(t, err.Error(), "'endswith'")
+
+	// Violates 'endswith' tag YAML.
+	cf = loadConfigFile(t, "testdata/1_missing_endswith.yaml")
+	defer cf.Close()
+	err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected validation error")
+	test.AssertContains(t, err.Error(), "'endswith'")
+
+	// Violates 'required' tag JSON.
+	cf = loadConfigFile(t, "testdata/2_missing_required.json")
+	defer cf.Close()
+	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected validation error")
+	test.AssertContains(t, err.Error(), "'required'")
+
+	// Violates 'required' tag YAML.
+	cf = loadConfigFile(t, "testdata/2_missing_required.yaml")
+	defer cf.Close()
+	err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected validation error")
+	test.AssertContains(t, err.Error(), "'required'")
+
+	// Violates 'lte' tag JSON for config.Duration type.
+	cf = loadConfigFile(t, "testdata/3_configDuration_too_darn_big.json")
+	defer cf.Close()
+	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected validation error")
+	test.AssertContains(t, err.Error(), "'lte'")
+
+	// Incorrect value for the config.Duration type.
+	cf = loadConfigFile(t, "testdata/4_incorrect_data_for_type.json")
+	defer cf.Close()
+	err = ValidateJSONConfig(&ConfigValidator{&FooConfig{}, nil}, cf)
+	test.AssertError(t, err, "Expected error")
+	test.AssertContains(t, err.Error(), "missing unit in duration")
+
+	// Incorrect value for the config.Duration type.
+ cf = loadConfigFile(t, "testdata/4_incorrect_data_for_type.yaml") + defer cf.Close() + err = ValidateYAMLConfig(&ConfigValidator{&FooConfig{}, nil}, cf) + test.AssertError(t, err, "Expected error") + test.AssertContains(t, err.Error(), "missing unit in duration") +} + +func TestFailExit(t *testing.T) { + // Test that when Fail is called with a `defer AuditPanic()`, + // the program exits with a non-zero exit code and logs + // the result (but not stack trace). + // Inspired by https://go.dev/talks/2014/testing.slide#23 + if os.Getenv("TIME_TO_DIE") == "1" { + defer AuditPanic() + Fail("tears in the rain") + return + } + + cmd := exec.Command(os.Args[0], "-test.run=TestFailExit") + cmd.Env = append(os.Environ(), "TIME_TO_DIE=1") + output, err := cmd.CombinedOutput() + test.AssertError(t, err, "running a failing program") + test.AssertContains(t, string(output), "[AUDIT] tears in the rain") + // "goroutine" usually shows up in stack traces, so we check it + // to make sure we didn't print a stack trace. + test.AssertNotContains(t, string(output), "goroutine") +} + +func testPanicStackTraceHelper() { + var x *int + *x = 1 //nolint: govet // Purposeful nil pointer dereference to trigger a panic +} + +func TestPanicStackTrace(t *testing.T) { + // Test that when a nil pointer dereference is hit after a + // `defer AuditPanic()`, the program exits with a non-zero + // exit code and prints the result (but not stack trace). + // Inspired by https://go.dev/talks/2014/testing.slide#23 + if os.Getenv("AT_THE_DISCO") == "1" { + defer AuditPanic() + testPanicStackTraceHelper() + return + } + + cmd := exec.Command(os.Args[0], "-test.run=TestPanicStackTrace") + cmd.Env = append(os.Environ(), "AT_THE_DISCO=1") + output, err := cmd.CombinedOutput() + test.AssertError(t, err, "running a failing program") + test.AssertContains(t, string(output), "nil pointer dereference") + test.AssertContains(t, string(output), "runtime/debug.Stack()") + test.AssertContains(t, string(output), "cmd/shell_test.go:") +} diff --git a/cmd/testdata/1_missing_endswith.json b/cmd/testdata/1_missing_endswith.json new file mode 100644 index 00000000000..af9286b6326 --- /dev/null +++ b/cmd/testdata/1_missing_endswith.json @@ -0,0 +1,5 @@ +{ + "vitalValue": "Gotcha", + "voluntarilyVoid": "Not used", + "visciouslyVetted": "Whatever" +} diff --git a/cmd/testdata/1_missing_endswith.yaml b/cmd/testdata/1_missing_endswith.yaml new file mode 100644 index 00000000000..f101121ecac --- /dev/null +++ b/cmd/testdata/1_missing_endswith.yaml @@ -0,0 +1,3 @@ +vitalValue: "Gotcha" +voluntarilyVoid: "Not used" +visciouslyVetted: "Whatever" diff --git a/cmd/testdata/2_missing_required.json b/cmd/testdata/2_missing_required.json new file mode 100644 index 00000000000..7fd2fe293f8 --- /dev/null +++ b/cmd/testdata/2_missing_required.json @@ -0,0 +1,4 @@ +{ + "voluntarilyVoid": "Not used", + "visciouslyVetted": "barbaz" +} diff --git a/cmd/testdata/2_missing_required.yaml b/cmd/testdata/2_missing_required.yaml new file mode 100644 index 00000000000..10a918d4c09 --- /dev/null +++ b/cmd/testdata/2_missing_required.yaml @@ -0,0 +1,2 @@ +voluntarilyVoid: "Not used" +visciouslyVetted: "barbaz" diff --git a/cmd/testdata/3_configDuration_too_darn_big.json b/cmd/testdata/3_configDuration_too_darn_big.json new file mode 100644 index 00000000000..0b108edb7fb --- /dev/null +++ b/cmd/testdata/3_configDuration_too_darn_big.json @@ -0,0 +1,6 @@ +{ + "vitalValue": "Gotcha", + "voluntarilyVoid": "Not used", + "visciouslyVetted": "Whateverbaz", + "volatileVagary": "121s" 
+} diff --git a/cmd/testdata/4_incorrect_data_for_type.json b/cmd/testdata/4_incorrect_data_for_type.json new file mode 100644 index 00000000000..5805d59ee4d --- /dev/null +++ b/cmd/testdata/4_incorrect_data_for_type.json @@ -0,0 +1,7 @@ +{ + "vitalValue": "Gotcha", + "voluntarilyVoid": "Not used", + "visciouslyVetted": "Whateverbaz", + "volatileVagary": "120s", + "vernalVeil": "60" +} diff --git a/cmd/testdata/4_incorrect_data_for_type.yaml b/cmd/testdata/4_incorrect_data_for_type.yaml new file mode 100644 index 00000000000..02093be825e --- /dev/null +++ b/cmd/testdata/4_incorrect_data_for_type.yaml @@ -0,0 +1,5 @@ +vitalValue: "Gotcha" +voluntarilyVoid: "Not used" +visciouslyVetted: "Whateverbaz" +volatileVagary: "120s" +vernalVeil: "60" diff --git a/cmd/testdata/cert.pem b/cmd/testdata/cert.pem deleted file mode 100644 index 00267513ddb..00000000000 --- a/cmd/testdata/cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDETCCAfmgAwIBAgIIPLS8tZQ1QBAwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgM2I4YjJjMCAXDTE2MTIyNDIwMTcxNVoYDzIxMDYx -MjI0MjAxNzE1WjAWMRQwEgYDVQQDEwt3ZmUuYm91bGRlcjCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAOQ2jzG5CcATMKtNhAEwHYIdhC0mFhHSLNr1lmde -qJeM2R0PH9sbC6+MuMksbbn1E3rfwIQLoEqrhcr+80Gh40EQE/H+rI8ynK23S/jx -xkVizBPmlwH6kc8Pts6E6x8xwOk/KfZI5SMMr7ujyliipth7ZPAgJPSUkDxxWgww -3HhVB4IAip73Zm541rvY3ZVw27OGB+brzgH7oH4u8+8Ox5on6d00fZEvOPVPPpWx -XNQVhtvVy/xDx/FYnMFHOu0XC7AK6GLdtdWyaIUQM4gdQaEHJ/ujtRlerNhAPn3L -3hBB/hSvYazylU/85QpP1mLN+HxW3eMDu6TqvCYNMGKGRS0CAwEAAaNXMFUwDgYD -VR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV -HRMBAf8EAjAAMBYGA1UdEQQPMA2CC3dmZS5ib3VsZGVyMA0GCSqGSIb3DQEBCwUA -A4IBAQAaw4H0BNVVc0IrtWSnazAcGvIBtR6CIDn6lXZ40ttzX5MJ7B9j1AvBCdyU -e7fF9Zf5ozGAF4aoixec2W4YH4gjrPZyEfU57PJ34o8F34oiHNYU0zoSvGTXjIzc -3yDK7XZ1mIjdqY9In6o5xvnCfmn5EE4tXHgZKIsIovdCO/QbBetiQnDQqDCb5tdl -Ayq99i88A+5Tdgcb24qrdBPHtUh/MIb2uttnlwQrdXcgDnf07bMENJ/IWmCYjkfN -tRpNrhWkeQt6wyczIyhQ+7DIsdUJkgnQkC8BCaX9eBJpe5IZjfVuw1/qTGfRdej7 -U4ckyzGgK2yNwPl75K5WxhrkD2+e ------END CERTIFICATE----- diff --git a/cmd/testdata/key.pem b/cmd/testdata/key.pem deleted file mode 100644 index 6242dd4e2d4..00000000000 --- a/cmd/testdata/key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA5DaPMbkJwBMwq02EATAdgh2ELSYWEdIs2vWWZ16ol4zZHQ8f -2xsLr4y4ySxtufUTet/AhAugSquFyv7zQaHjQRAT8f6sjzKcrbdL+PHGRWLME+aX -AfqRzw+2zoTrHzHA6T8p9kjlIwyvu6PKWKKm2Htk8CAk9JSQPHFaDDDceFUHggCK -nvdmbnjWu9jdlXDbs4YH5uvOAfugfi7z7w7Hmifp3TR9kS849U8+lbFc1BWG29XL -/EPH8VicwUc67RcLsAroYt211bJohRAziB1BoQcn+6O1GV6s2EA+fcveEEH+FK9h -rPKVT/zlCk/WYs34fFbd4wO7pOq8Jg0wYoZFLQIDAQABAoIBAD3gERkRK3SZf2Oi -w7yyt/10VUcMgYVZb/H6Dkp+nsVgWemIFUIJ5jQ1ulBJLILz0NFK416p7E7yjHcK -gxfzdbNJUa8WNuEoftSW0xFvNCvR6rUmmyDdODVk9FKwNTOjAMP87Tqqbv3+zZLZ -iN5ZU1V0t1xnyr1JnoXU0e5mPRCesi3pJwYGB6Adeo1kzULnqiigkvrG1kCWeKfv -E86R+oPVccC1uosxBEdaj0D6OOw1UutO4813wCoSoL8rMT4Pdf+qxcds79LePdYR -sVUyCczCrhe5enRzUeb7ditGaqB5Ny6bKV2r4pBk4f8L2F2Nb5MUk+po1Cer96dM -TYfqQp0CgYEA9uc93oAS7eSKwaplOxZUNeVU85vbBGF8f5AzgxECw4ymBo1UM0wU -cDtGRHaiHyEoel5h75GT9ajOEKS81YQkauxYqMTEWp4gbnhM+FvM79Ko/x40SbPZ -ibB+jj5dP5yd5eHWBZ949NxbF5BuJYK1sKZUF+0LXvzQbBiMCrpeufsCgYEA7J8I -2bAWKoH9fQC/rjMHbNIzY9vRBXQcvSs+vYD+llEFCraXh9F6vjukz14ghD6OYLgm -YMFdzpkCBzHDXs7PcekhFyUnyeU0LSIMeJtNAYhXoqR5xpDYctYxU7b1oSIOR6iB -yyX2rVePf/7j7rgpQ8e3fwU3cUt6ScgmNjx+PPcCgYEA18y7bbrhUJ4djTHicueM -SOMeAiJqJoUSbOhbraXzlXlcL4PdYUyQ3JtiSjqqbqzrlTDjRqImAWQ+X5utFK3/ -qpjS0QfSWyW0UPnCxJxwHfkxDnKW3whcDZV+t9LYa932wFNIXffJn7Ltx0XMj+qT 
-5UcrXxIniylK/f/ifwnv+3sCgYANgcKCkp+hS4xJv2oxqm4JEzKlx5113zvovHg3 -Xw6pS/SquOOLeDS2eTceMzRN+DguYzSln0QYj/liqb+KQOnsia8+3/sWdkP1M5Vc -qRjLNpoyCYt9/zSr0Xg9XBtHWGdSRdcq3n7nRDFH0Cf4r4CDgvoBgS26BOH1Jjyw -KeHW3wKBgQDJQ9L7ULWHbhRPvnXV5FdGZ3JcPddD3nskVJm5mhWs1caYy3haMr7v -OMWuN5uA/KpEYt3hX/tPBZ2cW0MNmbbft1TtA9Hw2jURwbPiqluQoi5j6gZhRpYK -l0Er7/v/octJjv8M5aad2FztfWxZ6n+TWB146AfLwhMhEZbEftFe2g== ------END RSA PRIVATE KEY----- diff --git a/cmd/testdata/minica.pem b/cmd/testdata/minica.pem deleted file mode 100644 index f57f06f9724..00000000000 --- a/cmd/testdata/minica.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCTCCAfGgAwIBAgIIO4ssrd6kNBYwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgM2I4YjJjMCAXDTE2MTEwNDIxMTY0OVoYDzIxMTYx -MTA0MjIxNjQ5WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAzYjhiMmMwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzEBIAP0lX/VmQSAVkcf4N7lvG -eJClWsO8MRMdGDoCao6XWE72dVBI2A8VwqOnIKC8G6p/+prCgpdB4/Mdyb6X4iaT -pSsA3hGTWlrHnkXKvFEDnauGLDPclco4V3emBF56YOC4ii1F83AvDrqrsnFDrl5V -AfAnYyCIw9qfDvy/PqdGulhbi0+x1Y1navWp0DbQ7Ec9NUK0MLE2DH/nL4NvtzYM -wbzwuouNkTnwhYh54/SkVFkMBTFf2CBAtZPz4/Q2QaiZvvJYKVQ2yKCaStthobcn -5Uz+PDxfJcAR6Ma1tuzsp0sBbtjZAcKZHUabFFRmlHnE/9kWE55nB3oelrhnAgMB -AAGjRTBDMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB -BQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEAFwZS -o7hfeK1sUKoXJeqrw6fIuwJsM0Hpa+j5VW+pJIA1J0Ntb1e0JI8StnE3hxYoQ30m -pZ9ZMRPov8AqU97l1aBbNYu9CwQsSMmFwJNuAQKw0PZ8U+dPgt2JE++z4349QDz0 -EWAAH8sFU1bXiAWHJLNpiLf+IKYyCETYwlFkWAUyZtWTbsmW+iJD8qZ44ehydGqZ -3e4NzpJUjN0IK8c1BpSjDqbjiTxhlJKXyAR3vAvhXa7V3SkHly5SFpggZi1KgumD -jVJRk88vTo95Tqsrer0ouyyFwst8ZPmUt/vqbwhU6Z3DgX9jYcS9ON5KVGbC1KO9 -JNrFIxoQe9I3x5w6kw== ------END CERTIFICATE----- diff --git a/config/duration.go b/config/duration.go new file mode 100644 index 00000000000..3e30c0283e4 --- /dev/null +++ b/config/duration.go @@ -0,0 +1,74 @@ +package config + +import ( + "encoding/json" + "errors" + "reflect" + "time" +) + +// Duration is custom type embedding a time.Duration which allows defining +// methods such as serialization to YAML or JSON. +type Duration struct { + time.Duration `validate:"required"` +} + +// DurationCustomTypeFunc enables registration of our custom config.Duration +// type as a time.Duration and performing validation on the configured value +// using the standard suite of validation functions. +func DurationCustomTypeFunc(field reflect.Value) any { + if c, ok := field.Interface().(Duration); ok { + return c.Duration + } + + return reflect.Invalid +} + +// ErrDurationMustBeString is returned when a non-string value is +// presented to be deserialized as a ConfigDuration +var ErrDurationMustBeString = errors.New("cannot JSON unmarshal something other than a string into a ConfigDuration") + +// UnmarshalJSON parses a string into a ConfigDuration using +// time.ParseDuration. If the input does not unmarshal as a +// string, then UnmarshalJSON returns ErrDurationMustBeString. +func (d *Duration) UnmarshalJSON(b []byte) error { + s := "" + err := json.Unmarshal(b, &s) + if err != nil { + var jsonUnmarshalTypeErr *json.UnmarshalTypeError + if errors.As(err, &jsonUnmarshalTypeErr) { + return ErrDurationMustBeString + } + return err + } + dd, err := time.ParseDuration(s) + d.Duration = dd + return err +} + +// MarshalJSON returns the string form of the duration, as a byte array. 
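+//
+// Together with UnmarshalJSON above, this gives a round trip like the
+// following sketch:
+//
+//	var d config.Duration
+//	_ = json.Unmarshal([]byte(`"90s"`), &d) // d.Duration == 90 * time.Second
+//	out, _ := json.Marshal(d)               // out: `"1m30s"`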
+func (d Duration) MarshalJSON() ([]byte, error) {
+	// Marshal via the standard library so the output is a quoted JSON string;
+	// returning the bare duration string would not be valid JSON.
+	return json.Marshal(d.Duration.String())
+}
+
+// UnmarshalYAML uses the same format as JSON, but is called by the YAML
+// parser (vs. the JSON parser).
+func (d *Duration) UnmarshalYAML(unmarshal func(any) error) error {
+	var s string
+	err := unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	dur, err := time.ParseDuration(s)
+	if err != nil {
+		return err
+	}
+
+	d.Duration = dur
+	return nil
+}
+
+// MarshalYAML returns the string form of the duration, as a string.
+func (d Duration) MarshalYAML() (any, error) {
+	return d.Duration.String(), nil
+}
diff --git a/core/challenges.go b/core/challenges.go
index 4b4a67c4868..104d754a879 100644
--- a/core/challenges.go
+++ b/core/challenges.go
@@ -8,20 +8,22 @@ func newChallenge(challengeType AcmeChallenge, token string) Challenge {
 	}
 }

-// HTTPChallenge01 constructs a random http-01 challenge. If token is empty a random token
-// will be generated, otherwise the provided token is used.
+// HTTPChallenge01 constructs an http-01 challenge.
 func HTTPChallenge01(token string) Challenge {
 	return newChallenge(ChallengeTypeHTTP01, token)
 }

-// DNSChallenge01 constructs a random dns-01 challenge. If token is empty a random token
-// will be generated, otherwise the provided token is used.
+// DNSChallenge01 constructs a dns-01 challenge.
 func DNSChallenge01(token string) Challenge {
 	return newChallenge(ChallengeTypeDNS01, token)
 }

-// TLSALPNChallenge01 constructs a random tls-alpn-01 challenge. If token is empty a random token
-// will be generated, otherwise the provided token is used.
+// TLSALPNChallenge01 constructs a tls-alpn-01 challenge.
 func TLSALPNChallenge01(token string) Challenge {
 	return newChallenge(ChallengeTypeTLSALPN01, token)
 }
+
+// DNSAccountChallenge01 constructs a dns-account-01 challenge.
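+// Like the constructors above, it simply wraps newChallenge; for example
+// (a sketch):
+//
+//	chall := core.DNSAccountChallenge01(core.NewToken())
+//	// chall.Type == core.ChallengeTypeDNSAccount01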
+func DNSAccountChallenge01(token string) Challenge {
+	return newChallenge(ChallengeTypeDNSAccount01, token)
+}
diff --git a/core/core_test.go b/core/core_test.go
index ed1b8cc337d..1b7ff0cb50a 100644
--- a/core/core_test.go
+++ b/core/core_test.go
@@ -5,8 +5,9 @@ import (
 	"encoding/json"
 	"testing"

+	"github.com/go-jose/go-jose/v4"
+
 	"github.com/letsencrypt/boulder/test"
-	"gopkg.in/square/go-jose.v2"
 )

 // challenges.go
@@ -26,57 +27,24 @@ func TestChallenges(t *testing.T) {
 	token := NewToken()

 	http01 := HTTPChallenge01(token)
-	test.AssertNotError(t, http01.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer returned an error")
+	test.AssertNotError(t, http01.CheckPending(), "CheckPending returned an error")

 	dns01 := DNSChallenge01(token)
-	test.AssertNotError(t, dns01.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer returned an error")
+	test.AssertNotError(t, dns01.CheckPending(), "CheckPending returned an error")
+
+	dnsAccount01 := DNSAccountChallenge01(token)
+	test.AssertNotError(t, dnsAccount01.CheckPending(), "CheckPending returned an error")

 	tlsalpn01 := TLSALPNChallenge01(token)
-	test.AssertNotError(t, tlsalpn01.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer returned an error")
+	test.AssertNotError(t, tlsalpn01.CheckPending(), "CheckPending returned an error")

 	test.Assert(t, ChallengeTypeHTTP01.IsValid(), "Refused valid challenge")
 	test.Assert(t, ChallengeTypeDNS01.IsValid(), "Refused valid challenge")
 	test.Assert(t, ChallengeTypeTLSALPN01.IsValid(), "Refused valid challenge")
+	test.Assert(t, ChallengeTypeDNSAccount01.IsValid(), "Refused valid challenge")
 	test.Assert(t, !AcmeChallenge("nonsense-71").IsValid(), "Accepted invalid challenge")
 }

-// objects.go
-
-var testCertificateRequestBadCSR = []byte(`{"csr":"AAAA"}`)
-var testCertificateRequestGood = []byte(`{
-  "csr": "MIHRMHgCAQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQWUlnRrm5ErSVkTzBTk3isg1hNydfyY4NM1P_N1S-ZeD39HMrYJsQkUh2tKvy3ztfmEqWpekvO4WRktSa000BPoAAwCgYIKoZIzj0EAwMDSQAwRgIhAIZIBwu4xOUD_4dJuGgceSKaoXTFBQKA3BFBNVJvbpdsAiEAlfq3Dq_8dnYbtmyDdXgopeKkSV5_76VSpcog-wkwEwo"
-}`)
-
-func TestCertificateRequest(t *testing.T) {
-
-	// Good
-	var goodCR CertificateRequest
-	err := json.Unmarshal(testCertificateRequestGood, &goodCR)
-	if err != nil {
-		t.Errorf("Error unmarshaling good certificate request: %v", err)
-	}
-	if err = goodCR.CSR.CheckSignature(); err != nil {
-		t.Errorf("Valid CSR in CertificateRequest failed to verify: %v", err)
-	}
-
-	// Bad CSR
-	var badCR CertificateRequest
-	err = json.Unmarshal(testCertificateRequestBadCSR, &badCR)
-	if err == nil {
-		t.Errorf("Unexpectedly accepted certificate request with bad CSR")
-	}
-
-	// Marshal
-	jsonCR, err := json.Marshal(goodCR)
-	if err != nil {
-		t.Errorf("Failed to marshal good certificate request: %v", err)
-	}
-	err = json.Unmarshal(jsonCR, &goodCR)
-	if err != nil {
-		t.Errorf("Marshalled certificate request failed to unmarshal: %v", err)
-	}
-}
-
 // util.go

 func TestRandomString(t *testing.T) {
diff --git a/core/interfaces.go b/core/interfaces.go
index 85cdc9a49bc..1b3a1eedd22 100644
--- a/core/interfaces.go
+++ b/core/interfaces.go
@@ -7,8 +7,8 @@ import (

 // PolicyAuthority defines the public interface for the Boulder PA
 // TODO(#5891): Move this interface to a more appropriate location.
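+// As a sketch (hypothetical caller code), the expected flow through this
+// interface is roughly:
+//
+//	err := pa.WillingToIssue(idents)              // policy gate for the identifiers
+//	challs, _ := pa.ChallengeTypesFor(ident)      // which challenge types to offer
+//	enabled := pa.ChallengeTypeEnabled(challs[0]) // config gate for a given type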
type PolicyAuthority interface { - WillingToIssue(domain identifier.ACMEIdentifier) error - WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error - ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error) - ChallengeTypeEnabled(t AcmeChallenge) bool + WillingToIssue(identifier.ACMEIdentifiers) error + ChallengeTypesFor(identifier.ACMEIdentifier) ([]AcmeChallenge, error) + ChallengeTypeEnabled(AcmeChallenge) bool + CheckAuthzChallenges(*Authorization) error } diff --git a/core/objects.go b/core/objects.go index 9e328e82391..8f9ed49bc1f 100644 --- a/core/objects.go +++ b/core/objects.go @@ -2,16 +2,16 @@ package core import ( "crypto" - "crypto/x509" "encoding/base64" "encoding/json" "fmt" "hash/fnv" - "net" + "net/netip" "strings" "time" - "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "golang.org/x/crypto/ocsp" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" @@ -33,43 +33,28 @@ const ( StatusDeactivated = AcmeStatus("deactivated") // Object has been deactivated ) -// AcmeResource values identify different types of ACME resources -type AcmeResource string - -// The types of ACME resources -const ( - ResourceNewReg = AcmeResource("new-reg") - ResourceNewAuthz = AcmeResource("new-authz") - ResourceNewCert = AcmeResource("new-cert") - ResourceRevokeCert = AcmeResource("revoke-cert") - ResourceRegistration = AcmeResource("reg") - ResourceChallenge = AcmeResource("challenge") - ResourceAuthz = AcmeResource("authz") - ResourceKeyChange = AcmeResource("key-change") -) - // AcmeChallenge values identify different types of ACME challenges type AcmeChallenge string // These types are the available challenges -// TODO(#5009): Make this a custom type as well. const ( - ChallengeTypeHTTP01 = AcmeChallenge("http-01") - ChallengeTypeDNS01 = AcmeChallenge("dns-01") - ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01") + ChallengeTypeHTTP01 = AcmeChallenge("http-01") + ChallengeTypeDNS01 = AcmeChallenge("dns-01") + ChallengeTypeTLSALPN01 = AcmeChallenge("tls-alpn-01") + ChallengeTypeDNSAccount01 = AcmeChallenge("dns-account-01") ) // IsValid tests whether the challenge is a known challenge func (c AcmeChallenge) IsValid() bool { switch c { - case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01: + case ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01, ChallengeTypeDNSAccount01: return true default: return false } } -// OCSPStatus defines the state of OCSP for a domain +// OCSPStatus defines the state of OCSP for a certificate type OCSPStatus string // These status are the states of OCSP @@ -78,52 +63,23 @@ const ( OCSPStatusRevoked = OCSPStatus("revoked") ) +var OCSPStatusToInt = map[OCSPStatus]int{ + OCSPStatusGood: ocsp.Good, + OCSPStatusRevoked: ocsp.Revoked, +} + // DNSPrefix is attached to DNS names in DNS challenges const DNSPrefix = "_acme-challenge" -// CertificateRequest is just a CSR -// -// This data is unmarshalled from JSON by way of RawCertificateRequest, which -// represents the actual structure received from the client. -type CertificateRequest struct { - CSR *x509.CertificateRequest // The CSR - Bytes []byte // The original bytes of the CSR, for logging. -} - type RawCertificateRequest struct { CSR JSONBuffer `json:"csr"` // The encoded CSR } -// UnmarshalJSON provides an implementation for decoding CertificateRequest objects. 
-func (cr *CertificateRequest) UnmarshalJSON(data []byte) error { - var raw RawCertificateRequest - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - - csr, err := x509.ParseCertificateRequest(raw.CSR) - if err != nil { - return err - } - - cr.CSR = csr - cr.Bytes = raw.CSR - return nil -} - -// MarshalJSON provides an implementation for encoding CertificateRequest objects. -func (cr CertificateRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(RawCertificateRequest{ - CSR: cr.CSR.Raw, - }) -} - // Registration objects represent non-public metadata attached // to account keys. type Registration struct { // Unique identifier - ID int64 `json:"id,omitempty" db:"id"` + ID int64 `json:"-"` // Account key to which the details are attached Key *jose.JSONWebKey `json:"key"` @@ -132,10 +88,7 @@ type Registration struct { Contact *[]string `json:"contact,omitempty"` // Agreement with terms of service - Agreement string `json:"agreement,omitempty"` - - // InitialIP is the IP address from which the registration was created - InitialIP net.IP `json:"initialIp"` + Agreement string `json:"-"` // CreatedAt is the time the registration was created. CreatedAt *time.Time `json:"createdAt,omitempty"` @@ -144,16 +97,19 @@ type Registration struct { } // ValidationRecord represents a validation attempt against a specific URL/hostname -// and the IP addresses that were resolved and used +// and the IP addresses that were resolved and used. type ValidationRecord struct { // SimpleHTTP only URL string `json:"url,omitempty"` // Shared - Hostname string `json:"hostname"` - Port string `json:"port,omitempty"` - AddressesResolved []net.IP `json:"addressesResolved,omitempty"` - AddressUsed net.IP `json:"addressUsed,omitempty"` + // + // Hostname can hold either a DNS name or an IP address. + Hostname string `json:"hostname,omitempty"` + Port string `json:"port,omitempty"` + AddressesResolved []netip.Addr `json:"addressesResolved,omitempty"` + AddressUsed netip.Addr `json:"addressUsed"` + // AddressesTried contains a list of addresses tried before the `AddressUsed`. // Presently this will only ever be one IP from `AddressesResolved` since the // only retry is in the case of a v6 failure with one v4 fallback. E.g. if @@ -168,26 +124,12 @@ type ValidationRecord struct { // AddressesTried: [ ::1 ], // ... // } - AddressesTried []net.IP `json:"addressesTried,omitempty"` - - // OldTLS is true if any request in the validation chain used HTTPS and negotiated - // a TLS version lower than 1.2. - // TODO(#6011): Remove once TLS 1.0 and 1.1 support is gone. - OldTLS bool `json:"oldTLS,omitempty"` -} + AddressesTried []netip.Addr `json:"addressesTried,omitempty"` -func looksLikeKeyAuthorization(str string) error { - parts := strings.Split(str, ".") - if len(parts) != 2 { - return fmt.Errorf("Invalid key authorization: does not look like a key authorization") - } else if !LooksLikeAToken(parts[0]) { - return fmt.Errorf("Invalid key authorization: malformed token") - } else if !LooksLikeAToken(parts[1]) { - // Thumbprints have the same syntax as tokens in boulder - // Both are base64-encoded and 32 octets - return fmt.Errorf("Invalid key authorization: malformed key thumbprint") - } - return nil + // ResolverAddrs is the host:port of the DNS resolver(s) that fulfilled the + // lookup for AddressUsed. 
During recursive A and AAAA lookups, a record may
+	// instead look like A:host:port or AAAA:host:port
+	ResolverAddrs []string `json:"resolverAddrs,omitempty"`
 }
 
 // Challenge is an aggregate of all data needed for any challenges.
@@ -196,38 +138,30 @@ func looksLikeKeyAuthorization(str string) error {
 // challenge, we just throw all the elements into one bucket,
 // together with the common metadata elements.
 type Challenge struct {
-	// The type of challenge
+	// Type is the type of challenge encoded in this object.
 	Type AcmeChallenge `json:"type"`
 
-	// The status of this challenge
-	Status AcmeStatus `json:"status,omitempty"`
+	// URL is the URL to which a response can be posted. Required for all types.
+	URL string `json:"url,omitempty"`
 
-	// Contains the error that occurred during challenge validation, if any
-	Error *probs.ProblemDetails `json:"error,omitempty"`
+	// Status is the status of this challenge. Required for all types.
+	Status AcmeStatus `json:"status,omitempty"`
 
-	// A URI to which a response can be POSTed
-	URI string `json:"uri,omitempty"`
+	// Validated is the time at which the server validated the challenge. Required
+	// if status is valid.
+	Validated *time.Time `json:"validated,omitempty"`
 
-	// For the V2 API the "URI" field is deprecated in favour of URL.
-	URL string `json:"url,omitempty"`
+	// Error contains the error that occurred during challenge validation, if any.
+	// If set, the Status must be "invalid".
+	Error *probs.ProblemDetails `json:"error,omitempty"`
 
-	// Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges
+	// Token is a random value that uniquely identifies the challenge. It is used
+	// by all current challenges (http-01, tls-alpn-01, dns-01, and dns-account-01).
 	Token string `json:"token,omitempty"`
 
-	// The expected KeyAuthorization for validation of the challenge. Populated by
-	// the RA prior to passing the challenge to the VA. For legacy reasons this
-	// field is called "ProvidedKeyAuthorization" because it was initially set by
-	// the content of the challenge update POST from the client. It is no longer
-	// set that way and should be renamed to "KeyAuthorization".
-	// TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`.
-	ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`
-
 	// Contains information about URLs used or redirected to and IPs resolved and
 	// used
 	ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
-
-	// The time at which the server validated the challenge. Required by
-	// RFC8555 if status is valid.
-	Validated *time.Time `json:"validated,omitempty"`
 }
 
 // ExpectedKeyAuthorization computes the expected KeyAuthorization value for
@@ -248,14 +182,16 @@ func (ch Challenge) ExpectedKeyAuthorization(key *jose.JSONWebKey) (string, erro
 // RecordsSane checks the sanity of a ValidationRecord object before sending it
 // back to the RA to be stored.
 func (ch Challenge) RecordsSane() bool {
-	if ch.ValidationRecord == nil || len(ch.ValidationRecord) == 0 {
+	if len(ch.ValidationRecord) == 0 {
 		return false
 	}
 
 	switch ch.Type {
 	case ChallengeTypeHTTP01:
 		for _, rec := range ch.ValidationRecord {
+			// TODO(#7140): Add a check for ResolverAddress == "" only after the
+			// core.proto change has been deployed.
+ if rec.URL == "" || rec.Hostname == "" || rec.Port == "" || (rec.AddressUsed == netip.Addr{}) || len(rec.AddressesResolved) == 0 { return false } @@ -267,14 +203,18 @@ func (ch Challenge) RecordsSane() bool { if ch.ValidationRecord[0].URL != "" { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" || ch.ValidationRecord[0].Port == "" || - ch.ValidationRecord[0].AddressUsed == nil || len(ch.ValidationRecord[0].AddressesResolved) == 0 { + (ch.ValidationRecord[0].AddressUsed == netip.Addr{}) || len(ch.ValidationRecord[0].AddressesResolved) == 0 { return false } - case ChallengeTypeDNS01: + case ChallengeTypeDNS01, ChallengeTypeDNSAccount01: if len(ch.ValidationRecord) > 1 { return false } + // TODO(#7140): Add a check for ResolverAddress == "" only after the + // core.proto change has been deployed. if ch.ValidationRecord[0].Hostname == "" { return false } @@ -286,43 +226,18 @@ func (ch Challenge) RecordsSane() bool { return true } -// CheckConsistencyForClientOffer checks the fields of a challenge object before it is -// given to the client. -func (ch Challenge) CheckConsistencyForClientOffer() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // Before completion, the key authorization field should be empty - if ch.ProvidedKeyAuthorization != "" { - return fmt.Errorf("A response to this challenge was already submitted.") - } - return nil -} - -// CheckConsistencyForValidation checks the fields of a challenge object before it is -// given to the VA. -func (ch Challenge) CheckConsistencyForValidation() error { - err := ch.checkConsistency() - if err != nil { - return err - } - - // If the challenge is completed, then there should be a key authorization - return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization) -} - -// checkConsistency checks the sanity of a challenge object before issued to the client. -func (ch Challenge) checkConsistency() error { +// CheckPending ensures that a challenge object is pending and has a token. +// This is used before offering the challenge to the client, and before actually +// validating a challenge. +func (ch Challenge) CheckPending() error { if ch.Status != StatusPending { - return fmt.Errorf("The challenge is not pending.") + return fmt.Errorf("challenge is not pending") } - // There always needs to be a token - if !LooksLikeAToken(ch.Token) { - return fmt.Errorf("The token is missing.") + if !looksLikeAToken(ch.Token) { + return fmt.Errorf("token is missing or malformed") } + return nil } @@ -337,30 +252,30 @@ func (ch Challenge) StringID() string { return base64.RawURLEncoding.EncodeToString(h.Sum(nil)[0:4]) } -// Authorization represents the authorization of an account key holder -// to act on behalf of a domain. This struct is intended to be used both -// internally and for JSON marshaling on the wire. Any fields that should be -// suppressed on the wire (e.g., ID, regID) must be made empty before marshaling. +// Authorization represents the authorization of an account key holder to act on +// behalf of an identifier. This struct is intended to be used both internally +// and for JSON marshaling on the wire. Any fields that should be suppressed on +// the wire (e.g., ID, regID) must be made empty before marshaling. type Authorization struct { // An identifier for this authorization, unique across // authorizations and certificates within this instance. 
- ID string `json:"id,omitempty" db:"id"` + ID string `json:"-"` // The identifier for which authorization is being given - Identifier identifier.ACMEIdentifier `json:"identifier,omitempty" db:"identifier"` + Identifier identifier.ACMEIdentifier `json:"identifier"` // The registration ID associated with the authorization - RegistrationID int64 `json:"regId,omitempty" db:"registrationID"` + RegistrationID int64 `json:"-"` // The status of the validation of this authorization - Status AcmeStatus `json:"status,omitempty" db:"status"` + Status AcmeStatus `json:"status,omitempty"` // The date after which this authorization will be no // longer be considered valid. Note: a certificate may be issued even on the // last day of an authorization's lifetime. The last day for which someone can // hold a valid certificate based on an authorization is authorization // lifetime + certificate lifetime. - Expires *time.Time `json:"expires,omitempty" db:"expires"` + Expires *time.Time `json:"expires,omitempty"` // An array of challenges objects used to validate the // applicant's control of the identifier. For authorizations @@ -370,17 +285,26 @@ type Authorization struct { // // There should only ever be one challenge of each type in this // slice and the order of these challenges may not be predictable. - Challenges []Challenge `json:"challenges,omitempty" db:"-"` - - // This field is deprecated. It's filled in by WFE for the ACMEv1 API. - Combinations [][]int `json:"combinations,omitempty" db:"combinations"` + Challenges []Challenge `json:"challenges,omitempty"` - // Wildcard is a Boulder-specific Authorization field that indicates the - // authorization was created as a result of an order containing a name with - // a `*.`wildcard prefix. This will help convey to users that an - // Authorization with the identifier `example.com` and one DNS-01 challenge - // corresponds to a name `*.example.com` from an associated order. - Wildcard bool `json:"wildcard,omitempty" db:"-"` + // https://datatracker.ietf.org/doc/html/rfc8555#page-29 + // + // wildcard (optional, boolean): This field MUST be present and true + // for authorizations created as a result of a newOrder request + // containing a DNS identifier with a value that was a wildcard + // domain name. For other authorizations, it MUST be absent. + // Wildcard domain names are described in Section 7.1.3. + // + // This is not represented in the database because we calculate it from + // the identifier stored in the database. Unlike the identifier returned + // as part of the authorization, the identifier we store in the database + // can contain an asterisk. + Wildcard bool `json:"wildcard,omitempty"` + + // CertificateProfileName is the name of the profile associated with the + // order that first resulted in the creation of this authorization. Omitted + // from API responses. + CertificateProfileName string `json:"-"` } // FindChallengeByStringID will look for a challenge matching the given ID inside @@ -398,38 +322,25 @@ func (authz *Authorization) FindChallengeByStringID(id string) int { // SolvedBy will look through the Authorizations challenges, returning the type // of the *first* challenge it finds with Status: valid, or an error if no // challenge is valid. 
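A note on the Wildcard recomputation described above: the field is not stored, and is rebuilt from the identifier as it sits in the database. The actual query-side code is elsewhere in Boulder; conceptually it reduces to a prefix test (storedIdent is a hypothetical stand-in for the database value, asterisk intact):

    // Wildcard is true iff the stored identifier value keeps its "*." prefix;
    // the identifier served over the API has that prefix stripped.
    authz.Wildcard = strings.HasPrefix(storedIdent.Value, "*.")
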
-func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) { +func (authz *Authorization) SolvedBy() (AcmeChallenge, error) { if len(authz.Challenges) == 0 { - return nil, fmt.Errorf("Authorization has no challenges") + return "", fmt.Errorf("authorization has no challenges") } for _, chal := range authz.Challenges { if chal.Status == StatusValid { - return &chal.Type, nil + return chal.Type, nil } } - return nil, fmt.Errorf("Authorization not solved by any challenge") + return "", fmt.Errorf("authorization not solved by any challenge") } // JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding // with stripped padding. type JSONBuffer []byte -// URL-safe base64 encode that strips padding -func base64URLEncode(data []byte) string { - var result = base64.URLEncoding.EncodeToString(data) - return strings.TrimRight(result, "=") -} - -// URL-safe base64 decoder that adds padding -func base64URLDecode(data string) ([]byte, error) { - var missing = (4 - len(data)%4) % 4 - data += strings.Repeat("=", missing) - return base64.URLEncoding.DecodeString(data) -} - // MarshalJSON encodes a JSONBuffer for transmission. func (jb JSONBuffer) MarshalJSON() (result []byte, err error) { - return json.Marshal(base64URLEncode(jb)) + return json.Marshal(base64.RawURLEncoding.EncodeToString(jb)) } // UnmarshalJSON decodes a JSONBuffer to an object. @@ -439,7 +350,7 @@ func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) { if err != nil { return err } - *jb, err = base64URLDecode(str) + *jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "=")) return } @@ -457,63 +368,46 @@ type Certificate struct { } // CertificateStatus structs are internal to the server. They represent the -// latest data about the status of the certificate, required for OCSP updating -// and for validating that the subscriber has accepted the certificate. +// latest data about the status of the certificate, required for generating new +// OCSP responses and determining if a certificate has been revoked. type CertificateStatus struct { ID int64 `db:"id"` Serial string `db:"serial"` // status: 'good' or 'revoked'. Note that good, expired certificates remain - // with status 'good' but don't necessarily get fresh OCSP responses. + // with status 'good' but don't necessarily get fresh OCSP responses. Status OCSPStatus `db:"status"` // ocspLastUpdated: The date and time of the last time we generated an OCSP - // response. If we have never generated one, this has the zero value of - // time.Time, i.e. Jan 1 1970. + // response. If we have never generated one, this has the zero value of + // time.Time, i.e. Jan 1 1970. OCSPLastUpdated time.Time `db:"ocspLastUpdated"` // revokedDate: If status is 'revoked', this is the date and time it was - // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. + // revoked. Otherwise it has the zero value of time.Time, i.e. Jan 1 1970. RevokedDate time.Time `db:"revokedDate"` // revokedReason: If status is 'revoked', this is the reason code for the - // revocation. Otherwise it is zero (which happens to be the reason - // code for 'unspecified'). + // revocation. Otherwise it is zero (which happens to be the reason + // code for 'unspecified'). RevokedReason revocation.Reason `db:"revokedReason"` LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` - // The encoded and signed OCSP response. 
- OCSPResponse []byte `db:"ocspResponse"` - - // For performance reasons[0] we duplicate the `Expires` field of the - // `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN` - // later on just to retrieve this `Time` value. This helps both the OCSP - // updater and the expiration-mailer stay performant. - // - // Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus` - // table that the OCSP updater so that the database can create a meaningful - // index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`. - // For more detail see Boulder #1864[0]. - // - // [0]: https://github.com/letsencrypt/boulder/issues/1864 + // NotAfter and IsExpired are convenience columns which allow expensive + // queries to quickly filter out certificates that we don't need to care + // about anymore. These are particularly useful for the CRL updater. See + // https://github.com/letsencrypt/boulder/issues/1864. NotAfter time.Time `db:"notAfter"` IsExpired bool `db:"isExpired"` - // TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer - // has to support both IssuerNameIDs and IssuerIDs. - IssuerID int64 -} - -// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames -// contained in a certificate. -type FQDNSet struct { - ID int64 - SetHash []byte - Serial string - Issued time.Time - Expires time.Time + // Note: this is not an issuance.IssuerNameID because that would create an + // import cycle between core and issuance. + // Note2: This field used to be called `issuerID`. We keep the old name in + // the DB, but update the Go field name to be clear which type of ID this + // is. + IssuerNameID int64 `db:"issuerID"` } // SCTDERs is a convenience type @@ -529,8 +423,53 @@ type SuggestedWindow struct { End time.Time `json:"end"` } +// IsWithin returns true if the given time is within the suggested window, +// inclusive of the start time and exclusive of the end time. +func (window SuggestedWindow) IsWithin(now time.Time) bool { + return !now.Before(window.Start) && now.Before(window.End) +} + // RenewalInfo is a type which is exposed to clients which query the renewalInfo // endpoint specified in draft-aaron-ari. type RenewalInfo struct { SuggestedWindow SuggestedWindow `json:"suggestedWindow"` + ExplanationURL string `json:"explanationURL,omitempty"` +} + +// RenewalInfoSimple constructs a `RenewalInfo` object and suggested window +// using a very simple renewal calculation: calculate a point 2/3rds of the way +// through the validity period (or halfway through, for short-lived certs), then +// give a 2%-of-validity wide window around that. Both the `issued` and +// `expires` timestamps are expected to be UTC. +func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo { + validity := expires.Add(time.Second).Sub(issued) + renewalOffset := validity / time.Duration(3) + if validity < 10*24*time.Hour { + renewalOffset = validity / time.Duration(2) + } + idealRenewal := expires.Add(-renewalOffset) + margin := validity / time.Duration(100) + return RenewalInfo{ + SuggestedWindow: SuggestedWindow{ + Start: idealRenewal.Add(-1 * margin).Truncate(time.Second), + End: idealRenewal.Add(margin).Truncate(time.Second), + }, + } +} + +// RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested +// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should +// attempt to renew immediately if the suggested window is in the past. 
The
+// passed `now` is assumed to be a timestamp representing the current moment in
+// time. The `explanationURL` is an optional URL that the subscriber can use to
+// learn more about why the renewal is suggested.
+func RenewalInfoImmediate(now time.Time, explanationURL string) RenewalInfo {
+	oneHourAgo := now.Add(-1 * time.Hour)
+	return RenewalInfo{
+		SuggestedWindow: SuggestedWindow{
+			Start: oneHourAgo.Truncate(time.Second),
+			End:   oneHourAgo.Add(time.Minute * 30).Truncate(time.Second),
+		},
+		ExplanationURL: explanationURL,
+	}
+}
diff --git a/core/objects_test.go b/core/objects_test.go
index f41aa5c5555..302934a669e 100644
--- a/core/objects_test.go
+++ b/core/objects_test.go
@@ -4,10 +4,11 @@ import (
 	"crypto/rsa"
 	"encoding/json"
 	"math/big"
-	"net"
+	"net/netip"
 	"testing"
+	"time"
 
-	"gopkg.in/square/go-jose.v2"
+	"github.com/go-jose/go-jose/v4"
 
 	"github.com/letsencrypt/boulder/test"
 )
@@ -32,14 +33,15 @@ func TestExpectedKeyAuthorization(t *testing.T) {
 	}
 }
 
-func TestRecordSanityCheckOnUnsupportChallengeType(t *testing.T) {
+func TestRecordSanityCheckOnUnsupportedChallengeType(t *testing.T) {
 	rec := []ValidationRecord{
 		{
 			URL:               "http://localhost/test",
 			Hostname:          "localhost",
 			Port:              "80",
-			AddressesResolved: []net.IP{{127, 0, 0, 1}},
-			AddressUsed:       net.IP{127, 0, 0, 1},
+			AddressesResolved: []netip.Addr{netip.MustParseAddr("127.0.0.1")},
+			AddressUsed:       netip.MustParseAddr("127.0.0.1"),
+			ResolverAddrs:     []string{"eastUnboundAndDown"},
 		},
 	}
 
@@ -57,30 +59,20 @@ func TestChallengeSanityCheck(t *testing.T) {
 	}`), &accountKey)
 	test.AssertNotError(t, err, "Error unmarshaling JWK")
 
-	types := []AcmeChallenge{ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01}
+	types := []AcmeChallenge{ChallengeTypeHTTP01, ChallengeTypeDNS01, ChallengeTypeTLSALPN01, ChallengeTypeDNSAccount01}
 	for _, challengeType := range types {
 		chall := Challenge{
 			Type:   challengeType,
 			Status: StatusInvalid,
 		}
-		test.AssertError(t, chall.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer didn't return an error")
+		test.AssertError(t, chall.CheckPending(), "CheckPending didn't return an error")
 		chall.Status = StatusPending
-		test.AssertError(t, chall.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer didn't return an error")
+		test.AssertError(t, chall.CheckPending(), "CheckPending didn't return an error")
 		chall.Token = "KQqLsiS5j0CONR_eUXTUSUDNVaHODtc-0pD6ACif7U4"
-		test.AssertNotError(t, chall.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer returned an error")
-
-		chall.ProvidedKeyAuthorization = chall.Token + ".AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
-		test.AssertNotError(t, chall.CheckConsistencyForValidation(), "CheckConsistencyForValidation returned an error")
-
-		chall.ProvidedKeyAuthorization = "aaaa.aaaa"
-		test.AssertError(t, chall.CheckConsistencyForValidation(), "CheckConsistencyForValidation didn't return an error")
+		test.AssertNotError(t, chall.CheckPending(), "CheckPending returned an error")
 	}
-
-	chall := Challenge{Type: "bogus", Status: StatusPending}
-	test.AssertError(t, chall.CheckConsistencyForClientOffer(), "CheckConsistencyForClientOffer didn't return an error")
-	test.AssertError(t, chall.CheckConsistencyForValidation(), "CheckConsistencyForValidation didn't return an error")
 }
 
 func TestJSONBufferUnmarshal(t *testing.T) {
@@ -108,7 +100,7 @@ func TestAuthorizationSolvedBy(t *testing.T) {
 		{
 			Name:          "No challenges",
 			Authz:         Authorization{},
-			ExpectedError: "Authorization has no challenges",
+			ExpectedError: "authorization has no challenges",
 		},
 		// An authz with all non-valid challenges should return nil
 		{
@@ -116,7 +108,7 @@ func TestAuthorizationSolvedBy(t *testing.T) {
 			Authz: Authorization{
 				Challenges: []Challenge{HTTPChallenge01(""), DNSChallenge01("")},
 			},
-			ExpectedError: "Authorization not solved by any challenge",
+			ExpectedError: "authorization not solved by any challenge",
 		},
 		// An authz with one valid HTTP01 challenge amongst other challenges should
 		// return the HTTP01 challenge
@@ -146,7 +138,7 @@ func TestAuthorizationSolvedBy(t *testing.T) {
 				test.AssertEquals(t, err.Error(), tc.ExpectedError)
 			}
 			if tc.ExpectedResult != "" {
-				test.AssertEquals(t, *result, tc.ExpectedResult)
+				test.AssertEquals(t, result, tc.ExpectedResult)
 			}
 		})
 	}
@@ -160,6 +152,8 @@ func TestChallengeStringID(t *testing.T) {
 	test.AssertEquals(t, ch.StringID(), "iFVMwA")
 	ch.Type = ChallengeTypeHTTP01
 	test.AssertEquals(t, ch.StringID(), "0Gexug")
+	ch.Type = ChallengeTypeDNSAccount01
+	test.AssertEquals(t, ch.StringID(), "8z2wSg")
 }
 
 func TestFindChallengeByType(t *testing.T) {
@@ -173,3 +167,26 @@ func TestFindChallengeByType(t *testing.T) {
 	test.AssertEquals(t, 1, authz.FindChallengeByStringID(authz.Challenges[1].StringID()))
 	test.AssertEquals(t, -1, authz.FindChallengeByStringID("hello"))
 }
+
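To make the RenewalInfoSimple window arithmetic from core/objects.go concrete before the boundary tests below, here is a worked example for a hypothetical 90-day certificate (illustrative only; the dates follow directly from the formula):

    func ExampleRenewalInfoSimple() {
        issued := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
        expires := issued.Add(90 * 24 * time.Hour) // 2025-04-01, above the 10-day short-lived cutoff
        // renewalOffset = validity/3 = 30 days, so the ideal renewal point is
        // 60 days in (2025-03-02 00:00 UTC); margin = validity/100 = 21.6h,
        // giving a suggested window of roughly [2025-03-01 02:24, 2025-03-02 21:36).
        window := RenewalInfoSimple(issued, expires).SuggestedWindow
        fmt.Println(window.Start, window.End)
    }
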
challenges", + ExpectedError: "authorization has no challenges", }, // An authz with all non-valid challenges should return nil { @@ -116,7 +108,7 @@ func TestAuthorizationSolvedBy(t *testing.T) { Authz: Authorization{ Challenges: []Challenge{HTTPChallenge01(""), DNSChallenge01("")}, }, - ExpectedError: "Authorization not solved by any challenge", + ExpectedError: "authorization not solved by any challenge", }, // An authz with one valid HTTP01 challenge amongst other challenges should // return the HTTP01 challenge @@ -146,7 +138,7 @@ func TestAuthorizationSolvedBy(t *testing.T) { test.AssertEquals(t, err.Error(), tc.ExpectedError) } if tc.ExpectedResult != "" { - test.AssertEquals(t, *result, tc.ExpectedResult) + test.AssertEquals(t, result, tc.ExpectedResult) } }) } @@ -160,6 +152,8 @@ func TestChallengeStringID(t *testing.T) { test.AssertEquals(t, ch.StringID(), "iFVMwA") ch.Type = ChallengeTypeHTTP01 test.AssertEquals(t, ch.StringID(), "0Gexug") + ch.Type = ChallengeTypeDNSAccount01 + test.AssertEquals(t, ch.StringID(), "8z2wSg") } func TestFindChallengeByType(t *testing.T) { @@ -173,3 +167,26 @@ func TestFindChallengeByType(t *testing.T) { test.AssertEquals(t, 1, authz.FindChallengeByStringID(authz.Challenges[1].StringID())) test.AssertEquals(t, -1, authz.FindChallengeByStringID("hello")) } + +func TestRenewalInfoSuggestedWindowIsWithin(t *testing.T) { + now := time.Now().UTC() + window := SuggestedWindow{ + Start: now, + End: now.Add(time.Hour), + } + + // Exactly the beginning, inclusive of the first nanosecond. + test.Assert(t, window.IsWithin(now), "Start of window should be within the window") + + // Exactly the middle. + test.Assert(t, window.IsWithin(now.Add(time.Minute*30)), "Middle of window should be within the window") + + // Exactly the end time. + test.Assert(t, !window.IsWithin(now.Add(time.Hour)), "End of window should be outside the window") + + // Exactly the end of the window. + test.Assert(t, window.IsWithin(now.Add(time.Hour-time.Nanosecond)), "Should be just inside the window") + + // Just before the first nanosecond. + test.Assert(t, !window.IsWithin(now.Add(-time.Nanosecond)), "Before the window should not be within the window") +} diff --git a/core/proto/core.pb.go b/core/proto/core.pb.go index 3a9cc1036e9..be9431bbad5 100644 --- a/core/proto/core.pb.go +++ b/core/proto/core.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: core.proto package proto @@ -9,8 +9,10 @@ package proto import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -20,29 +22,80 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type Challenge struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Identifier struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Identifier) Reset() { + *x = Identifier{} + mi := &file_core_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Identifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identifier) ProtoMessage() {} - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"` - Uri string `protobuf:"bytes,9,opt,name=uri,proto3" json:"uri,omitempty"` - Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` - KeyAuthorization string `protobuf:"bytes,5,opt,name=keyAuthorization,proto3" json:"keyAuthorization,omitempty"` +func (x *Identifier) ProtoReflect() protoreflect.Message { + mi := &file_core_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identifier.ProtoReflect.Descriptor instead. +func (*Identifier) Descriptor() ([]byte, []int) { + return file_core_proto_rawDescGZIP(), []int{0} +} + +func (x *Identifier) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Identifier) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type Challenge struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Fields specified by RFC 8555, Section 8. + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Url string `protobuf:"bytes,9,opt,name=url,proto3" json:"url,omitempty"` + Status string `protobuf:"bytes,6,opt,name=status,proto3" json:"status,omitempty"` + Validated *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=validated,proto3" json:"validated,omitempty"` + Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` + // Fields specified by individual validation methods. + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + // Additional fields for our own record keeping. 
Validationrecords []*ValidationRecord `protobuf:"bytes,10,rep,name=validationrecords,proto3" json:"validationrecords,omitempty"` - Error *ProblemDetails `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` - Validated int64 `protobuf:"varint,11,opt,name=validated,proto3" json:"validated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Challenge) Reset() { *x = Challenge{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Challenge) String() string { @@ -52,8 +105,8 @@ func (x *Challenge) String() string { func (*Challenge) ProtoMessage() {} func (x *Challenge) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -65,7 +118,7 @@ func (x *Challenge) ProtoReflect() protoreflect.Message { // Deprecated: Use Challenge.ProtoReflect.Descriptor instead. func (*Challenge) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{0} + return file_core_proto_rawDescGZIP(), []int{1} } func (x *Challenge) GetId() int64 { @@ -82,79 +135,71 @@ func (x *Challenge) GetType() string { return "" } -func (x *Challenge) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *Challenge) GetUri() string { +func (x *Challenge) GetUrl() string { if x != nil { - return x.Uri + return x.Url } return "" } -func (x *Challenge) GetToken() string { +func (x *Challenge) GetStatus() string { if x != nil { - return x.Token + return x.Status } return "" } -func (x *Challenge) GetKeyAuthorization() string { +func (x *Challenge) GetValidated() *timestamppb.Timestamp { if x != nil { - return x.KeyAuthorization + return x.Validated } - return "" + return nil } -func (x *Challenge) GetValidationrecords() []*ValidationRecord { +func (x *Challenge) GetError() *ProblemDetails { if x != nil { - return x.Validationrecords + return x.Error } return nil } -func (x *Challenge) GetError() *ProblemDetails { +func (x *Challenge) GetToken() string { if x != nil { - return x.Error + return x.Token } - return nil + return "" } -func (x *Challenge) GetValidated() int64 { +func (x *Challenge) GetValidationrecords() []*ValidationRecord { if x != nil { - return x.Validated + return x.Validationrecords } - return 0 + return nil } type ValidationRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 9 Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` Port string `protobuf:"bytes,2,opt,name=port,proto3" json:"port,omitempty"` - AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // net.IP.MarshalText() - AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" json:"addressUsed,omitempty"` // net.IP.MarshalText() + AddressesResolved [][]byte `protobuf:"bytes,3,rep,name=addressesResolved,proto3" json:"addressesResolved,omitempty"` // netip.Addr.MarshalText() + AddressUsed []byte `protobuf:"bytes,4,opt,name=addressUsed,proto3" 
json:"addressUsed,omitempty"` // netip.Addr.MarshalText() Authorities []string `protobuf:"bytes,5,rep,name=authorities,proto3" json:"authorities,omitempty"` Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"` // A list of addresses tried before the address used (see // core/objects.go and the comment on the ValidationRecord structure // definition for more information. - AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // net.IP.MarshalText() + AddressesTried [][]byte `protobuf:"bytes,7,rep,name=addressesTried,proto3" json:"addressesTried,omitempty"` // netip.Addr.MarshalText() + ResolverAddrs []string `protobuf:"bytes,8,rep,name=resolverAddrs,proto3" json:"resolverAddrs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ValidationRecord) Reset() { *x = ValidationRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidationRecord) String() string { @@ -164,8 +209,8 @@ func (x *ValidationRecord) String() string { func (*ValidationRecord) ProtoMessage() {} func (x *ValidationRecord) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -177,7 +222,7 @@ func (x *ValidationRecord) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidationRecord.ProtoReflect.Descriptor instead. 
func (*ValidationRecord) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{1} + return file_core_proto_rawDescGZIP(), []int{2} } func (x *ValidationRecord) GetHostname() string { @@ -229,23 +274,27 @@ func (x *ValidationRecord) GetAddressesTried() [][]byte { return nil } +func (x *ValidationRecord) GetResolverAddrs() []string { + if x != nil { + return x.ResolverAddrs + } + return nil +} + type ProblemDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"` + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` + HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"` unknownFields protoimpl.UnknownFields - - ProblemType string `protobuf:"bytes,1,opt,name=problemType,proto3" json:"problemType,omitempty"` - Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` - HttpStatus int32 `protobuf:"varint,3,opt,name=httpStatus,proto3" json:"httpStatus,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProblemDetails) Reset() { *x = ProblemDetails{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ProblemDetails) String() string { @@ -255,8 +304,8 @@ func (x *ProblemDetails) String() string { func (*ProblemDetails) ProtoMessage() {} func (x *ProblemDetails) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -268,7 +317,7 @@ func (x *ProblemDetails) ProtoReflect() protoreflect.Message { // Deprecated: Use ProblemDetails.ProtoReflect.Descriptor instead. 
func (*ProblemDetails) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{2} + return file_core_proto_rawDescGZIP(), []int{3} } func (x *ProblemDetails) GetProblemType() string { @@ -293,25 +342,23 @@ func (x *ProblemDetails) GetHttpStatus() int32 { } type Certificate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` - Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` - Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"` - Issued int64 `protobuf:"varint,5,opt,name=issued,proto3" json:"issued,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,6,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 9 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` + Digest string `protobuf:"bytes,3,opt,name=digest,proto3" json:"digest,omitempty"` + Der []byte `protobuf:"bytes,4,opt,name=der,proto3" json:"der,omitempty"` + Issued *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=issued,proto3" json:"issued,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=expires,proto3" json:"expires,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Certificate) Reset() { *x = Certificate{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Certificate) String() string { @@ -321,8 +368,8 @@ func (x *Certificate) String() string { func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -334,7 +381,7 @@ func (x *Certificate) ProtoReflect() protoreflect.Message { // Deprecated: Use Certificate.ProtoReflect.Descriptor instead. 
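Certificate's issued and expires fields above, like the other timestamps regenerated in this file, moved from int64 unix nanoseconds to google.protobuf.Timestamp. At the gRPC boundaries the conversion uses the standard timestamppb helpers; a minimal sketch (the surrounding variables are illustrative, not code from this diff):

    import (
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func certTimes(issued time.Time) {
        // time.Time -> *timestamppb.Timestamp when building the message:
        pb := &Certificate{Issued: timestamppb.New(issued)}
        // ...and back to a (UTC) time.Time when consuming it:
        _ = pb.GetIssued().AsTime()
    }
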
func (*Certificate) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{3} + return file_core_proto_rawDescGZIP(), []int{4} } func (x *Certificate) GetRegistrationID() int64 { @@ -365,44 +412,41 @@ func (x *Certificate) GetDer() []byte { return nil } -func (x *Certificate) GetIssued() int64 { +func (x *Certificate) GetIssued() *timestamppb.Timestamp { if x != nil { return x.Issued } - return 0 + return nil } -func (x *Certificate) GetExpires() int64 { +func (x *Certificate) GetExpires() *timestamppb.Timestamp { if x != nil { return x.Expires } - return 0 + return nil } type CertificateStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - OcspLastUpdated int64 `protobuf:"varint,4,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"` - RevokedDate int64 `protobuf:"varint,5,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` - RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"` - LastExpirationNagSent int64 `protobuf:"varint,7,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"` - OcspResponse []byte `protobuf:"bytes,8,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"` - NotAfter int64 `protobuf:"varint,9,opt,name=notAfter,proto3" json:"notAfter,omitempty"` - IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"` - IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 16 + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + OcspLastUpdated *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=ocspLastUpdated,proto3" json:"ocspLastUpdated,omitempty"` + RevokedDate *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` + RevokedReason int64 `protobuf:"varint,6,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"` + LastExpirationNagSent *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=lastExpirationNagSent,proto3" json:"lastExpirationNagSent,omitempty"` + NotAfter *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=notAfter,proto3" json:"notAfter,omitempty"` + IsExpired bool `protobuf:"varint,10,opt,name=isExpired,proto3" json:"isExpired,omitempty"` + IssuerID int64 `protobuf:"varint,11,opt,name=issuerID,proto3" json:"issuerID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CertificateStatus) Reset() { *x = CertificateStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CertificateStatus) String() string { @@ -412,8 +456,8 @@ func (x *CertificateStatus) String() string { func (*CertificateStatus) ProtoMessage() {} func (x *CertificateStatus) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[5] + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -425,7 +469,7 @@ func (x *CertificateStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use CertificateStatus.ProtoReflect.Descriptor instead. func (*CertificateStatus) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{4} + return file_core_proto_rawDescGZIP(), []int{5} } func (x *CertificateStatus) GetSerial() string { @@ -442,18 +486,18 @@ func (x *CertificateStatus) GetStatus() string { return "" } -func (x *CertificateStatus) GetOcspLastUpdated() int64 { +func (x *CertificateStatus) GetOcspLastUpdated() *timestamppb.Timestamp { if x != nil { return x.OcspLastUpdated } - return 0 + return nil } -func (x *CertificateStatus) GetRevokedDate() int64 { +func (x *CertificateStatus) GetRevokedDate() *timestamppb.Timestamp { if x != nil { return x.RevokedDate } - return 0 + return nil } func (x *CertificateStatus) GetRevokedReason() int64 { @@ -463,25 +507,18 @@ func (x *CertificateStatus) GetRevokedReason() int64 { return 0 } -func (x *CertificateStatus) GetLastExpirationNagSent() int64 { +func (x *CertificateStatus) GetLastExpirationNagSent() *timestamppb.Timestamp { if x != nil { return x.LastExpirationNagSent } - return 0 -} - -func (x *CertificateStatus) GetOcspResponse() []byte { - if x != nil { - return x.OcspResponse - } return nil } -func (x *CertificateStatus) GetNotAfter() int64 { +func (x *CertificateStatus) GetNotAfter() *timestamppb.Timestamp { if x != nil { return x.NotAfter } - return 0 + return nil } func (x *CertificateStatus) GetIsExpired() bool { @@ -499,27 +536,22 @@ func (x *CertificateStatus) GetIssuerID() int64 { } type Registration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 10 + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"` + Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Contact []string `protobuf:"bytes,3,rep,name=contact,proto3" json:"contact,omitempty"` - ContactsPresent bool `protobuf:"varint,4,opt,name=contactsPresent,proto3" json:"contactsPresent,omitempty"` - Agreement string `protobuf:"bytes,5,opt,name=agreement,proto3" json:"agreement,omitempty"` - InitialIP []byte `protobuf:"bytes,6,opt,name=initialIP,proto3" json:"initialIP,omitempty"` - CreatedAt int64 `protobuf:"varint,7,opt,name=createdAt,proto3" json:"createdAt,omitempty"` // Unix timestamp (nanoseconds) - Status string `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Registration) Reset() { *x = Registration{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Registration) String() string { @@ -529,8 +561,8 @@ func (x *Registration) String() 
string { func (*Registration) ProtoMessage() {} func (x *Registration) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -542,7 +574,7 @@ func (x *Registration) ProtoReflect() protoreflect.Message { // Deprecated: Use Registration.ProtoReflect.Descriptor instead. func (*Registration) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{5} + return file_core_proto_rawDescGZIP(), []int{6} } func (x *Registration) GetId() int64 { @@ -559,20 +591,6 @@ func (x *Registration) GetKey() []byte { return nil } -func (x *Registration) GetContact() []string { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Registration) GetContactsPresent() bool { - if x != nil { - return x.ContactsPresent - } - return false -} - func (x *Registration) GetAgreement() string { if x != nil { return x.Agreement @@ -580,18 +598,11 @@ func (x *Registration) GetAgreement() string { return "" } -func (x *Registration) GetInitialIP() []byte { - if x != nil { - return x.InitialIP - } - return nil -} - -func (x *Registration) GetCreatedAt() int64 { +func (x *Registration) GetCreatedAt() *timestamppb.Timestamp { if x != nil { return x.CreatedAt } - return 0 + return nil } func (x *Registration) GetStatus() string { @@ -602,25 +613,23 @@ func (x *Registration) GetStatus() string { } type Authorization struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Identifier string `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` - RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` - Expires int64 `protobuf:"varint,5,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) - Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifier *Identifier `protobuf:"bytes,11,opt,name=identifier,proto3" json:"identifier,omitempty"` + Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"` + Challenges []*Challenge `protobuf:"bytes,6,rep,name=challenges,proto3" json:"challenges,omitempty"` + CertificateProfileName string `protobuf:"bytes,10,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Authorization) Reset() { *x = Authorization{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Authorization) String() string { @@ -630,8 +639,8 @@ func (x *Authorization) String() string { func (*Authorization) 
ProtoMessage() {} func (x *Authorization) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -643,7 +652,7 @@ func (x *Authorization) ProtoReflect() protoreflect.Message { // Deprecated: Use Authorization.ProtoReflect.Descriptor instead. func (*Authorization) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{6} + return file_core_proto_rawDescGZIP(), []int{7} } func (x *Authorization) GetId() string { @@ -653,18 +662,18 @@ func (x *Authorization) GetId() string { return "" } -func (x *Authorization) GetIdentifier() string { +func (x *Authorization) GetRegistrationID() int64 { if x != nil { - return x.Identifier + return x.RegistrationID } - return "" + return 0 } -func (x *Authorization) GetRegistrationID() int64 { +func (x *Authorization) GetIdentifier() *Identifier { if x != nil { - return x.RegistrationID + return x.Identifier } - return 0 + return nil } func (x *Authorization) GetStatus() string { @@ -674,11 +683,11 @@ func (x *Authorization) GetStatus() string { return "" } -func (x *Authorization) GetExpires() int64 { +func (x *Authorization) GetExpires() *timestamppb.Timestamp { if x != nil { return x.Expires } - return 0 + return nil } func (x *Authorization) GetChallenges() []*Challenge { @@ -688,30 +697,40 @@ func (x *Authorization) GetChallenges() []*Challenge { return nil } -type Order struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Authorization) GetCertificateProfileName() string { + if x != nil { + return x.CertificateProfileName + } + return "" +} - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` - Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` - Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"` - Names []string `protobuf:"bytes,8,rep,name=names,proto3" json:"names,omitempty"` - BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"` - Created int64 `protobuf:"varint,10,opt,name=created,proto3" json:"created,omitempty"` - V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` +type Order struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + // Fields specified by RFC 8555, Section 7.1.3 + // Note that we do not respect notBefore and notAfter, and we infer the + // finalize and certificate URLs from the id and certificateSerial fields. 
+ Status string `protobuf:"bytes,7,opt,name=status,proto3" json:"status,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=expires,proto3" json:"expires,omitempty"` + Identifiers []*Identifier `protobuf:"bytes,16,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + Error *ProblemDetails `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + V2Authorizations []int64 `protobuf:"varint,11,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` + CertificateSerial string `protobuf:"bytes,5,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` + // Additional fields for our own record-keeping. + Created *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=created,proto3" json:"created,omitempty"` + CertificateProfileName string `protobuf:"bytes,14,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"` + Replaces string `protobuf:"bytes,15,opt,name=replaces,proto3" json:"replaces,omitempty"` + BeganProcessing bool `protobuf:"varint,9,opt,name=beganProcessing,proto3" json:"beganProcessing,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Order) Reset() { *x = Order{} - if protoimpl.UnsafeEnabled { - mi := &file_core_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_core_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Order) String() string { @@ -721,8 +740,8 @@ func (x *Order) String() string { func (*Order) ProtoMessage() {} func (x *Order) ProtoReflect() protoreflect.Message { - mi := &file_core_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_core_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -734,7 +753,7 @@ func (x *Order) ProtoReflect() protoreflect.Message { // Deprecated: Use Order.ProtoReflect.Descriptor instead. 
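Beyond the timestamp migration, Order gains certificateProfileName and replaces above; replaces carries the ACME Renewal Information (draft-ietf-acme-ari) certificate ID of the certificate this order renews. An illustrative construction (every field value here, including the certID, is a made-up example):

    order := &Order{
        RegistrationID: 1,
        Identifiers:    []*Identifier{{Type: "dns", Value: "example.com"}},
        // ARI certID of the certificate being replaced, i.e.
        // base64url(authorityKeyIdentifier) "." base64url(serialNumber).
        Replaces:               "aYhba4dGQEHhs3uEe6CuLN4ByNQ.AIdlQyE",
        CertificateProfileName: "modern",
    }
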
func (*Order) Descriptor() ([]byte, []int) { - return file_core_proto_rawDescGZIP(), []int{7} + return file_core_proto_rawDescGZIP(), []int{8} } func (x *Order) GetId() int64 { @@ -751,11 +770,25 @@ func (x *Order) GetRegistrationID() int64 { return 0 } -func (x *Order) GetExpires() int64 { +func (x *Order) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *Order) GetExpires() *timestamppb.Timestamp { if x != nil { return x.Expires } - return 0 + return nil +} + +func (x *Order) GetIdentifiers() []*Identifier { + if x != nil { + return x.Identifiers + } + return nil } func (x *Order) GetError() *ProblemDetails { @@ -765,6 +798,13 @@ func (x *Order) GetError() *ProblemDetails { return nil } +func (x *Order) GetV2Authorizations() []int64 { + if x != nil { + return x.V2Authorizations + } + return nil +} + func (x *Order) GetCertificateSerial() string { if x != nil { return x.CertificateSerial @@ -772,18 +812,25 @@ func (x *Order) GetCertificateSerial() string { return "" } -func (x *Order) GetStatus() string { +func (x *Order) GetCreated() *timestamppb.Timestamp { if x != nil { - return x.Status + return x.Created + } + return nil +} + +func (x *Order) GetCertificateProfileName() string { + if x != nil { + return x.CertificateProfileName } return "" } -func (x *Order) GetNames() []string { +func (x *Order) GetReplaces() string { if x != nil { - return x.Names + return x.Replaces } - return nil + return "" } func (x *Order) GetBeganProcessing() bool { @@ -793,187 +840,294 @@ func (x *Order) GetBeganProcessing() bool { return false } -func (x *Order) GetCreated() int64 { +type CRLEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 5 + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` + RevokedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CRLEntry) Reset() { + *x = CRLEntry{} + mi := &file_core_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CRLEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CRLEntry) ProtoMessage() {} + +func (x *CRLEntry) ProtoReflect() protoreflect.Message { + mi := &file_core_proto_msgTypes[9] if x != nil { - return x.Created + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead. 
+func (*CRLEntry) Descriptor() ([]byte, []int) { + return file_core_proto_rawDescGZIP(), []int{9} +} + +func (x *CRLEntry) GetSerial() string { + if x != nil { + return x.Serial + } + return "" +} + +func (x *CRLEntry) GetReason() int32 { + if x != nil { + return x.Reason } return 0 } -func (x *Order) GetV2Authorizations() []int64 { +func (x *CRLEntry) GetRevokedAt() *timestamppb.Timestamp { if x != nil { - return x.V2Authorizations + return x.RevokedAt } return nil } var File_core_proto protoreflect.FileDescriptor -var file_core_proto_rawDesc = []byte{ +var file_core_proto_rawDesc = string([]byte{ 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x63, 0x6f, - 0x72, 0x65, 0x22, 0xab, 0x02, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x6b, 0x65, 0x79, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, + 0x72, 0x65, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x36, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb3, 0x02, 0x0a, 0x09, + 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x0b, 0x10, + 0x0c, 0x22, 0x94, 0x02, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, + 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, + 0x65, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x73, 0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, + 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, + 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x22, 
0xed, 0x01, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x32, + 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x22, 0xd5, 0x03, 0x0a, 0x11, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x0f, 0x6f, 0x63, + 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, + 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1c, + 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb8, 0x01, 0x0a, + 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, + 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, + 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0xc8, 0x02, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x44, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x12, 0x2f, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, + 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, + 0x65, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 
0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x22, 0x93, 0x04, 0x0a, 0x05, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x22, 0xee, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x11, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x55, 0x73, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x54, 0x72, 0x69, 0x65, - 0x64, 
0x22, 0x6a, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x62, 0x6c, 0x65, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x1e, 0x0a, - 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xa9, 0x01, - 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, - 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, - 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, 0x11, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x28, 0x0a, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6f, 0x63, 0x73, 0x70, 0x4c, 0x61, - 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x76, - 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x61, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, - 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e, - 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, - 0x6f, 0x74, 0x41, 0x66, 0x74, 
0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x45, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x45, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, - 0x44, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, - 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0xd6, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x0a, - 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, - 0x65, 0x52, 0x0a, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xd7, 0x02, 0x0a, 0x05, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, - 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x28, 0x0a, + 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x34, 0x0a, 0x07, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x62, 0x65, 0x67, 0x61, 0x6e, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, - 0x06, 0x10, 0x07, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, - 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x0a, + 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x7a, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, + 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, + 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_core_proto_rawDescOnce sync.Once - file_core_proto_rawDescData = file_core_proto_rawDesc + file_core_proto_rawDescData []byte ) func file_core_proto_rawDescGZIP() []byte { file_core_proto_rawDescOnce.Do(func() { - file_core_proto_rawDescData = protoimpl.X.CompressGZIP(file_core_proto_rawDescData) + file_core_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_proto_rawDesc), len(file_core_proto_rawDesc))) }) return file_core_proto_rawDescData } -var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_core_proto_goTypes = []interface{}{ - (*Challenge)(nil), // 0: core.Challenge - (*ValidationRecord)(nil), // 1: core.ValidationRecord - (*ProblemDetails)(nil), // 2: core.ProblemDetails - (*Certificate)(nil), // 3: core.Certificate - (*CertificateStatus)(nil), // 4: core.CertificateStatus - (*Registration)(nil), // 5: core.Registration - (*Authorization)(nil), // 6: core.Authorization - (*Order)(nil), // 7: core.Order +var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_core_proto_goTypes = []any{ + (*Identifier)(nil), // 0: core.Identifier + (*Challenge)(nil), // 1: core.Challenge + (*ValidationRecord)(nil), // 2: core.ValidationRecord + (*ProblemDetails)(nil), // 3: core.ProblemDetails + (*Certificate)(nil), // 4: core.Certificate + (*CertificateStatus)(nil), // 5: core.CertificateStatus + (*Registration)(nil), // 6: core.Registration + (*Authorization)(nil), // 7: core.Authorization + (*Order)(nil), // 8: core.Order + (*CRLEntry)(nil), // 9: core.CRLEntry + (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp } var file_core_proto_depIdxs = []int32{ - 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord - 2, // 1: core.Challenge.error:type_name -> core.ProblemDetails - 0, // 2: core.Authorization.challenges:type_name -> core.Challenge - 2, // 3: core.Order.error:type_name -> core.ProblemDetails - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 10, // 0: 
core.Challenge.validated:type_name -> google.protobuf.Timestamp + 3, // 1: core.Challenge.error:type_name -> core.ProblemDetails + 2, // 2: core.Challenge.validationrecords:type_name -> core.ValidationRecord + 10, // 3: core.Certificate.issued:type_name -> google.protobuf.Timestamp + 10, // 4: core.Certificate.expires:type_name -> google.protobuf.Timestamp + 10, // 5: core.CertificateStatus.ocspLastUpdated:type_name -> google.protobuf.Timestamp + 10, // 6: core.CertificateStatus.revokedDate:type_name -> google.protobuf.Timestamp + 10, // 7: core.CertificateStatus.lastExpirationNagSent:type_name -> google.protobuf.Timestamp + 10, // 8: core.CertificateStatus.notAfter:type_name -> google.protobuf.Timestamp + 10, // 9: core.Registration.createdAt:type_name -> google.protobuf.Timestamp + 0, // 10: core.Authorization.identifier:type_name -> core.Identifier + 10, // 11: core.Authorization.expires:type_name -> google.protobuf.Timestamp + 1, // 12: core.Authorization.challenges:type_name -> core.Challenge + 10, // 13: core.Order.expires:type_name -> google.protobuf.Timestamp + 0, // 14: core.Order.identifiers:type_name -> core.Identifier + 3, // 15: core.Order.error:type_name -> core.ProblemDetails + 10, // 16: core.Order.created:type_name -> google.protobuf.Timestamp + 10, // 17: core.CRLEntry.revokedAt:type_name -> google.protobuf.Timestamp + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { file_core_proto_init() } @@ -981,111 +1135,13 @@ func file_core_proto_init() { if File_core_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_core_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Challenge); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidationRecord); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProblemDetails); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Certificate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertificateStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Registration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_core_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorization); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
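// The Exporter hooks being deleted here existed only for protoimpl's
// reflection-based fallback, used when the library's unsafe code path was
// disabled; newer protoc-gen-go output initializes message state directly in
// Reset and ProtoReflect, so the whole !protoimpl.UnsafeEnabled block is
// dropped.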
file_core_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Order); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_core_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_proto_rawDesc), len(file_core_proto_rawDesc)), NumEnums: 0, - NumMessages: 8, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, @@ -1094,7 +1150,6 @@ func file_core_proto_init() { MessageInfos: file_core_proto_msgTypes, }.Build() File_core_proto = out.File - file_core_proto_rawDesc = nil file_core_proto_goTypes = nil file_core_proto_depIdxs = nil } diff --git a/core/proto/core.proto b/core/proto/core.proto index 06abe5e99ed..c9def4ec74c 100644 --- a/core/proto/core.proto +++ b/core/proto/core.proto @@ -3,30 +3,43 @@ syntax = "proto3"; package core; option go_package = "github.com/letsencrypt/boulder/core/proto"; +import "google/protobuf/timestamp.proto"; + +message Identifier { + string type = 1; + string value = 2; +} + message Challenge { + // Next unused field number: 13 + reserved 4, 5, 8, 11; int64 id = 1; + // Fields specified by RFC 8555, Section 8. string type = 2; + string url = 9; string status = 6; - string uri = 9; + google.protobuf.Timestamp validated = 12; + ProblemDetails error = 7; + // Fields specified by individual validation methods. string token = 3; - string keyAuthorization = 5; + // Additional fields for our own record keeping. repeated ValidationRecord validationrecords = 10; - ProblemDetails error = 7; - int64 validated = 11; } message ValidationRecord { + // Next unused field number: 9 string hostname = 1; string port = 2; - repeated bytes addressesResolved = 3; // net.IP.MarshalText() - bytes addressUsed = 4; // net.IP.MarshalText() + repeated bytes addressesResolved = 3; // netip.Addr.MarshalText() + bytes addressUsed = 4; // netip.Addr.MarshalText() repeated string authorities = 5; string url = 6; // A list of addresses tried before the address used (see // core/objects.go and the comment on the ValidationRecord structure // definition for more information. 
- repeated bytes addressesTried = 7; // net.IP.MarshalText() + repeated bytes addressesTried = 7; // netip.Addr.MarshalText() + repeated string resolverAddrs = 8; } message ProblemDetails { @@ -36,60 +49,91 @@ message ProblemDetails { } message Certificate { + // Next unused field number: 9 int64 registrationID = 1; string serial = 2; string digest = 3; bytes der = 4; - int64 issued = 5; // Unix timestamp (nanoseconds) - int64 expires = 6; // Unix timestamp (nanoseconds) + reserved 5; // Previously issuedNS + google.protobuf.Timestamp issued = 7; + reserved 6; // Previously expiresNS + google.protobuf.Timestamp expires = 8; } message CertificateStatus { + // Next unused field number: 16 string serial = 1; reserved 2; // previously subscriberApproved string status = 3; - int64 ocspLastUpdated = 4; - int64 revokedDate = 5; + reserved 4; // Previously ocspLastUpdatedNS + google.protobuf.Timestamp ocspLastUpdated = 15; + reserved 5; // Previously revokedDateNS + google.protobuf.Timestamp revokedDate = 12; int64 revokedReason = 6; - int64 lastExpirationNagSent = 7; - bytes ocspResponse = 8; - int64 notAfter = 9; + reserved 7; // Previously lastExpirationNagSentNS + reserved 8; // previously ocspResponse + google.protobuf.Timestamp lastExpirationNagSent = 13; + reserved 9; // Previously notAfterNS + google.protobuf.Timestamp notAfter = 14; bool isExpired = 10; int64 issuerID = 11; } message Registration { + // Next unused field number: 10 int64 id = 1; bytes key = 2; - repeated string contact = 3; - bool contactsPresent = 4; + reserved 3; // Previously contact + reserved 4; // Previously contactsPresent string agreement = 5; - bytes initialIP = 6; - int64 createdAt = 7; // Unix timestamp (nanoseconds) + reserved 6; // Previously initialIP + reserved 7; // Previously createdAtNS + google.protobuf.Timestamp createdAt = 9; string status = 8; } message Authorization { + // Next unused field number: 12 + reserved 5, 7, 8; string id = 1; - string identifier = 2; int64 registrationID = 3; + // Fields specified by RFC 8555, Section 7.1.4 + reserved 2; // Previously dnsName + Identifier identifier = 11; string status = 4; - int64 expires = 5; // Unix timestamp (nanoseconds) + google.protobuf.Timestamp expires = 9; repeated core.Challenge challenges = 6; - reserved 7; // previously combinations - reserved 8; // previously v2 + string certificateProfileName = 10; + // We do not directly represent the "wildcard" field, instead inferring it + // from the identifier value. } message Order { + // Next unused field number: 17 + reserved 3, 6, 10; int64 id = 1; int64 registrationID = 2; - int64 expires = 3; + // Fields specified by RFC 8555, Section 7.1.3 + // Note that we do not respect notBefore and notAfter, and we infer the + // finalize and certificate URLs from the id and certificateSerial fields. + string status = 7; + google.protobuf.Timestamp expires = 12; + reserved 8; // Previously dnsNames + repeated Identifier identifiers = 16; ProblemDetails error = 4; + repeated int64 v2Authorizations = 11; string certificateSerial = 5; - reserved 6; // previously authorizations, deprecated in favor of v2Authorizations - string status = 7; - repeated string names = 8; + // Additional fields for our own record-keeping. 
+ google.protobuf.Timestamp created = 13; + string certificateProfileName = 14; + string replaces = 15; bool beganProcessing = 9; - int64 created = 10; - repeated int64 v2Authorizations = 11; +} + +message CRLEntry { + // Next unused field number: 5 + string serial = 1; + int32 reason = 2; + reserved 3; // Previously revokedAtNS + google.protobuf.Timestamp revokedAt = 4; } diff --git a/core/util.go b/core/util.go index 29f0d9c3dde..a85ff2817eb 100644 --- a/core/util.go +++ b/core/util.go @@ -1,9 +1,11 @@ package core import ( - "bytes" + "context" "crypto" + "crypto/ecdsa" "crypto/rand" + "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" @@ -13,9 +15,10 @@ import ( "expvar" "fmt" "io" - "io/ioutil" "math/big" - mrand "math/rand" + mrand "math/rand/v2" + "os" + "path" "reflect" "regexp" "sort" @@ -23,9 +26,17 @@ import ( "time" "unicode" - jose "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/identifier" ) +const Unspecified = "Unspecified" + // Package Variables Variables // BuildID is set by the compiler (using -ldflags "-X core.BuildID $(git rev-parse --short HEAD)") @@ -70,9 +81,9 @@ func NewToken() string { var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`) -// LooksLikeAToken checks whether a string represents a 32-octet value in +// looksLikeAToken checks whether a string represents a 32-octet value in // the URL-safe base64 alphabet. -func LooksLikeAToken(token string) bool { +func looksLikeAToken(token string) bool { return tokenFormat.MatchString(token) } @@ -88,13 +99,12 @@ func Fingerprint256(data []byte) string { type Sha256Digest [sha256.Size]byte -// KeyDigest produces a Base64-encoded SHA256 digest of a -// provided public key. +// KeyDigest produces the SHA256 digest of a provided public key. func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) { switch t := key.(type) { case *jose.JSONWebKey: if t == nil { - return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key") + return Sha256Digest{}, errors.New("cannot compute digest of nil key") } return KeyDigest(t.Key) case jose.JSONWebKey: @@ -130,21 +140,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool { return digestJ == digestK } -// PublicKeysEqual determines whether two public keys have the same marshalled -// bytes as one another -func PublicKeysEqual(a, b interface{}) (bool, error) { - if a == nil || b == nil { - return false, errors.New("One or more nil arguments to PublicKeysEqual") - } - aBytes, err := x509.MarshalPKIXPublicKey(a) - if err != nil { - return false, err - } - bBytes, err := x509.MarshalPKIXPublicKey(b) - if err != nil { - return false, err +// PublicKeysEqual determines whether two public keys are identical. 
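// Only *rsa.PublicKey and *ecdsa.PublicKey are supported; any other type
// returns an error. A hedged usage sketch, assuming certA and certB are
// parsed *x509.Certificate values (hypothetical names):
//
//	same, err := PublicKeysEqual(certA.PublicKey, certB.PublicKey)
//	if err != nil {
//		// unsupported key type
//	} else if !same {
//		// keys differ
//	}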
+func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) { + switch ak := a.(type) { + case *rsa.PublicKey: + return ak.Equal(b), nil + case *ecdsa.PublicKey: + return ak.Equal(b), nil + default: + return false, fmt.Errorf("unsupported public key type %T", ak) } - return bytes.Equal(aBytes, bBytes), nil } // SerialToString converts a certificate serial number (big.Int) to a String @@ -158,7 +163,7 @@ func SerialToString(serial *big.Int) string { func StringToSerial(serial string) (*big.Int, error) { var serialNum big.Int if !ValidSerial(serial) { - return &serialNum, errors.New("Invalid serial number") + return &serialNum, fmt.Errorf("invalid serial number %q", serial) } _, err := fmt.Sscanf(serial, "%036x", &serialNum) return &serialNum, err @@ -182,7 +187,7 @@ func ValidSerial(serial string) bool { func GetBuildID() (retID string) { retID = BuildID if retID == "" { - retID = "Unspecified" + retID = Unspecified } return } @@ -191,7 +196,7 @@ func GetBuildID() (retID string) { func GetBuildTime() (retID string) { retID = BuildTime if retID == "" { - retID = "Unspecified" + retID = Unspecified } return } @@ -200,7 +205,7 @@ func GetBuildTime() (retID string) { func GetBuildHost() (retID string) { retID = BuildHost if retID == "" { - retID = "Unspecified" + retID = Unspecified } return } @@ -208,15 +213,88 @@ func GetBuildHost() (retID string) { // IsAnyNilOrZero returns whether any of the supplied values are nil, or (if not) // if any of them is its type's zero-value. This is useful for validating that // all required fields on a proto message are present. -func IsAnyNilOrZero(vals ...interface{}) bool { +func IsAnyNilOrZero(vals ...any) bool { for _, val := range vals { switch v := val.(type) { case nil: return true + case bool: + if !v { + return true + } + case string: + if v == "" { + return true + } + case []string: + if len(v) == 0 { + return true + } + case byte: + // Byte is an alias for uint8 and will cover that case. + if v == 0 { + return true + } case []byte: if len(v) == 0 { return true } + case int: + if v == 0 { + return true + } + case int8: + if v == 0 { + return true + } + case int16: + if v == 0 { + return true + } + case int32: + if v == 0 { + return true + } + case int64: + if v == 0 { + return true + } + case uint: + if v == 0 { + return true + } + case uint16: + if v == 0 { + return true + } + case uint32: + if v == 0 { + return true + } + case uint64: + if v == 0 { + return true + } + case float32: + if v == 0 { + return true + } + case float64: + if v == 0 { + return true + } + case time.Time: + if v.IsZero() { + return true + } + case *timestamppb.Timestamp: + if v == nil || v.AsTime().IsZero() { + return true + } + case *durationpb.Duration: + if v == nil || v.AsDuration() == time.Duration(0) { + return true + } default: if reflect.ValueOf(v).IsZero() { return true @@ -243,15 +321,27 @@ func UniqueLowerNames(names []string) (unique []string) { return } +// HashIdentifiers returns a hash of the identifiers requested. This is intended +// for use when interacting with the orderFqdnSets table and rate limiting. 
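// Identifier values are normalized, sorted, and deduplicated before being
// joined with commas and SHA-256 hashed, so the digest is stable across
// ordering, casing, and duplication of the input (see TestHashIdentifiers
// below). A minimal sketch:
//
//	h := HashIdentifiers(identifier.ACMEIdentifiers{
//		identifier.NewDNS("Example.COM"),
//		identifier.NewDNS("example.com"),
//	})
//	// h equals the digest of a single lowercase "example.com" entry.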
+func HashIdentifiers(idents identifier.ACMEIdentifiers) []byte { + var values []string + for _, ident := range identifier.Normalize(idents) { + values = append(values, ident.Value) + } + + hash := sha256.Sum256([]byte(strings.Join(values, ","))) + return hash[:] +} + // LoadCert loads a PEM certificate specified by filename or returns an error func LoadCert(filename string) (*x509.Certificate, error) { - certPEM, err := ioutil.ReadFile(filename) + certPEM, err := os.ReadFile(filename) if err != nil { return nil, err } block, _ := pem.Decode(certPEM) if block == nil { - return nil, fmt.Errorf("No data in cert PEM file %s", filename) + return nil, fmt.Errorf("no data in cert PEM file %q", filename) } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { @@ -296,3 +386,15 @@ func IsASCII(str string) bool { } return true } + +// IsCanceled returns true if err is non-nil and is either context.Canceled, or +// has a grpc code of Canceled. This is useful because cancellations propagate +// through gRPC boundaries, and if we choose to treat in-process cancellations a +// certain way, we usually want to treat cross-process cancellations the same way. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) || status.Code(err) == codes.Canceled +} + +func Command() string { + return path.Base(os.Args[0]) +} diff --git a/core/util_test.go b/core/util_test.go index 8979edff26f..8ccd459661d 100644 --- a/core/util_test.go +++ b/core/util_test.go @@ -1,18 +1,27 @@ package core import ( + "context" "encoding/json" + "errors" "fmt" "math" "math/big" + "net/netip" "os" + "slices" "sort" "strings" "testing" "time" - "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/test" ) @@ -28,7 +37,7 @@ func TestNewToken(t *testing.T) { // Test for very blatant RNG failures: // Try 2^20 birthdays in a 2^72 search space... // our naive collision probability here is 2^-32... 
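// (Birthday bound: n = 2^20 samples over d = 2^72 outcomes gives
// P(collision) ≈ n²/(2d) = 2^40/2^73 = 2^-33, i.e. on the order of 2^-32.)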
- for i := 0; i < 1000000; i++ { + for range 1000000 { token = NewToken()[:12] // just sample a portion test.Assert(t, !collider[token], "Token collision!") collider[token] = true @@ -36,9 +45,9 @@ func TestNewToken(t *testing.T) { } func TestLooksLikeAToken(t *testing.T) { - test.Assert(t, !LooksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS"), "Accepted short token") - test.Assert(t, !LooksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS%"), "Accepted invalid token") - test.Assert(t, LooksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOSU"), "Rejected valid token") + test.Assert(t, !looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS"), "Accepted short token") + test.Assert(t, !looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOS%"), "Accepted invalid token") + test.Assert(t, looksLikeAToken("R-UL_7MrV3tUUjO9v5ym2srK3dGGCwlxbVyKBdwLOSU"), "Rejected valid token") } func TestSerialUtils(t *testing.T) { @@ -52,12 +61,12 @@ func TestSerialUtils(t *testing.T) { } badSerial, err := StringToSerial("doop!!!!000") - test.AssertEquals(t, fmt.Sprintf("%v", err), "Invalid serial number") + test.AssertContains(t, err.Error(), "invalid serial number") fmt.Println(badSerial) } func TestBuildID(t *testing.T) { - test.AssertEquals(t, "Unspecified", GetBuildID()) + test.AssertEquals(t, Unspecified, GetBuildID()) } const JWK1JSON = `{ @@ -114,16 +123,49 @@ func TestIsAnyNilOrZero(t *testing.T) { test.Assert(t, IsAnyNilOrZero(false), "False bool seen as non-zero") test.Assert(t, !IsAnyNilOrZero(true), "True bool seen as zero") - test.Assert(t, IsAnyNilOrZero(0), "Zero num seen as non-zero") - test.Assert(t, !IsAnyNilOrZero(uint32(5)), "Non-zero num seen as zero") - test.Assert(t, !IsAnyNilOrZero(-12.345), "Non-zero num seen as zero") + test.Assert(t, IsAnyNilOrZero(0), "Untyped constant zero seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(1), "Untyped constant 1 seen as zero") + test.Assert(t, IsAnyNilOrZero(int(0)), "int(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(int(1)), "int(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(int8(0)), "int8(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(int8(1)), "int8(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(int16(0)), "int16(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(int16(1)), "int16(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(int32(0)), "int32(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(int32(1)), "int32(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(int64(0)), "int64(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(int64(1)), "int64(1) seen as zero") + + test.Assert(t, IsAnyNilOrZero(uint(0)), "uint(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(uint(1)), "uint(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(uint8(0)), "uint8(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(uint8(1)), "uint8(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(uint16(0)), "uint16(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(uint16(1)), "uint16(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(uint32(0)), "uint32(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(uint32(1)), "uint32(1) seen as zero") + test.Assert(t, IsAnyNilOrZero(uint64(0)), "uint64(0) seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(uint64(1)), "uint64(1) seen as zero") + + test.Assert(t, !IsAnyNilOrZero(-12.345), "Untyped float32 seen as zero") + test.Assert(t, !IsAnyNilOrZero(float32(6.66)), "Non-empty float32 seen as zero") + test.Assert(t, IsAnyNilOrZero(float32(0)), "Empty 
float32 seen as non-zero") + + test.Assert(t, !IsAnyNilOrZero(float64(7.77)), "Non-empty float64 seen as zero") + test.Assert(t, IsAnyNilOrZero(float64(0)), "Empty float64 seen as non-zero") test.Assert(t, IsAnyNilOrZero(""), "Empty string seen as non-zero") test.Assert(t, !IsAnyNilOrZero("string"), "Non-empty string seen as zero") + test.Assert(t, IsAnyNilOrZero([]string{}), "Empty string slice seen as non-zero") + test.Assert(t, !IsAnyNilOrZero([]string{"barncats"}), "Non-empty string slice seen as zero") + test.Assert(t, IsAnyNilOrZero([]byte{}), "Empty byte slice seen as non-zero") test.Assert(t, !IsAnyNilOrZero([]byte("byte")), "Non-empty byte slice seen as zero") + test.Assert(t, IsAnyNilOrZero(time.Time{}), "No specified time value seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(time.Now()), "Current time seen as zero") + type Foo struct { foo int } @@ -134,6 +176,78 @@ func TestIsAnyNilOrZero(t *testing.T) { test.Assert(t, IsAnyNilOrZero(1, ""), "Mixed values seen as non-zero") test.Assert(t, IsAnyNilOrZero("", 1), "Mixed values seen as non-zero") + + var p *timestamppb.Timestamp + test.Assert(t, IsAnyNilOrZero(p), "Pointer to uninitialized timestamppb.Timestamp seen as non-zero") + test.Assert(t, IsAnyNilOrZero(timestamppb.New(time.Time{})), "*timestamppb.Timestamp containing an uninitialized inner time.Time{} is seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(timestamppb.Now()), "A *timestamppb.Timestamp with valid inner time is seen as zero") + + var d *durationpb.Duration + var zeroDuration time.Duration + test.Assert(t, IsAnyNilOrZero(d), "Pointer to uninitialized durationpb.Duration seen as non-zero") + test.Assert(t, IsAnyNilOrZero(durationpb.New(zeroDuration)), "*durationpb.Duration containing an zero value time.Duration is seen as non-zero") + test.Assert(t, !IsAnyNilOrZero(durationpb.New(666)), "A *durationpb.Duration with valid inner duration is seen as zero") +} + +func BenchmarkIsAnyNilOrZero(b *testing.B) { + var thyme *time.Time + var sage *time.Duration + var table = []struct { + input any + }{ + {input: int(0)}, + {input: int(1)}, + {input: int8(0)}, + {input: int8(1)}, + {input: int16(0)}, + {input: int16(1)}, + {input: int32(0)}, + {input: int32(1)}, + {input: int64(0)}, + {input: int64(1)}, + {input: uint(0)}, + {input: uint(1)}, + {input: uint8(0)}, + {input: uint8(1)}, + {input: uint16(0)}, + {input: uint16(1)}, + {input: uint32(0)}, + {input: uint32(1)}, + {input: uint64(0)}, + {input: uint64(1)}, + {input: float32(0)}, + {input: float32(0.1)}, + {input: float64(0)}, + {input: float64(0.1)}, + {input: ""}, + {input: "ahoyhoy"}, + {input: []string{}}, + {input: []string{""}}, + {input: []string{"oodley_doodley"}}, + {input: []byte{}}, + {input: []byte{0}}, + {input: []byte{1}}, + {input: []rune{}}, + {input: []rune{2}}, + {input: []rune{3}}, + {input: nil}, + {input: false}, + {input: true}, + {input: thyme}, + {input: time.Time{}}, + {input: time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC)}, + {input: sage}, + {input: time.Duration(1)}, + {input: time.Duration(0)}, + } + + for _, v := range table { + b.Run(fmt.Sprintf("input_%T_%v", v.input, v.input), func(b *testing.B) { + for b.Loop() { + _ = IsAnyNilOrZero(v.input) + } + }) + } } func TestUniqueLowerNames(t *testing.T) { @@ -164,17 +278,17 @@ func TestLoadCert(t *testing.T) { test.AssertError(t, err, "Loading nonexistent path did not error") test.AssertErrorWraps(t, err, &osPathErr) - _, err = LoadCert("../test/test-ca.der") + _, err = LoadCert("../test/hierarchy/README.md") 
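// README.md exists but contains no PEM block, so pem.Decode returns nil and
// LoadCert should fail with its "no data" error: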
test.AssertError(t, err, "Loading non-PEM file did not error") - test.AssertEquals(t, err.Error(), "No data in cert PEM file ../test/test-ca.der") + test.AssertContains(t, err.Error(), "no data in cert PEM file") - _, err = LoadCert("../test/test-ca.key") - test.AssertError(t, err, "Loading non-cert file did not error") - test.AssertEquals(t, err.Error(), "x509: malformed tbs certificate") + _, err = LoadCert("../test/hierarchy/int-e1.key.pem") + test.AssertError(t, err, "Loading non-cert PEM file did not error") + test.AssertContains(t, err.Error(), "x509: malformed tbs certificate") - cert, err := LoadCert("../test/test-ca.pem") - test.AssertNotError(t, err, "Failed to load cert file") - test.AssertEquals(t, cert.Subject.CommonName, "happy hacker fake CA") + cert, err := LoadCert("../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "Failed to load cert PEM file") + test.AssertEquals(t, cert.Subject.CommonName, "(TEST) Radical Rhino R3") } func TestRetryBackoff(t *testing.T) { @@ -189,15 +303,126 @@ func TestRetryBackoff(t *testing.T) { base := time.Minute max := 10 * time.Minute + backoff := RetryBackoff(0, base, max, factor) + assertBetween(float64(backoff), 0, 0) + expected := base - backoff := RetryBackoff(1, base, max, factor) + backoff = RetryBackoff(1, base, max, factor) assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2) + expected = time.Second * 90 backoff = RetryBackoff(2, base, max, factor) assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2) + expected = time.Minute * 10 // should be truncated backoff = RetryBackoff(7, base, max, factor) assertBetween(float64(backoff), float64(expected)*0.8, float64(expected)*1.2) } + +func TestHashIdentifiers(t *testing.T) { + dns1 := identifier.NewDNS("example.com") + dns1_caps := identifier.NewDNS("eXaMpLe.COM") + dns2 := identifier.NewDNS("high-energy-cheese-lab.nrc-cnrc.gc.ca") + dns2_caps := identifier.NewDNS("HIGH-ENERGY-CHEESE-LAB.NRC-CNRC.GC.CA") + ipv4_1 := identifier.NewIP(netip.MustParseAddr("10.10.10.10")) + ipv4_2 := identifier.NewIP(netip.MustParseAddr("172.16.16.16")) + ipv6_1 := identifier.NewIP(netip.MustParseAddr("2001:0db8:0bad:0dab:c0ff:fee0:0007:1337")) + ipv6_2 := identifier.NewIP(netip.MustParseAddr("3fff::")) + + testCases := []struct { + Name string + Idents1 identifier.ACMEIdentifiers + Idents2 identifier.ACMEIdentifiers + ExpectedEqual bool + }{ + { + Name: "Deterministic for DNS", + Idents1: identifier.ACMEIdentifiers{dns1}, + Idents2: identifier.ACMEIdentifiers{dns1}, + ExpectedEqual: true, + }, + { + Name: "Deterministic for IPv4", + Idents1: identifier.ACMEIdentifiers{ipv4_1}, + Idents2: identifier.ACMEIdentifiers{ipv4_1}, + ExpectedEqual: true, + }, + { + Name: "Deterministic for IPv6", + Idents1: identifier.ACMEIdentifiers{ipv6_1}, + Idents2: identifier.ACMEIdentifiers{ipv6_1}, + ExpectedEqual: true, + }, + { + Name: "Differentiates for DNS", + Idents1: identifier.ACMEIdentifiers{dns1}, + Idents2: identifier.ACMEIdentifiers{dns2}, + ExpectedEqual: false, + }, + { + Name: "Differentiates for IPv4", + Idents1: identifier.ACMEIdentifiers{ipv4_1}, + Idents2: identifier.ACMEIdentifiers{ipv4_2}, + ExpectedEqual: false, + }, + { + Name: "Differentiates for IPv6", + Idents1: identifier.ACMEIdentifiers{ipv6_1}, + Idents2: identifier.ACMEIdentifiers{ipv6_2}, + ExpectedEqual: false, + }, + { + Name: "Not subject to ordering", + Idents1: identifier.ACMEIdentifiers{ + dns1, dns2, ipv4_1, ipv4_2, ipv6_1, ipv6_2, + }, + Idents2: identifier.ACMEIdentifiers{ + 
ipv6_1, dns2, ipv4_2, dns1, ipv4_1, ipv6_2, + }, + ExpectedEqual: true, + }, + { + Name: "Not case sensitive", + Idents1: identifier.ACMEIdentifiers{ + dns1, dns2, + }, + Idents2: identifier.ACMEIdentifiers{ + dns1_caps, dns2_caps, + }, + ExpectedEqual: true, + }, + { + Name: "Not subject to duplication", + Idents1: identifier.ACMEIdentifiers{ + dns1, dns1, + }, + Idents2: identifier.ACMEIdentifiers{dns1}, + ExpectedEqual: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + h1 := HashIdentifiers(tc.Idents1) + h2 := HashIdentifiers(tc.Idents2) + if slices.Equal(h1, h2) != tc.ExpectedEqual { + t.Errorf("Comparing hashes of idents %#v and %#v, expected equality to be %v", tc.Idents1, tc.Idents2, tc.ExpectedEqual) + } + }) + } +} + +func TestIsCanceled(t *testing.T) { + if !IsCanceled(context.Canceled) { + t.Errorf("Expected context.Canceled to be canceled, but wasn't.") + } + if !IsCanceled(status.Errorf(codes.Canceled, "hi")) { + t.Errorf("Expected gRPC cancellation to be canceled, but wasn't.") + } + if IsCanceled(errors.New("hi")) { + t.Errorf("Expected random error to not be canceled, but was.") + } +} diff --git a/crl/checker/checker.go b/crl/checker/checker.go new file mode 100644 index 00000000000..08a1add8f56 --- /dev/null +++ b/crl/checker/checker.go @@ -0,0 +1,116 @@ +package checker + +import ( + "bytes" + "crypto/x509" + "fmt" + "math/big" + "sort" + "time" + + zlint_x509 "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3" + + "github.com/letsencrypt/boulder/linter" +) + +// Validate runs the given CRL through our set of lints, ensures its signature +// validates (if supplied with a non-nil issuer), and checks that the CRL is +// less than ageLimit old. It returns an error if any of these conditions are +// not met. +func Validate(crl *x509.RevocationList, issuer *x509.Certificate, ageLimit time.Duration) error { + zcrl, err := zlint_x509.ParseRevocationList(crl.Raw) + if err != nil { + return fmt.Errorf("parsing CRL: %w", err) + } + + err = linter.ProcessResultSet(zlint.LintRevocationList(zcrl)) + if err != nil { + return fmt.Errorf("linting CRL: %w", err) + } + + if issuer != nil { + err = crl.CheckSignatureFrom(issuer) + if err != nil { + return fmt.Errorf("checking CRL signature: %w", err) + } + } + + if time.Since(crl.ThisUpdate) >= ageLimit { + return fmt.Errorf("thisUpdate more than %s in the past: %v", ageLimit, crl.ThisUpdate) + } + + return nil +} + +type diffResult struct { + Added []*big.Int + Removed []*big.Int + // TODO: consider adding a "changed" field, for entries whose revocation time + // or revocation reason changes. +} + +// Diff returns the sets of serials that were added and removed between two +// CRLs. In order to be comparable, the CRLs must come from the same issuer, and +// be given in the correct order (the "old" CRL's Number and ThisUpdate must +// both precede the "new" CRL's). +func Diff(old, new *x509.RevocationList) (*diffResult, error) { + if !bytes.Equal(old.AuthorityKeyId, new.AuthorityKeyId) { + return nil, fmt.Errorf("CRLs were not issued by same issuer") + } + + if old.Number.Cmp(new.Number) >= 0 { + return nil, fmt.Errorf("old CRL does not precede new CRL") + } + + if new.ThisUpdate.Before(old.ThisUpdate) { + return nil, fmt.Errorf("old CRL does not precede new CRL") + } + + // Sort both sets of serials so we can march through them in order. 
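+	// big.Int has no ordering operators, so we compare with Cmp. The two
+	// O(n log n) sorts here let the loop below compute the added and removed
+	// sets in a single O(n+m) merge pass instead of O(n*m) pairwise lookups.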
+ oldSerials := make([]*big.Int, len(old.RevokedCertificateEntries)) + for i, rc := range old.RevokedCertificateEntries { + oldSerials[i] = rc.SerialNumber + } + sort.Slice(oldSerials, func(i, j int) bool { + return oldSerials[i].Cmp(oldSerials[j]) < 0 + }) + + newSerials := make([]*big.Int, len(new.RevokedCertificateEntries)) + for j, rc := range new.RevokedCertificateEntries { + newSerials[j] = rc.SerialNumber + } + sort.Slice(newSerials, func(i, j int) bool { + return newSerials[i].Cmp(newSerials[j]) < 0 + }) + + // Work our way through both lists of sorted serials. If the old list skips + // past a serial seen in the new list, then that serial was added. If the new + // list skips past a serial seen in the old list, then it was removed. + i, j := 0, 0 + added := make([]*big.Int, 0) + removed := make([]*big.Int, 0) + for { + if i >= len(oldSerials) { + added = append(added, newSerials[j:]...) + break + } + if j >= len(newSerials) { + removed = append(removed, oldSerials[i:]...) + break + } + cmp := oldSerials[i].Cmp(newSerials[j]) + if cmp < 0 { + removed = append(removed, oldSerials[i]) + i++ + } else if cmp > 0 { + added = append(added, newSerials[j]) + j++ + } else { + i++ + j++ + } + } + + return &diffResult{added, removed}, nil +} diff --git a/crl/checker/checker_test.go b/crl/checker/checker_test.go new file mode 100644 index 00000000000..53fc507f219 --- /dev/null +++ b/crl/checker/checker_test.go @@ -0,0 +1,117 @@ +package checker + +import ( + "crypto/rand" + "crypto/x509" + "encoding/pem" + "io" + "math/big" + "os" + "testing" + "time" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/issuance" + "github.com/letsencrypt/boulder/test" +) + +func TestValidate(t *testing.T) { + crlFile, err := os.Open("../../test/hierarchy/int-e1.crl.pem") + test.AssertNotError(t, err, "opening test crl file") + crlPEM, err := io.ReadAll(crlFile) + test.AssertNotError(t, err, "reading test crl file") + crlDER, _ := pem.Decode(crlPEM) + crl, err := x509.ParseRevocationList(crlDER.Bytes) + test.AssertNotError(t, err, "parsing test crl") + issuer, err := core.LoadCert("../../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + + err = Validate(crl, issuer, 100*365*24*time.Hour) + test.AssertNotError(t, err, "validating good crl") + + err = Validate(crl, issuer, 0) + test.AssertError(t, err, "validating too-old crl") + test.AssertContains(t, err.Error(), "in the past") + + issuer2, err := core.LoadCert("../../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + err = Validate(crl, issuer2, 100*365*24*time.Hour) + test.AssertError(t, err, "validating crl from wrong issuer") + test.AssertContains(t, err.Error(), "signature") + + crlFile, err = os.Open("../../linter/lints/cabf_br/testdata/crl_long_validity.pem") + test.AssertNotError(t, err, "opening test crl file") + crlPEM, err = io.ReadAll(crlFile) + test.AssertNotError(t, err, "reading test crl file") + crlDER, _ = pem.Decode(crlPEM) + crl, err = x509.ParseRevocationList(crlDER.Bytes) + test.AssertNotError(t, err, "parsing test crl") + err = Validate(crl, issuer, 100*365*24*time.Hour) + test.AssertError(t, err, "validating crl with lint error") + test.AssertContains(t, err.Error(), "linting") +} + +func TestDiff(t *testing.T) { + issuer, err := issuance.LoadIssuer( + issuance.IssuerConfig{ + Location: issuance.IssuerLoc{ + File: "../../test/hierarchy/int-e1.key.pem", + CertFile: "../../test/hierarchy/int-e1.cert.pem", + }, + 
IssuerURL:  "http://not-example.com/issuer-url",
+			CRLURLBase: "http://not-example.com/crl/",
+			CRLShards:  1,
+		}, clock.NewFake())
+	test.AssertNotError(t, err, "loading test issuer")
+
+	now := time.Now()
+	template := x509.RevocationList{
+		ThisUpdate: now,
+		NextUpdate: now.Add(24 * time.Hour),
+		Number:     big.NewInt(1),
+		RevokedCertificateEntries: []x509.RevocationListEntry{
+			{
+				SerialNumber:   big.NewInt(1),
+				RevocationTime: now.Add(-time.Hour),
+			},
+			{
+				SerialNumber:   big.NewInt(2),
+				RevocationTime: now.Add(-time.Hour),
+			},
+		},
+	}
+
+	oldCRLDER, err := x509.CreateRevocationList(rand.Reader, &template, issuer.Cert.Certificate, issuer.Signer)
+	test.AssertNotError(t, err, "creating old crl")
+	oldCRL, err := x509.ParseRevocationList(oldCRLDER)
+	test.AssertNotError(t, err, "parsing old crl")
+
+	now = now.Add(time.Hour)
+	template = x509.RevocationList{
+		ThisUpdate: now,
+		NextUpdate: now.Add(24 * time.Hour),
+		Number:     big.NewInt(2),
+		RevokedCertificateEntries: []x509.RevocationListEntry{
+			{
+				SerialNumber:   big.NewInt(1),
+				RevocationTime: now.Add(-2 * time.Hour),
+			},
+			{
+				SerialNumber:   big.NewInt(3),
+				RevocationTime: now.Add(-time.Hour),
+			},
+		},
+	}
+
+	newCRLDER, err := x509.CreateRevocationList(rand.Reader, &template, issuer.Cert.Certificate, issuer.Signer)
+	test.AssertNotError(t, err, "creating new crl")
+	newCRL, err := x509.ParseRevocationList(newCRLDER)
+	test.AssertNotError(t, err, "parsing new crl")
+
+	res, err := Diff(oldCRL, newCRL)
+	test.AssertNotError(t, err, "diffing crls")
+	test.AssertEquals(t, len(res.Added), 1)
+	test.AssertEquals(t, len(res.Removed), 1)
+}
diff --git a/crl/crl.go b/crl/crl.go
new file mode 100644
index 00000000000..7e128d6a736
--- /dev/null
+++ b/crl/crl.go
@@ -0,0 +1,44 @@
+package crl
+
+import (
+	"encoding/json"
+	"math/big"
+	"time"
+
+	"github.com/letsencrypt/boulder/issuance"
+)
+
+// number represents the 'crlNumber' field of a CRL. It must be constructed by
+// calling `Number()`.
+type number *big.Int
+
+// Number derives the 'CRLNumber' field for a CRL from the value of the
+// 'thisUpdate' field provided as a `time.Time`.
+func Number(thisUpdate time.Time) number {
+	// Per RFC 5280 Section 5.2.3, 'CRLNumber' is a monotonically increasing
+	// sequence number for a given CRL scope and CRL that MUST be at most 20
+	// octets. A 64-bit (8-byte) integer will never exceed that requirement, but
+	// lets us guarantee that the CRL Number is always increasing without having
+	// to store or look up additional state.
+	return number(big.NewInt(thisUpdate.UnixNano()))
+}
+
+// id is a unique identifier for a CRL which is primarily used for logging. This
+// identifier is composed of the 'Issuer', the shard index, and the 'CRLNumber'
+// (e.g. {"issuerID": 123, "shardIdx": 78, "crlNumber": 456}). It must be
+// constructed by calling `Id()`.
+type id string
+
+// Id is a utility function which constructs a new `id`.
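+// As an illustration (with made-up values), Id(123, 78, Number(t)) yields a
+// JSON string of the form {"issuerID":123,"shardIdx":78,"crlNumber":<t.UnixNano()>},
+// matching the fields marshalled below.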
+func Id(issuerID issuance.NameID, shardIdx int, crlNumber number) id { + type info struct { + IssuerID issuance.NameID `json:"issuerID"` + ShardIdx int `json:"shardIdx"` + CRLNumber number `json:"crlNumber"` + } + jsonBytes, err := json.Marshal(info{issuerID, shardIdx, crlNumber}) + if err != nil { + panic(err) + } + return id(jsonBytes) +} diff --git a/crl/crl_test.go b/crl/crl_test.go new file mode 100644 index 00000000000..5a26b25edaa --- /dev/null +++ b/crl/crl_test.go @@ -0,0 +1,17 @@ +package crl + +import ( + "fmt" + "math/big" + "testing" + "time" + + "github.com/letsencrypt/boulder/test" +) + +func TestId(t *testing.T) { + thisUpdate := time.Now() + out := Id(1337, 1, Number(thisUpdate)) + expectCRLId := fmt.Sprintf("{\"issuerID\":1337,\"shardIdx\":1,\"crlNumber\":%d}", big.NewInt(thisUpdate.UnixNano())) + test.AssertEquals(t, string(out), expectCRLId) +} diff --git a/crl/idp/idp.go b/crl/idp/idp.go new file mode 100644 index 00000000000..2ed835dfd79 --- /dev/null +++ b/crl/idp/idp.go @@ -0,0 +1,102 @@ +package idp + +import ( + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +var idpOID = asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint + +// issuingDistributionPoint represents the ASN.1 IssuingDistributionPoint +// SEQUENCE as defined in RFC 5280 Section 5.2.5. We only use three of the +// fields, so the others are omitted. +type issuingDistributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` +} + +// distributionPointName represents the ASN.1 DistributionPointName CHOICE as +// defined in RFC 5280 Section 4.2.1.13. We only use one of the fields, so the +// others are omitted. +type distributionPointName struct { + // Technically, FullName is of type GeneralNames, which is of type SEQUENCE OF + // GeneralName. But GeneralName itself is of type CHOICE, and the asn1.Marshal + // function doesn't support marshalling structs to CHOICEs, so we have to use + // asn1.RawValue and encode the GeneralName ourselves. + FullName []asn1.RawValue `asn1:"optional,tag:0"` +} + +// MakeUserCertsExt returns a critical IssuingDistributionPoint extension +// containing the given URLs and with the OnlyContainsUserCerts boolean set to +// true. +func MakeUserCertsExt(urls []string) (pkix.Extension, error) { + var gns []asn1.RawValue + for _, url := range urls { + gns = append(gns, asn1.RawValue{ // GeneralName + Class: 2, // context-specific + Tag: 6, // uniformResourceIdentifier, IA5String + Bytes: []byte(url), + }) + } + + val := issuingDistributionPoint{ + DistributionPoint: distributionPointName{FullName: gns}, + OnlyContainsUserCerts: true, + } + + valBytes, err := asn1.Marshal(val) + if err != nil { + return pkix.Extension{}, err + } + + return pkix.Extension{ + Id: idpOID, + Value: valBytes, + Critical: true, + }, nil +} + +// MakeCACertsExt returns a critical IssuingDistributionPoint extension +// asserting the OnlyContainsCACerts boolean. +func MakeCACertsExt() (*pkix.Extension, error) { + val := issuingDistributionPoint{ + OnlyContainsCACerts: true, + } + + valBytes, err := asn1.Marshal(val) + if err != nil { + return nil, err + } + + return &pkix.Extension{ + Id: idpOID, + Value: valBytes, + Critical: true, + }, nil +} + +// GetIDPURIs returns the URIs contained within the issuingDistributionPoint +// extension, if present, or an error otherwise. 
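+// For example, decoding the extension produced by
+// MakeUserCertsExt([]string{"http://c.ex.org/1.crl"}) should yield
+// ["http://c.ex.org/1.crl"].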
+func GetIDPURIs(exts []pkix.Extension) ([]string, error) { + for _, ext := range exts { + if ext.Id.Equal(idpOID) { + val := issuingDistributionPoint{} + rest, err := asn1.Unmarshal(ext.Value, &val) + if err != nil { + return nil, fmt.Errorf("parsing IssuingDistributionPoint extension: %w", err) + } + if len(rest) != 0 { + return nil, fmt.Errorf("parsing IssuingDistributionPoint extension: got %d unexpected trailing bytes", len(rest)) + } + var uris []string + for _, generalName := range val.DistributionPoint.FullName { + uris = append(uris, string(generalName.Bytes)) + } + return uris, nil + } + } + return nil, errors.New("no IssuingDistributionPoint extension found") +} diff --git a/crl/idp/idp_test.go b/crl/idp/idp_test.go new file mode 100644 index 00000000000..904a3586f8f --- /dev/null +++ b/crl/idp/idp_test.go @@ -0,0 +1,39 @@ +package idp + +import ( + "encoding/hex" + "testing" + + "github.com/letsencrypt/boulder/test" +) + +func TestMakeUserCertsExt(t *testing.T) { + t.Parallel() + dehex := func(s string) []byte { r, _ := hex.DecodeString(s); return r } + tests := []struct { + name string + urls []string + want []byte + }{ + { + name: "one (real) url", + urls: []string{"http://prod.c.lencr.org/20506757847264211/126.crl"}, + want: dehex("303AA035A0338631687474703A2F2F70726F642E632E6C656E63722E6F72672F32303530363735373834373236343231312F3132362E63726C8101FF"), + }, + { + name: "two urls", + urls: []string{"http://old.style/12345678/90.crl", "http://new.style/90.crl"}, + want: dehex("3042A03DA03B8620687474703A2F2F6F6C642E7374796C652F31323334353637382F39302E63726C8617687474703A2F2F6E65772E7374796C652F39302E63726C8101FF"), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got, err := MakeUserCertsExt(tc.urls) + test.AssertNotError(t, err, "should never fail to marshal asn1 to bytes") + test.AssertDeepEquals(t, got.Id, idpOID) + test.AssertEquals(t, got.Critical, true) + test.AssertDeepEquals(t, got.Value, tc.want) + }) + } +} diff --git a/crl/storer/proto/storer.pb.go b/crl/storer/proto/storer.pb.go new file mode 100644 index 00000000000..7484333fc5b --- /dev/null +++ b/crl/storer/proto/storer.pb.go @@ -0,0 +1,280 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v3.20.1 +// source: storer.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type UploadCRLRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Payload: + // + // *UploadCRLRequest_Metadata + // *UploadCRLRequest_CrlChunk + Payload isUploadCRLRequest_Payload `protobuf_oneof:"payload"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UploadCRLRequest) Reset() { + *x = UploadCRLRequest{} + mi := &file_storer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UploadCRLRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UploadCRLRequest) ProtoMessage() {} + +func (x *UploadCRLRequest) ProtoReflect() protoreflect.Message { + mi := &file_storer_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UploadCRLRequest.ProtoReflect.Descriptor instead. +func (*UploadCRLRequest) Descriptor() ([]byte, []int) { + return file_storer_proto_rawDescGZIP(), []int{0} +} + +func (x *UploadCRLRequest) GetPayload() isUploadCRLRequest_Payload { + if x != nil { + return x.Payload + } + return nil +} + +func (x *UploadCRLRequest) GetMetadata() *CRLMetadata { + if x != nil { + if x, ok := x.Payload.(*UploadCRLRequest_Metadata); ok { + return x.Metadata + } + } + return nil +} + +func (x *UploadCRLRequest) GetCrlChunk() []byte { + if x != nil { + if x, ok := x.Payload.(*UploadCRLRequest_CrlChunk); ok { + return x.CrlChunk + } + } + return nil +} + +type isUploadCRLRequest_Payload interface { + isUploadCRLRequest_Payload() +} + +type UploadCRLRequest_Metadata struct { + Metadata *CRLMetadata `protobuf:"bytes,1,opt,name=metadata,proto3,oneof"` +} + +type UploadCRLRequest_CrlChunk struct { + CrlChunk []byte `protobuf:"bytes,2,opt,name=crlChunk,proto3,oneof"` +} + +func (*UploadCRLRequest_Metadata) isUploadCRLRequest_Payload() {} + +func (*UploadCRLRequest_CrlChunk) isUploadCRLRequest_Payload() {} + +type CRLMetadata struct { + state protoimpl.MessageState `protogen:"open.v1"` + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + Number int64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + ShardIdx int64 `protobuf:"varint,3,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires,proto3" json:"expires,omitempty"` + CacheControl string `protobuf:"bytes,5,opt,name=cacheControl,proto3" json:"cacheControl,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CRLMetadata) Reset() { + *x = CRLMetadata{} + mi := &file_storer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CRLMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CRLMetadata) ProtoMessage() {} + +func (x *CRLMetadata) ProtoReflect() protoreflect.Message { + mi := &file_storer_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CRLMetadata.ProtoReflect.Descriptor instead. 
+func (*CRLMetadata) Descriptor() ([]byte, []int) { + return file_storer_proto_rawDescGZIP(), []int{1} +} + +func (x *CRLMetadata) GetIssuerNameID() int64 { + if x != nil { + return x.IssuerNameID + } + return 0 +} + +func (x *CRLMetadata) GetNumber() int64 { + if x != nil { + return x.Number + } + return 0 +} + +func (x *CRLMetadata) GetShardIdx() int64 { + if x != nil { + return x.ShardIdx + } + return 0 +} + +func (x *CRLMetadata) GetExpires() *timestamppb.Timestamp { + if x != nil { + return x.Expires + } + return nil +} + +func (x *CRLMetadata) GetCacheControl() string { + if x != nil { + return x.CacheControl + } + return "" +} + +var File_storer_proto protoreflect.FileDescriptor + +var file_storer_proto_rawDesc = string([]byte{ + 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6e, 0x0a, 0x10, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, + 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x72, 0x2e, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, + 0x00, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x08, 0x63, + 0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, + 0x08, 0x63, 0x72, 0x6c, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x43, 0x52, 0x4c, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x12, 0x34, 0x0a, 0x07, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x32, 0x4e, 0x0a, 0x09, 0x43, 0x52, 0x4c, 0x53, 0x74, 0x6f, + 0x72, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x09, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x52, 0x4c, + 0x12, 0x18, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x43, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x28, 0x01, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x72, 0x6c, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +}) + +var ( + file_storer_proto_rawDescOnce sync.Once + file_storer_proto_rawDescData []byte +) + +func file_storer_proto_rawDescGZIP() []byte { + file_storer_proto_rawDescOnce.Do(func() { + file_storer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc))) + }) + return file_storer_proto_rawDescData +} + +var file_storer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_storer_proto_goTypes = []any{ + (*UploadCRLRequest)(nil), // 0: storer.UploadCRLRequest + (*CRLMetadata)(nil), // 1: storer.CRLMetadata + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty +} +var file_storer_proto_depIdxs = []int32{ + 1, // 0: storer.UploadCRLRequest.metadata:type_name -> storer.CRLMetadata + 2, // 1: storer.CRLMetadata.expires:type_name -> google.protobuf.Timestamp + 0, // 2: storer.CRLStorer.UploadCRL:input_type -> storer.UploadCRLRequest + 3, // 3: storer.CRLStorer.UploadCRL:output_type -> google.protobuf.Empty + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_storer_proto_init() } +func file_storer_proto_init() { + if File_storer_proto != nil { + return + } + file_storer_proto_msgTypes[0].OneofWrappers = []any{ + (*UploadCRLRequest_Metadata)(nil), + (*UploadCRLRequest_CrlChunk)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_storer_proto_rawDesc), len(file_storer_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_storer_proto_goTypes, + DependencyIndexes: file_storer_proto_depIdxs, + MessageInfos: file_storer_proto_msgTypes, + }.Build() + File_storer_proto = out.File + file_storer_proto_goTypes = nil + file_storer_proto_depIdxs = nil +} diff --git a/crl/storer/proto/storer.proto b/crl/storer/proto/storer.proto new file mode 100644 index 00000000000..fa5f55c548f --- /dev/null +++ b/crl/storer/proto/storer.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package storer; +option go_package = "github.com/letsencrypt/boulder/crl/storer/proto"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +service CRLStorer { + rpc UploadCRL(stream UploadCRLRequest) returns (google.protobuf.Empty) {} +} + +message UploadCRLRequest { + oneof payload { + CRLMetadata metadata = 1; + bytes crlChunk = 2; + } +} + +message CRLMetadata { + int64 issuerNameID = 1; + int64 number = 2; + int64 shardIdx = 3; + google.protobuf.Timestamp expires = 4; + string cacheControl = 5; +} diff --git a/crl/storer/proto/storer_grpc.pb.go b/crl/storer/proto/storer_grpc.pb.go new file mode 100644 index 00000000000..32c9e128efe --- /dev/null +++ b/crl/storer/proto/storer_grpc.pb.go @@ -0,0 +1,115 @@ 
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: storer.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + CRLStorer_UploadCRL_FullMethodName = "/storer.CRLStorer/UploadCRL" +) + +// CRLStorerClient is the client API for CRLStorer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CRLStorerClient interface { + UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty], error) +} + +type cRLStorerClient struct { + cc grpc.ClientConnInterface +} + +func NewCRLStorerClient(cc grpc.ClientConnInterface) CRLStorerClient { + return &cRLStorerClient{cc} +} + +func (c *cRLStorerClient) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &CRLStorer_ServiceDesc.Streams[0], CRLStorer_UploadCRL_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[UploadCRLRequest, emptypb.Empty]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CRLStorer_UploadCRLClient = grpc.ClientStreamingClient[UploadCRLRequest, emptypb.Empty] + +// CRLStorerServer is the server API for CRLStorer service. +// All implementations must embed UnimplementedCRLStorerServer +// for forward compatibility. +type CRLStorerServer interface { + UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error + mustEmbedUnimplementedCRLStorerServer() +} + +// UnimplementedCRLStorerServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedCRLStorerServer struct{} + +func (UnimplementedCRLStorerServer) UploadCRL(grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty]) error { + return status.Errorf(codes.Unimplemented, "method UploadCRL not implemented") +} +func (UnimplementedCRLStorerServer) mustEmbedUnimplementedCRLStorerServer() {} +func (UnimplementedCRLStorerServer) testEmbeddedByValue() {} + +// UnsafeCRLStorerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CRLStorerServer will +// result in compilation errors. +type UnsafeCRLStorerServer interface { + mustEmbedUnimplementedCRLStorerServer() +} + +func RegisterCRLStorerServer(s grpc.ServiceRegistrar, srv CRLStorerServer) { + // If the following call pancis, it indicates UnimplementedCRLStorerServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&CRLStorer_ServiceDesc, srv) +} + +func _CRLStorer_UploadCRL_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(CRLStorerServer).UploadCRL(&grpc.GenericServerStream[UploadCRLRequest, emptypb.Empty]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type CRLStorer_UploadCRLServer = grpc.ClientStreamingServer[UploadCRLRequest, emptypb.Empty] + +// CRLStorer_ServiceDesc is the grpc.ServiceDesc for CRLStorer service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CRLStorer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "storer.CRLStorer", + HandlerType: (*CRLStorerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "UploadCRL", + Handler: _CRLStorer_UploadCRL_Handler, + ClientStreams: true, + }, + }, + Metadata: "storer.proto", +} diff --git a/crl/storer/storer.go b/crl/storer/storer.go new file mode 100644 index 00000000000..f410489cb1f --- /dev/null +++ b/crl/storer/storer.go @@ -0,0 +1,258 @@ +package storer + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "math/big" + "slices" + "time" + + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/crl" + "github.com/letsencrypt/boulder/crl/idp" + cspb "github.com/letsencrypt/boulder/crl/storer/proto" + "github.com/letsencrypt/boulder/issuance" + blog "github.com/letsencrypt/boulder/log" +) + +// simpleS3 matches the subset of the s3.Client interface which we use, to allow +// simpler mocking in tests. 
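+// Both the real *s3.Client and the fakes in storer_test.go (fakeSimpleS3,
+// brokenSimpleS3) satisfy this interface.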
+type simpleS3 interface { + PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) + GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) +} + +type crlStorer struct { + cspb.UnsafeCRLStorerServer + s3Client simpleS3 + s3Bucket string + issuers map[issuance.NameID]*issuance.Certificate + uploadCount *prometheus.CounterVec + sizeHistogram *prometheus.HistogramVec + latencyHistogram *prometheus.HistogramVec + log blog.Logger + clk clock.Clock +} + +var _ cspb.CRLStorerServer = (*crlStorer)(nil) + +func New( + issuers []*issuance.Certificate, + s3Client simpleS3, + s3Bucket string, + stats prometheus.Registerer, + log blog.Logger, + clk clock.Clock, +) (*crlStorer, error) { + issuersByNameID := make(map[issuance.NameID]*issuance.Certificate, len(issuers)) + for _, issuer := range issuers { + issuersByNameID[issuer.NameID()] = issuer + } + + uploadCount := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "crl_storer_uploads", + Help: "A counter of the number of CRLs uploaded by crl-storer", + }, []string{"issuer", "result"}) + + sizeHistogram := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "crl_storer_sizes", + Help: "A histogram of the sizes (in bytes) of CRLs uploaded by crl-storer", + Buckets: []float64{0, 256, 1024, 4096, 16384, 65536}, + }, []string{"issuer"}) + + latencyHistogram := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "crl_storer_upload_times", + Help: "A histogram of the time (in seconds) it took crl-storer to upload CRLs", + Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000}, + }, []string{"issuer"}) + + return &crlStorer{ + issuers: issuersByNameID, + s3Client: s3Client, + s3Bucket: s3Bucket, + uploadCount: uploadCount, + sizeHistogram: sizeHistogram, + latencyHistogram: latencyHistogram, + log: log, + clk: clk, + }, nil +} + +// TODO(#6261): Unify all error messages to identify the shard they're working +// on as a JSON object including issuer, crl number, and shard number. + +// UploadCRL implements the gRPC method of the same name. It takes a stream of +// bytes as its input, parses and runs some sanity checks on the CRL, and then +// uploads it to S3. +func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLRequest, emptypb.Empty]) error { + var issuer *issuance.Certificate + var shardIdx int64 + var crlNumber *big.Int + crlBytes := make([]byte, 0) + var cacheControl string + var expires time.Time + + // Read all of the messages from the input stream. 
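+	// The stream is expected to carry exactly one metadata message plus any
+	// number of crlChunk messages; the loop below rejects duplicate metadata
+	// and concatenates the chunks in arrival order.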
+ for { + in, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return err + } + + switch payload := in.Payload.(type) { + case *cspb.UploadCRLRequest_Metadata: + if crlNumber != nil || issuer != nil { + return errors.New("got more than one metadata message") + } + if payload.Metadata.IssuerNameID == 0 || payload.Metadata.Number == 0 { + return errors.New("got incomplete metadata message") + } + + cacheControl = payload.Metadata.CacheControl + expires = payload.Metadata.Expires.AsTime() + + shardIdx = payload.Metadata.ShardIdx + crlNumber = crl.Number(time.Unix(0, payload.Metadata.Number)) + + var ok bool + issuer, ok = cs.issuers[issuance.NameID(payload.Metadata.IssuerNameID)] + if !ok { + return fmt.Errorf("got unrecognized IssuerID: %d", payload.Metadata.IssuerNameID) + } + + case *cspb.UploadCRLRequest_CrlChunk: + crlBytes = append(crlBytes, payload.CrlChunk...) + } + } + + // Do some basic sanity checks on the received metadata and CRL. + if issuer == nil || crlNumber == nil { + return errors.New("got no metadata message") + } + + crlId := crl.Id(issuer.NameID(), int(shardIdx), crlNumber) + + cs.sizeHistogram.WithLabelValues(issuer.Subject.CommonName).Observe(float64(len(crlBytes))) + + crl, err := x509.ParseRevocationList(crlBytes) + if err != nil { + return fmt.Errorf("parsing CRL for %s: %w", crlId, err) + } + + if crl.Number.Cmp(crlNumber) != 0 { + return errors.New("got mismatched CRL Number") + } + + err = crl.CheckSignatureFrom(issuer.Certificate) + if err != nil { + return fmt.Errorf("validating signature for %s: %w", crlId, err) + } + + // Before uploading this CRL, we want to compare it against the previous CRL + // to ensure that the CRL Number field is not going backwards. This is an + // additional safety check against clock skew and potential races, if multiple + // crl-updaters are working on the same shard at the same time. We only run + // these checks if we found a CRL, so we don't block uploading brand new CRLs. + filename := fmt.Sprintf("%d/%d.crl", issuer.NameID(), shardIdx) + prevObj, err := cs.s3Client.GetObject(stream.Context(), &s3.GetObjectInput{ + Bucket: &cs.s3Bucket, + Key: &filename, + }) + if err != nil { + var smithyErr *smithyhttp.ResponseError + if !errors.As(err, &smithyErr) || smithyErr.HTTPStatusCode() != 404 { + return fmt.Errorf("getting previous CRL for %s: %w", crlId, err) + } + cs.log.Infof("No previous CRL found for %s, proceeding", crlId) + } else { + prevBytes, err := io.ReadAll(prevObj.Body) + if err != nil { + return fmt.Errorf("downloading previous CRL for %s: %w", crlId, err) + } + + prevCRL, err := x509.ParseRevocationList(prevBytes) + if err != nil { + return fmt.Errorf("parsing previous CRL for %s: %w", crlId, err) + } + + if crl.Number.Cmp(prevCRL.Number) <= 0 { + return fmt.Errorf("crlNumber not strictly increasing: %d <= %d", crl.Number, prevCRL.Number) + } + + idpURIs, err := idp.GetIDPURIs(crl.Extensions) + if err != nil { + return fmt.Errorf("getting IDP for %s: %w", crlId, err) + } + + prevURIs, err := idp.GetIDPURIs(prevCRL.Extensions) + if err != nil { + return fmt.Errorf("getting previous IDP for %s: %w", crlId, err) + } + + uriMatch := false + for _, uri := range idpURIs { + if slices.Contains(prevURIs, uri) { + uriMatch = true + break + } + } + if !uriMatch { + return fmt.Errorf("IDP does not match previous: %v !∩ %v", idpURIs, prevURIs) + } + } + + // Finally actually upload the new CRL. 
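+	// We compute the SHA-256 of the body and send it (base64-encoded) with the
+	// PUT so S3 can verify the object server-side, and we attach the CRL Number,
+	// Expires time, and Cache-Control header supplied in the request metadata.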
+ start := cs.clk.Now() + + checksum := sha256.Sum256(crlBytes) + checksumb64 := base64.StdEncoding.EncodeToString(checksum[:]) + crlContentType := "application/pkix-crl" + _, err = cs.s3Client.PutObject(stream.Context(), &s3.PutObjectInput{ + Bucket: &cs.s3Bucket, + Key: &filename, + Body: bytes.NewReader(crlBytes), + ChecksumAlgorithm: types.ChecksumAlgorithmSha256, + ChecksumSHA256: &checksumb64, + ContentType: &crlContentType, + Metadata: map[string]string{"crlNumber": crlNumber.String()}, + Expires: &expires, + CacheControl: &cacheControl, + }) + + latency := cs.clk.Now().Sub(start) + cs.latencyHistogram.WithLabelValues(issuer.Subject.CommonName).Observe(latency.Seconds()) + + if err != nil { + cs.uploadCount.WithLabelValues(issuer.Subject.CommonName, "failed").Inc() + cs.log.AuditErr("CRL upload failed", err, map[string]any{"id": crlId}) + return fmt.Errorf("uploading to S3: %w", err) + } + + cs.uploadCount.WithLabelValues(issuer.Subject.CommonName, "success").Inc() + cs.log.AuditInfo("CRL uploaded", map[string]any{ + "id": crlId, + "issuerCN": issuer.Subject.CommonName, + "thisUpdate": crl.ThisUpdate.Format(time.RFC3339), + "nextUpdate": crl.NextUpdate.Format(time.RFC3339), + "numEntries": len(crl.RevokedCertificateEntries), + }) + + return stream.SendAndClose(&emptypb.Empty{}) +} diff --git a/crl/storer/storer_test.go b/crl/storer/storer_test.go new file mode 100644 index 00000000000..22654b9ebcc --- /dev/null +++ b/crl/storer/storer_test.go @@ -0,0 +1,528 @@ +package storer + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "errors" + "io" + "math/big" + "net/http" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/s3" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/jmhodges/clock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/crl/idp" + cspb "github.com/letsencrypt/boulder/crl/storer/proto" + "github.com/letsencrypt/boulder/issuance" + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/test" +) + +type fakeUploadCRLServerStream struct { + grpc.ServerStream + input <-chan *cspb.UploadCRLRequest +} + +func (s *fakeUploadCRLServerStream) Recv() (*cspb.UploadCRLRequest, error) { + next, ok := <-s.input + if !ok { + return nil, io.EOF + } + return next, nil +} + +func (s *fakeUploadCRLServerStream) SendAndClose(*emptypb.Empty) error { + return nil +} + +func (s *fakeUploadCRLServerStream) Context() context.Context { + return context.Background() +} + +func setupTestUploadCRL(t *testing.T) (*crlStorer, *issuance.Issuer) { + t.Helper() + + r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "loading fake RSA issuer cert") + issuerE1, err := issuance.LoadIssuer( + issuance.IssuerConfig{ + Location: issuance.IssuerLoc{ + File: "../../test/hierarchy/int-e1.key.pem", + CertFile: "../../test/hierarchy/int-e1.cert.pem", + }, + IssuerURL: "http://not-example.com/issuer-url", + CRLURLBase: "http://not-example.com/crl/", + CRLShards: 1, + }, clock.NewFake()) + test.AssertNotError(t, err, "loading fake ECDSA issuer cert") + + storer, err := New( + []*issuance.Certificate{r3, issuerE1.Cert}, + nil, "le-crl.s3.us-west.amazonaws.com", + metrics.NoopRegisterer, blog.NewMock(), clock.NewFake(), + ) + test.AssertNotError(t, err, "creating test crl-storer") + + return storer, issuerE1 +} + +// Test that we get 
an error when no metadata is sent. +func TestUploadCRLNoMetadata(t *testing.T) { + storer, _ := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + close(ins) + err := <-errs + test.AssertError(t, err, "can't upload CRL with no metadata") + test.AssertContains(t, err.Error(), "no metadata") +} + +// Test that we get an error when incomplete metadata is sent. +func TestUploadCRLIncompleteMetadata(t *testing.T) { + storer, _ := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{}, + }, + } + close(ins) + err := <-errs + test.AssertError(t, err, "can't upload CRL with incomplete metadata") + test.AssertContains(t, err.Error(), "incomplete metadata") +} + +// Test that we get an error when a bad issuer is sent. +func TestUploadCRLUnrecognizedIssuer(t *testing.T) { + storer, _ := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: 1, + Number: 1, + }, + }, + } + close(ins) + err := <-errs + test.AssertError(t, err, "can't upload CRL with unrecognized issuer") + test.AssertContains(t, err.Error(), "unrecognized") +} + +// Test that we get an error when two metadata are sent. +func TestUploadCRLMultipleMetadata(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + close(ins) + err := <-errs + test.AssertError(t, err, "can't upload CRL with multiple metadata") + test.AssertContains(t, err.Error(), "more than one") +} + +// Test that we get an error when a malformed CRL is sent. +func TestUploadCRLMalformedBytes(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_CrlChunk{ + CrlChunk: []byte("this is not a valid crl"), + }, + } + close(ins) + err := <-errs + test.AssertError(t, err, "can't upload unparsable CRL") + test.AssertContains(t, err.Error(), "parsing CRL") +} + +// Test that we get an error when an invalid CRL (signed by a throwaway +// private key but tagged as being from a "real" issuer) is sent. 
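+// The signature check against the claimed issuer's public key must fail.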
+func TestUploadCRLInvalidSignature(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + fakeSigner, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "creating throwaway signer") + crlBytes, err := x509.CreateRevocationList( + rand.Reader, + &x509.RevocationList{ + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + Number: big.NewInt(1), + }, + iss.Cert.Certificate, + fakeSigner, + ) + test.AssertNotError(t, err, "creating test CRL") + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_CrlChunk{ + CrlChunk: crlBytes, + }, + } + close(ins) + err = <-errs + test.AssertError(t, err, "can't upload unverifiable CRL") + test.AssertContains(t, err.Error(), "validating signature") +} + +// Test that we get an error if the CRL Numbers mismatch. +func TestUploadCRLMismatchedNumbers(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + crlBytes, err := x509.CreateRevocationList( + rand.Reader, + &x509.RevocationList{ + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + Number: big.NewInt(2), + }, + iss.Cert.Certificate, + iss.Signer, + ) + test.AssertNotError(t, err, "creating test CRL") + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_CrlChunk{ + CrlChunk: crlBytes, + }, + } + close(ins) + err = <-errs + test.AssertError(t, err, "can't upload CRL with mismatched number") + test.AssertContains(t, err.Error(), "mismatched") +} + +// fakeSimpleS3 implements the simpleS3 interface, provides prevBytes for +// downloads, and checks that uploads match the expectBytes. +type fakeSimpleS3 struct { + prevBytes []byte + expectBytes []byte +} + +func (p *fakeSimpleS3) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) { + recvBytes, err := io.ReadAll(params.Body) + if err != nil { + return nil, err + } + if !bytes.Equal(p.expectBytes, recvBytes) { + return nil, errors.New("received bytes did not match expectation") + } + return &s3.PutObjectOutput{}, nil +} + +func (p *fakeSimpleS3) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) { + if p.prevBytes != nil { + return &s3.GetObjectOutput{Body: io.NopCloser(bytes.NewReader(p.prevBytes))}, nil + } + return nil, &smithyhttp.ResponseError{Response: &smithyhttp.Response{Response: &http.Response{StatusCode: 404}}} +} + +// Test that the correct bytes get propagated to S3. 
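+// The fake S3 serves a predecessor CRL (Number 1, matching IDP) for the GET,
+// and fails the PUT unless the uploaded body is byte-identical to the new CRL
+// (Number 2).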
+func TestUploadCRLSuccess(t *testing.T) {
+	storer, iss := setupTestUploadCRL(t)
+	errs := make(chan error, 1)
+
+	idpExt, err := idp.MakeUserCertsExt([]string{"http://c.ex.org"})
+	test.AssertNotError(t, err, "creating test IDP extension")
+
+	ins := make(chan *cspb.UploadCRLRequest)
+	go func() {
+		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
+	}()
+	ins <- &cspb.UploadCRLRequest{
+		Payload: &cspb.UploadCRLRequest_Metadata{
+			Metadata: &cspb.CRLMetadata{
+				IssuerNameID: int64(iss.Cert.NameID()),
+				Number:       2,
+			},
+		},
+	}
+
+	prevCRLBytes, err := x509.CreateRevocationList(
+		rand.Reader,
+		&x509.RevocationList{
+			ThisUpdate: storer.clk.Now(),
+			NextUpdate: storer.clk.Now().Add(time.Hour),
+			Number:     big.NewInt(1),
+			RevokedCertificateEntries: []x509.RevocationListEntry{
+				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
+			},
+			ExtraExtensions: []pkix.Extension{idpExt},
+		},
+		iss.Cert.Certificate,
+		iss.Signer,
+	)
+	test.AssertNotError(t, err, "creating test CRL")
+
+	storer.clk.Sleep(time.Minute)
+
+	crlBytes, err := x509.CreateRevocationList(
+		rand.Reader,
+		&x509.RevocationList{
+			ThisUpdate: storer.clk.Now(),
+			NextUpdate: storer.clk.Now().Add(time.Hour),
+			Number:     big.NewInt(2),
+			RevokedCertificateEntries: []x509.RevocationListEntry{
+				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
+			},
+			ExtraExtensions: []pkix.Extension{idpExt},
+		},
+		iss.Cert.Certificate,
+		iss.Signer,
+	)
+	test.AssertNotError(t, err, "creating test CRL")
+
+	storer.s3Client = &fakeSimpleS3{prevBytes: prevCRLBytes, expectBytes: crlBytes}
+	ins <- &cspb.UploadCRLRequest{
+		Payload: &cspb.UploadCRLRequest_CrlChunk{
+			CrlChunk: crlBytes,
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertNotError(t, err, "uploading valid CRL should work")
+}
+
+// Test that the correct bytes get propagated to S3 for a CRL with no predecessor.
+func TestUploadNewCRLSuccess(t *testing.T) {
+	storer, iss := setupTestUploadCRL(t)
+	errs := make(chan error, 1)
+
+	ins := make(chan *cspb.UploadCRLRequest)
+	go func() {
+		errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins})
+	}()
+	ins <- &cspb.UploadCRLRequest{
+		Payload: &cspb.UploadCRLRequest_Metadata{
+			Metadata: &cspb.CRLMetadata{
+				IssuerNameID: int64(iss.Cert.NameID()),
+				Number:       1,
+			},
+		},
+	}
+
+	crlBytes, err := x509.CreateRevocationList(
+		rand.Reader,
+		&x509.RevocationList{
+			ThisUpdate: time.Now(),
+			NextUpdate: time.Now().Add(time.Hour),
+			Number:     big.NewInt(1),
+			RevokedCertificateEntries: []x509.RevocationListEntry{
+				{SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)},
+			},
+		},
+		iss.Cert.Certificate,
+		iss.Signer,
+	)
+	test.AssertNotError(t, err, "creating test CRL")
+
+	storer.s3Client = &fakeSimpleS3{expectBytes: crlBytes}
+	ins <- &cspb.UploadCRLRequest{
+		Payload: &cspb.UploadCRLRequest_CrlChunk{
+			CrlChunk: crlBytes,
+		},
+	}
+	close(ins)
+	err = <-errs
+	test.AssertNotError(t, err, "uploading valid CRL should work")
+}
+
+// Test that we get an error when the previous CRL has a higher CRL number.
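+// Here the stored predecessor claims Number 2 while the upload carries Number
+// 1, so the strictly-increasing check must reject it.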
+func TestUploadCRLBackwardsNumber(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + + prevCRLBytes, err := x509.CreateRevocationList( + rand.Reader, + &x509.RevocationList{ + ThisUpdate: storer.clk.Now(), + NextUpdate: storer.clk.Now().Add(time.Hour), + Number: big.NewInt(2), + RevokedCertificateEntries: []x509.RevocationListEntry{ + {SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)}, + }, + }, + iss.Cert.Certificate, + iss.Signer, + ) + test.AssertNotError(t, err, "creating test CRL") + + storer.clk.Sleep(time.Minute) + + crlBytes, err := x509.CreateRevocationList( + rand.Reader, + &x509.RevocationList{ + ThisUpdate: storer.clk.Now(), + NextUpdate: storer.clk.Now().Add(time.Hour), + Number: big.NewInt(1), + RevokedCertificateEntries: []x509.RevocationListEntry{ + {SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)}, + }, + }, + iss.Cert.Certificate, + iss.Signer, + ) + test.AssertNotError(t, err, "creating test CRL") + + storer.s3Client = &fakeSimpleS3{prevBytes: prevCRLBytes, expectBytes: crlBytes} + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_CrlChunk{ + CrlChunk: crlBytes, + }, + } + close(ins) + err = <-errs + test.AssertError(t, err, "uploading out-of-order numbers should fail") + test.AssertContains(t, err.Error(), "crlNumber not strictly increasing") +} + +// brokenSimpleS3 implements the simpleS3 interface. It returns errors for all +// uploads and downloads. +type brokenSimpleS3 struct{} + +func (p *brokenSimpleS3) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) { + return nil, errors.New("sorry") +} + +func (p *brokenSimpleS3) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) { + return nil, errors.New("oops") +} + +// Test that we get an error when S3 falls over. 
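+// Because the storer fetches the previous CRL before uploading, the GetObject
+// failure surfaces first, hence the "getting previous CRL" error asserted below.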
+func TestUploadCRLBrokenS3(t *testing.T) { + storer, iss := setupTestUploadCRL(t) + errs := make(chan error, 1) + + ins := make(chan *cspb.UploadCRLRequest) + go func() { + errs <- storer.UploadCRL(&fakeUploadCRLServerStream{input: ins}) + }() + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_Metadata{ + Metadata: &cspb.CRLMetadata{ + IssuerNameID: int64(iss.Cert.NameID()), + Number: 1, + }, + }, + } + crlBytes, err := x509.CreateRevocationList( + rand.Reader, + &x509.RevocationList{ + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + Number: big.NewInt(1), + RevokedCertificateEntries: []x509.RevocationListEntry{ + {SerialNumber: big.NewInt(123), RevocationTime: time.Now().Add(-time.Hour)}, + }, + }, + iss.Cert.Certificate, + iss.Signer, + ) + test.AssertNotError(t, err, "creating test CRL") + storer.s3Client = &brokenSimpleS3{} + ins <- &cspb.UploadCRLRequest{ + Payload: &cspb.UploadCRLRequest_CrlChunk{ + CrlChunk: crlBytes, + }, + } + close(ins) + err = <-errs + test.AssertError(t, err, "uploading to broken S3 should fail") + test.AssertContains(t, err.Error(), "getting previous CRL") +} diff --git a/crl/updater/batch.go b/crl/updater/batch.go new file mode 100644 index 00000000000..03f1d3aec85 --- /dev/null +++ b/crl/updater/batch.go @@ -0,0 +1,73 @@ +package updater + +import ( + "context" + "errors" + "sync" + + "github.com/letsencrypt/boulder/crl" + "github.com/letsencrypt/boulder/issuance" +) + +// RunOnce causes the crlUpdater to update every shard immediately, then exit. +// It will run as many simultaneous goroutines as the configured maxParallelism. +func (cu *crlUpdater) RunOnce(ctx context.Context) error { + var wg sync.WaitGroup + atTime := cu.clk.Now() + + type workItem struct { + issuerNameID issuance.NameID + shardIdx int + } + + var anyErr bool + var once sync.Once + + shardWorker := func(in <-chan workItem) { + defer wg.Done() + + for { + select { + case <-ctx.Done(): + return + case work, ok := <-in: + if !ok { + return + } + err := cu.updateShardWithRetry(ctx, atTime, work.issuerNameID, work.shardIdx) + if err != nil { + cu.log.AuditErr("Generating CRL failed", err, map[string]any{ + "id": crl.Id(work.issuerNameID, work.shardIdx, crl.Number(atTime)), + }) + once.Do(func() { anyErr = true }) + } + } + } + } + + inputs := make(chan workItem) + + for range cu.maxParallelism { + wg.Add(1) + go shardWorker(inputs) + } + + for _, issuer := range cu.issuers { + for i := range cu.numShards { + select { + case <-ctx.Done(): + close(inputs) + wg.Wait() + return ctx.Err() + case inputs <- workItem{issuerNameID: issuer.NameID(), shardIdx: i + 1}: + } + } + } + close(inputs) + + wg.Wait() + if anyErr { + return errors.New("one or more errors encountered, see logs") + } + return ctx.Err() +} diff --git a/crl/updater/batch_test.go b/crl/updater/batch_test.go new file mode 100644 index 00000000000..e49d9863a2e --- /dev/null +++ b/crl/updater/batch_test.go @@ -0,0 +1,46 @@ +package updater + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/issuance" + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/test" +) + +func TestRunOnce(t *testing.T) { + e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + + 
mockLog := blog.NewMock()
+	clk := clock.NewFake()
+	clk.Set(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC))
+	cu, err := NewUpdater(
+		[]*issuance.Certificate{e1, r3},
+		2, 18*time.Hour, 24*time.Hour,
+		6*time.Hour, time.Minute, 1, 1,
+		"stale-if-error=60",
+		5*time.Minute,
+		&fakeSAC{revokedCerts: revokedCertsStream{err: errors.New("db no worky")}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)},
+		&fakeCA{gcc: generateCRLStream{}},
+		&fakeStorer{uploaderStream: &noopUploader{}},
+		metrics.NoopRegisterer, mockLog, clk,
+	)
+	test.AssertNotError(t, err, "building test crlUpdater")
+
+	// An error that affects all issuers should be logged once per shard (two
+	// issuers times two shards here), while RunOnce itself returns a single
+	// combined error.
+	err = cu.RunOnce(context.Background())
+	test.AssertError(t, err, "database error")
+	test.AssertContains(t, err.Error(), "one or more errors")
+	test.AssertEquals(t, len(mockLog.GetAllMatching("Generating CRL failed")), 4)
+	cu.tickHistogram.Reset()
+}
diff --git a/crl/updater/continuous.go b/crl/updater/continuous.go
new file mode 100644
index 00000000000..6415ad6c9a8
--- /dev/null
+++ b/crl/updater/continuous.go
@@ -0,0 +1,74 @@
+package updater
+
+import (
+	"context"
+	"math/rand/v2"
+	"sync"
+	"time"
+
+	"github.com/letsencrypt/boulder/crl"
+	"github.com/letsencrypt/boulder/issuance"
+)
+
+// Run causes the crlUpdater to enter its processing loop. It starts one
+// goroutine for every shard it intends to update, each of which will wake at
+// the appropriate interval.
+func (cu *crlUpdater) Run(ctx context.Context) error {
+	var wg sync.WaitGroup
+
+	shardWorker := func(issuerNameID issuance.NameID, shardIdx int) {
+		defer wg.Done()
+
+		// Wait for a random number of nanoseconds less than the updatePeriod, so
+		// that process restarts do not skip or delay shards deterministically.
+		waitTimer := time.NewTimer(time.Duration(rand.Int64N(cu.updatePeriod.Nanoseconds())))
+		defer waitTimer.Stop()
+		select {
+		case <-waitTimer.C:
+			// Continue to ticker loop
+		case <-ctx.Done():
+			return
+		}
+
+		// Do work, then sleep for updatePeriod. Rinse and repeat.
+		ticker := time.NewTicker(cu.updatePeriod)
+		defer ticker.Stop()
+		for {
+			// Check for context cancellation before we do any real work, in case we
+			// overran the last tick and both cases were selectable at the same time.
+			if ctx.Err() != nil {
+				return
+			}
+
+			atTime := cu.clk.Now()
+			err := cu.updateShardWithRetry(ctx, atTime, issuerNameID, shardIdx)
+			if err != nil {
+				// We only log, rather than return, so that the long-lived process can
+				// continue and try again at the next tick.
+				cu.log.AuditErr("Generating CRL failed", err, map[string]any{
+					"id": crl.Id(issuerNameID, shardIdx, crl.Number(atTime)),
+				})
+			}
+
+			select {
+			case <-ticker.C:
+				continue
+			case <-ctx.Done():
+				return
+			}
+		}
+	}
+
+	// Start one shard worker per shard this updater is responsible for.
+	for _, issuer := range cu.issuers {
+		for i := 1; i <= cu.numShards; i++ {
+			wg.Add(1)
+			go shardWorker(issuer.NameID(), i)
+		}
+	}
+
+	// Wait for all of the shard workers to exit, which will happen when their
+	// contexts are cancelled, probably by a SIGTERM.
+ wg.Wait() + return ctx.Err() +} diff --git a/crl/updater/updater.go b/crl/updater/updater.go new file mode 100644 index 00000000000..796590ddfea --- /dev/null +++ b/crl/updater/updater.go @@ -0,0 +1,329 @@ +package updater + +import ( + "context" + "crypto/sha256" + "fmt" + "io" + "time" + + "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/protobuf/types/known/timestamppb" + + capb "github.com/letsencrypt/boulder/ca/proto" + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/core/proto" + "github.com/letsencrypt/boulder/crl" + cspb "github.com/letsencrypt/boulder/crl/storer/proto" + "github.com/letsencrypt/boulder/issuance" + blog "github.com/letsencrypt/boulder/log" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +type crlUpdater struct { + issuers map[issuance.NameID]*issuance.Certificate + numShards int + shardWidth time.Duration + lookbackPeriod time.Duration + updatePeriod time.Duration + updateTimeout time.Duration + maxParallelism int + maxAttempts int + + cacheControl string + expiresMargin time.Duration + + sa sapb.StorageAuthorityClient + ca capb.CRLGeneratorClient + cs cspb.CRLStorerClient + + tickHistogram *prometheus.HistogramVec + updatedCounter *prometheus.CounterVec + + log blog.Logger + clk clock.Clock +} + +func NewUpdater( + issuers []*issuance.Certificate, + numShards int, + shardWidth time.Duration, + lookbackPeriod time.Duration, + updatePeriod time.Duration, + updateTimeout time.Duration, + maxParallelism int, + maxAttempts int, + cacheControl string, + expiresMargin time.Duration, + sa sapb.StorageAuthorityClient, + ca capb.CRLGeneratorClient, + cs cspb.CRLStorerClient, + stats prometheus.Registerer, + log blog.Logger, + clk clock.Clock, +) (*crlUpdater, error) { + issuersByNameID := make(map[issuance.NameID]*issuance.Certificate, len(issuers)) + for _, issuer := range issuers { + issuersByNameID[issuer.NameID()] = issuer + } + + if numShards < 1 { + return nil, fmt.Errorf("must have positive number of shards, got: %d", numShards) + } + + if updatePeriod >= 24*time.Hour { + return nil, fmt.Errorf("must update CRLs at least every 24 hours, got: %s", updatePeriod) + } + + if updateTimeout >= updatePeriod { + return nil, fmt.Errorf("update timeout must be less than period: %s !< %s", updateTimeout, updatePeriod) + } + + if lookbackPeriod < 2*updatePeriod { + return nil, fmt.Errorf("lookbackPeriod must be at least 2x updatePeriod: %s !< 2 * %s", lookbackPeriod, updatePeriod) + } + + if maxParallelism <= 0 { + maxParallelism = 1 + } + + if maxAttempts <= 0 { + maxAttempts = 1 + } + + tickHistogram := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "crl_updater_ticks", + Help: "A histogram of crl-updater tick latencies labeled by issuer and result", + Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000}, + }, []string{"issuer", "result"}) + + updatedCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "crl_updater_generated", + Help: "A counter of CRL generation calls labeled by result", + }, []string{"issuer", "result"}) + + return &crlUpdater{ + issuersByNameID, + numShards, + shardWidth, + lookbackPeriod, + updatePeriod, + updateTimeout, + maxParallelism, + maxAttempts, + cacheControl, + expiresMargin, + sa, + ca, + cs, + tickHistogram, + updatedCounter, + log, + clk, + }, nil +} + +// updateShardWithRetry calls updateShard repeatedly (with 
exponential backoff
+// between attempts) until it succeeds or the max number of attempts is reached.
+func (cu *crlUpdater) updateShardWithRetry(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int) error {
+	deadline := cu.clk.Now().Add(cu.updateTimeout)
+	ctx, cancel := context.WithDeadline(ctx, deadline)
+	defer cancel()
+
+	_, err := cu.sa.LeaseCRLShard(ctx, &sapb.LeaseCRLShardRequest{
+		IssuerNameID: int64(issuerNameID),
+		MinShardIdx:  int64(shardIdx),
+		MaxShardIdx:  int64(shardIdx),
+		Until:        timestamppb.New(deadline.Add(time.Minute)),
+	})
+	if err != nil {
+		return fmt.Errorf("leasing shard: %w", err)
+	}
+
+	crlID := crl.Id(issuerNameID, shardIdx, crl.Number(atTime))
+
+	for i := range cu.maxAttempts {
+		// core.RetryBackoff always returns 0 when its first argument is zero.
+		sleepTime := core.RetryBackoff(i, time.Second, time.Minute, 2)
+		if i != 0 {
+			cu.log.AuditErr("Generating CRL failed", err, map[string]any{
+				"id":         crlID,
+				"retryAfter": int(sleepTime.Seconds()),
+			})
+		}
+		cu.clk.Sleep(sleepTime)
+
+		err = cu.updateShard(ctx, atTime, issuerNameID, shardIdx)
+		if err == nil {
+			break
+		}
+	}
+	if err != nil {
+		return err
+	}
+
+	// Notify the database that we're done.
+	_, err = cu.sa.UpdateCRLShard(ctx, &sapb.UpdateCRLShardRequest{
+		IssuerNameID: int64(issuerNameID),
+		ShardIdx:     int64(shardIdx),
+		ThisUpdate:   timestamppb.New(atTime),
+	})
+	if err != nil {
+		return fmt.Errorf("updating db metadata: %w", err)
+	}
+
+	return nil
+}
+
+// updateShard processes a single shard. It computes the shard's boundaries, gets
+// the list of revoked certs in that shard from the SA, gets the CA to sign the
+// resulting CRL, and gets the crl-storer to upload it. It returns an error if
+// any of these operations fail.
+func (cu *crlUpdater) updateShard(ctx context.Context, atTime time.Time, issuerNameID issuance.NameID, shardIdx int) (err error) {
+	if shardIdx <= 0 {
+		return fmt.Errorf("invalid shard %d", shardIdx)
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	crlID := crl.Id(issuerNameID, shardIdx, crl.Number(atTime))
+
+	start := cu.clk.Now()
+	defer func() {
+		// This func closes over the named return value `err`, so it can reference it.
+		result := "success"
+		if err != nil {
+			result = "failed"
+		}
+		cu.tickHistogram.WithLabelValues(cu.issuers[issuerNameID].Subject.CommonName, result).Observe(cu.clk.Since(start).Seconds())
+		cu.updatedCounter.WithLabelValues(cu.issuers[issuerNameID].Subject.CommonName, result).Inc()
+	}()
+
+	cu.log.Infof("Generating CRL shard: id=[%s]", crlID)
+
+	// Query for unexpired certificates, with padding to ensure that revoked certificates show
+	// up in at least one CRL, even if they expire between revocation and CRL generation.
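+	// Concretely: a certificate that expired less than lookbackPeriod ago still
+	// falls inside the query window computed below, so a certificate revoked
+	// shortly before its expiry appears in at least one more CRL.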
+ expiresAfter := cu.clk.Now().Add(-cu.lookbackPeriod) + + saStream, err := cu.sa.GetRevokedCertsByShard(ctx, &sapb.GetRevokedCertsByShardRequest{ + IssuerNameID: int64(issuerNameID), + ShardIdx: int64(shardIdx), + ExpiresAfter: timestamppb.New(expiresAfter), + RevokedBefore: timestamppb.New(atTime), + }) + if err != nil { + return fmt.Errorf("GetRevokedCertsByShard: %w", err) + } + + var crlEntries []*proto.CRLEntry + for { + entry, err := saStream.Recv() + if err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("retrieving entry from SA: %w", err) + } + crlEntries = append(crlEntries, entry) + } + + cu.log.Infof("Queried SA for CRL shard: id=[%s] shardIdx=[%d] numEntries=[%d]", crlID, shardIdx, len(crlEntries)) + + // Send the full list of CRL Entries to the CA. + caStream, err := cu.ca.GenerateCRL(ctx) + if err != nil { + return fmt.Errorf("connecting to CA: %w", err) + } + + err = caStream.Send(&capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Metadata{ + Metadata: &capb.CRLMetadata{ + IssuerNameID: int64(issuerNameID), + ThisUpdate: timestamppb.New(atTime), + ShardIdx: int64(shardIdx), + }, + }, + }) + if err != nil { + return fmt.Errorf("sending CA metadata: %w", err) + } + + for _, entry := range crlEntries { + err = caStream.Send(&capb.GenerateCRLRequest{ + Payload: &capb.GenerateCRLRequest_Entry{ + Entry: entry, + }, + }) + if err != nil { + return fmt.Errorf("sending entry to CA: %w", err) + } + } + + err = caStream.CloseSend() + if err != nil { + return fmt.Errorf("closing CA request stream: %w", err) + } + + // Receive the full bytes of the signed CRL from the CA. + crlLen := 0 + crlHash := sha256.New() + var crlChunks [][]byte + for { + out, err := caStream.Recv() + if err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("receiving CRL bytes: %w", err) + } + + crlLen += len(out.Chunk) + crlHash.Write(out.Chunk) + crlChunks = append(crlChunks, out.Chunk) + } + + // Send the full bytes of the signed CRL to the Storer. 
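+	// The storer expects a single metadata message followed by the CRL bytes in
+	// chunks. Note that atTime.UnixNano() serves as the CRL number, which the
+	// storer requires to be strictly increasing (see the storer tests above).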
+	csStream, err := cu.cs.UploadCRL(ctx)
+	if err != nil {
+		return fmt.Errorf("connecting to CRLStorer: %w", err)
+	}
+
+	err = csStream.Send(&cspb.UploadCRLRequest{
+		Payload: &cspb.UploadCRLRequest_Metadata{
+			Metadata: &cspb.CRLMetadata{
+				IssuerNameID: int64(issuerNameID),
+				Number:       atTime.UnixNano(),
+				ShardIdx:     int64(shardIdx),
+				CacheControl: cu.cacheControl,
+				Expires:      timestamppb.New(atTime.Add(cu.updatePeriod).Add(cu.expiresMargin)),
+			},
+		},
+	})
+	if err != nil {
+		return fmt.Errorf("sending CRLStorer metadata: %w", err)
+	}
+
+	for _, chunk := range crlChunks {
+		err = csStream.Send(&cspb.UploadCRLRequest{
+			Payload: &cspb.UploadCRLRequest_CrlChunk{
+				CrlChunk: chunk,
+			},
+		})
+		if err != nil {
+			return fmt.Errorf("uploading CRL bytes: %w", err)
+		}
+	}
+
+	_, err = csStream.CloseAndRecv()
+	if err != nil {
+		return fmt.Errorf("closing CRLStorer upload stream: %w", err)
+	}
+
+	cu.log.Infof(
+		"Generated CRL shard: id=[%s] size=[%d] hash=[%x]",
+		crlID, crlLen, crlHash.Sum(nil))
+
+	return nil
+}
diff --git a/crl/updater/updater_test.go b/crl/updater/updater_test.go
new file mode 100644
index 00000000000..86c73c2ca94
--- /dev/null
+++ b/crl/updater/updater_test.go
@@ -0,0 +1,417 @@
+package updater
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"testing"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/types/known/emptypb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"github.com/jmhodges/clock"
+	"github.com/prometheus/client_golang/prometheus"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
+	corepb "github.com/letsencrypt/boulder/core/proto"
+	cspb "github.com/letsencrypt/boulder/crl/storer/proto"
+	"github.com/letsencrypt/boulder/issuance"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/metrics"
+	"github.com/letsencrypt/boulder/revocation"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
+	"github.com/letsencrypt/boulder/test"
+)
+
+// revokedCertsStream is a fake grpc.ServerStreamingClient which can be
+// populated with some CRL entries or an error for use as the return value of
+// a faked GetRevokedCertsByShard call.
+type revokedCertsStream struct {
+	grpc.ClientStream
+	entries []*corepb.CRLEntry
+	nextIdx int
+	err     error
+}
+
+func (f *revokedCertsStream) Recv() (*corepb.CRLEntry, error) {
+	if f.err != nil {
+		return nil, f.err
+	}
+	if f.nextIdx < len(f.entries) {
+		res := f.entries[f.nextIdx]
+		f.nextIdx++
+		return res, nil
+	}
+	return nil, io.EOF
+}
+
+// fakeSAC is a fake sapb.StorageAuthorityClient which can be populated with a
+// revokedCertsStream to be used as the return value for calls to
+// GetRevokedCertsByShard, and a fake timestamp to serve as the database's
+// maximum notAfter value.
+type fakeSAC struct {
+	sapb.StorageAuthorityClient
+	revokedCerts revokedCertsStream
+	maxNotAfter  time.Time
+	leaseError   error
+}
+
+// Return the configured stream.
+func (f *fakeSAC) GetRevokedCertsByShard(ctx context.Context, req *sapb.GetRevokedCertsByShardRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) {
+	return &f.revokedCerts, nil
+}
+
+func (f *fakeSAC) LeaseCRLShard(_ context.Context, req *sapb.LeaseCRLShardRequest, _ ...grpc.CallOption) (*sapb.LeaseCRLShardResponse, error) {
+	if f.leaseError != nil {
+		return nil, f.leaseError
+	}
+	return &sapb.LeaseCRLShardResponse{IssuerNameID: req.IssuerNameID, ShardIdx: req.MinShardIdx}, nil
+}
+
+// generateCRLStream implements the streaming API returned from GenerateCRL.
+// +// Specifically it implements grpc.BidiStreamingClient. +// +// If it has non-nil error fields, it returns those on Send() or Recv(). +// +// When it receives a CRL entry (on Send()), it records that entry internally, JSON serialized, +// with a newline between JSON objects. +// +// When it is asked for bytes of a signed CRL (Recv()), it sends those JSON serialized contents. +// +// We use JSON instead of CRL format because we're not testing the signing and formatting done +// by the CA, just the plumbing of different components together done by the crl-updater. +type generateCRLStream struct { + grpc.ClientStream + chunks [][]byte + nextIdx int + sendErr error + recvErr error +} + +type crlEntry struct { + Serial string + Reason int32 + RevokedAt time.Time +} + +func (f *generateCRLStream) Send(req *capb.GenerateCRLRequest) error { + if f.sendErr != nil { + return f.sendErr + } + if t, ok := req.Payload.(*capb.GenerateCRLRequest_Entry); ok { + jsonBytes, err := json.Marshal(crlEntry{ + Serial: t.Entry.Serial, + Reason: t.Entry.Reason, + RevokedAt: t.Entry.RevokedAt.AsTime(), + }) + if err != nil { + return err + } + f.chunks = append(f.chunks, jsonBytes) + f.chunks = append(f.chunks, []byte("\n")) + } + return f.sendErr +} + +func (f *generateCRLStream) CloseSend() error { + return nil +} + +func (f *generateCRLStream) Recv() (*capb.GenerateCRLResponse, error) { + if f.recvErr != nil { + return nil, f.recvErr + } + if f.nextIdx < len(f.chunks) { + res := f.chunks[f.nextIdx] + f.nextIdx++ + return &capb.GenerateCRLResponse{Chunk: res}, nil + } + return nil, io.EOF +} + +// fakeCA acts as a fake CA (specifically implementing capb.CRLGeneratorClient). +// +// It always returns its field in response to `GenerateCRL`. Because this is a streaming +// RPC, that return value is responsible for most of the work. +type fakeCA struct { + gcc generateCRLStream +} + +func (f *fakeCA) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) { + return &f.gcc, nil +} + +// recordingUploader acts as the streaming part of UploadCRL. +// +// Records all uploaded chunks in crlBody. +type recordingUploader struct { + grpc.ClientStream + + crlBody []byte +} + +func (r *recordingUploader) Send(req *cspb.UploadCRLRequest) error { + if t, ok := req.Payload.(*cspb.UploadCRLRequest_CrlChunk); ok { + r.crlBody = append(r.crlBody, t.CrlChunk...) + } + return nil +} + +func (r *recordingUploader) CloseAndRecv() (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil +} + +// noopUploader is a fake grpc.ClientStreamingClient which can be populated with +// an error for use as the return value of a faked UploadCRL call. +// +// It does nothing with uploaded contents. +type noopUploader struct { + grpc.ClientStream + sendErr error + recvErr error +} + +func (f *noopUploader) Send(*cspb.UploadCRLRequest) error { + return f.sendErr +} + +func (f *noopUploader) CloseAndRecv() (*emptypb.Empty, error) { + if f.recvErr != nil { + return nil, f.recvErr + } + return &emptypb.Empty{}, nil +} + +// fakeStorer is a fake cspb.CRLStorerClient which can be populated with an +// uploader stream for use as the return value for calls to UploadCRL. 
+type fakeStorer struct { + uploaderStream grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty] +} + +func (f *fakeStorer) UploadCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.ClientStreamingClient[cspb.UploadCRLRequest, emptypb.Empty], error) { + return f.uploaderStream, nil +} + +func TestUpdateShard(t *testing.T) { + e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + + sentinelErr := errors.New("oops") + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + clk := clock.NewFake() + clk.Set(time.Date(2020, time.January, 18, 0, 0, 0, 0, time.UTC)) + cu, err := NewUpdater( + []*issuance.Certificate{e1, r3}, + 2, + 18*time.Hour, // shardWidth + 24*time.Hour, // lookbackPeriod + 6*time.Hour, // updatePeriod + time.Minute, // updateTimeout + 1, 1, + "stale-if-error=60", + 5*time.Minute, + &fakeSAC{ + revokedCerts: revokedCertsStream{}, + maxNotAfter: clk.Now().Add(90 * 24 * time.Hour), + }, + &fakeCA{gcc: generateCRLStream{}}, + &fakeStorer{uploaderStream: &noopUploader{}}, + metrics.NoopRegisterer, blog.NewMock(), clk, + ) + test.AssertNotError(t, err, "building test crlUpdater") + + // Ensure that getting no results from the SA still works. + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertNotError(t, err, "empty CRL") + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "success", + }, 1) + + // Make a CRL with actual contents. Verify that the information makes it through + // each of the steps: + // - read from SA + // - write to CA and read the response + // - upload with CRL storer + // + // The final response should show up in the bytes recorded by our fake storer. + recordingUploader := &recordingUploader{} + now := timestamppb.Now() + cu.cs = &fakeStorer{uploaderStream: recordingUploader} + cu.sa = &fakeSAC{ + revokedCerts: revokedCertsStream{ + entries: []*corepb.CRLEntry{ + { + Serial: "0311b5d430823cfa25b0fc85d14c54ee35", + Reason: int32(revocation.KeyCompromise), + RevokedAt: now, + }, + { + Serial: "037d6a05a0f6a975380456ae605cee9889", + Reason: int32(revocation.AffiliationChanged), + RevokedAt: now, + }, + { + Serial: "03aa617ab8ee58896ba082bfa25199c884", + Reason: int32(revocation.Unspecified), + RevokedAt: now, + }, + }, + }, + maxNotAfter: clk.Now().Add(90 * 24 * time.Hour), + } + // We ask for shard 2 specifically because GetRevokedCertsByShard only returns our + // certificate for that shard. 
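+	// (Strictly speaking, this fakeSAC ignores the shard index and returns the
+	// stream configured above regardless; shard 2 exercises a non-default shard.)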
+ err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 2) + test.AssertNotError(t, err, "updateShard") + + expectedEntries := map[string]int32{ + "0311b5d430823cfa25b0fc85d14c54ee35": int32(revocation.KeyCompromise), + "037d6a05a0f6a975380456ae605cee9889": int32(revocation.AffiliationChanged), + "03aa617ab8ee58896ba082bfa25199c884": int32(revocation.Unspecified), + } + for r := range bytes.SplitSeq(recordingUploader.crlBody, []byte("\n")) { + if len(r) == 0 { + continue + } + var entry crlEntry + err := json.Unmarshal(r, &entry) + if err != nil { + t.Fatalf("unmarshaling JSON: %s", err) + } + expectedReason, ok := expectedEntries[entry.Serial] + if !ok { + t.Errorf("CRL entry for %s was unexpected", entry.Serial) + } + if entry.Reason != expectedReason { + t.Errorf("CRL entry for %s had reason=%d, want %d", entry.Serial, entry.Reason, expectedReason) + } + delete(expectedEntries, entry.Serial) + } + // At this point the expectedEntries map should be empty; if it's not, emit an error + // for each remaining expectation. + for k, v := range expectedEntries { + t.Errorf("expected cert %s to be revoked for reason=%d, but it was not on the CRL", k, v) + } + + cu.updatedCounter.Reset() + + // Ensure that getting no results from the SA still works. + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertNotError(t, err, "empty CRL") + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "success", + }, 1) + cu.updatedCounter.Reset() + + // Errors closing the Storer upload stream should bubble up. + cu.cs = &fakeStorer{uploaderStream: &noopUploader{recvErr: sentinelErr}} + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "storer error") + test.AssertContains(t, err.Error(), "closing CRLStorer upload stream") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "failed", + }, 1) + cu.updatedCounter.Reset() + + // Errors sending to the Storer should bubble up sooner. + cu.cs = &fakeStorer{uploaderStream: &noopUploader{sendErr: sentinelErr}} + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "storer error") + test.AssertContains(t, err.Error(), "sending CRLStorer metadata") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "failed", + }, 1) + cu.updatedCounter.Reset() + + // Errors reading from the CA should bubble up sooner. + cu.ca = &fakeCA{gcc: generateCRLStream{recvErr: sentinelErr}} + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "CA error") + test.AssertContains(t, err.Error(), "receiving CRL bytes") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "failed", + }, 1) + cu.updatedCounter.Reset() + + // Errors sending to the CA should bubble up sooner. 
+ cu.ca = &fakeCA{gcc: generateCRLStream{sendErr: sentinelErr}} + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "CA error") + test.AssertContains(t, err.Error(), "sending CA metadata") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "failed", + }, 1) + cu.updatedCounter.Reset() + + // Errors reading from the SA should bubble up soonest. + cu.sa = &fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)} + err = cu.updateShard(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "database error") + test.AssertContains(t, err.Error(), "retrieving entry from SA") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertMetricWithLabelsEquals(t, cu.updatedCounter, prometheus.Labels{ + "issuer": "(TEST) Elegant Elephant E1", "result": "failed", + }, 1) + cu.updatedCounter.Reset() +} + +func TestUpdateShardWithRetry(t *testing.T) { + e1, err := issuance.LoadCertificate("../../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + r3, err := issuance.LoadCertificate("../../test/hierarchy/int-r3.cert.pem") + test.AssertNotError(t, err, "loading test issuer") + + sentinelErr := errors.New("oops") + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + clk := clock.NewFake() + clk.Set(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)) + + // Build an updater that will always fail when it talks to the SA. + cu, err := NewUpdater( + []*issuance.Certificate{e1, r3}, + 2, 18*time.Hour, 24*time.Hour, + 6*time.Hour, time.Minute, 1, 1, + "stale-if-error=60", + 5*time.Minute, + &fakeSAC{revokedCerts: revokedCertsStream{err: sentinelErr}, maxNotAfter: clk.Now().Add(90 * 24 * time.Hour)}, + &fakeCA{gcc: generateCRLStream{}}, + &fakeStorer{uploaderStream: &noopUploader{}}, + metrics.NoopRegisterer, blog.NewMock(), clk, + ) + test.AssertNotError(t, err, "building test crlUpdater") + + // Ensure that having MaxAttempts set to 1 results in the clock not moving + // forward at all. + startTime := cu.clk.Now() + err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "database error") + test.AssertErrorIs(t, err, sentinelErr) + test.AssertEquals(t, cu.clk.Now(), startTime) + + // Ensure that having MaxAttempts set to 5 results in the clock moving forward + // by 1+2+4+8=15 seconds. The core.RetryBackoff system has 20% jitter built + // in, so we have to be approximate. + cu.maxAttempts = 5 + startTime = cu.clk.Now() + err = cu.updateShardWithRetry(ctx, cu.clk.Now(), e1.NameID(), 1) + test.AssertError(t, err, "database error") + test.AssertErrorIs(t, err, sentinelErr) + t.Logf("start: %v", startTime) + t.Logf("now: %v", cu.clk.Now()) + test.Assert(t, startTime.Add(15*0.8*time.Second).Before(cu.clk.Now()), "retries didn't sleep enough") + test.Assert(t, startTime.Add(15*1.2*time.Second).After(cu.clk.Now()), "retries slept too much") +} diff --git a/csr/csr.go b/csr/csr.go index a960d17abb0..34b84c39b3b 100644 --- a/csr/csr.go +++ b/csr/csr.go @@ -5,6 +5,7 @@ import ( "crypto" "crypto/x509" "errors" + "net/netip" "strings" "github.com/letsencrypt/boulder/core" @@ -20,11 +21,7 @@ const maxCNLength = 64 // strong enough to use. 
Significantly the missing algorithms are:
 // * No algorithms using MD2, MD5, or SHA-1
 // * No DSA algorithms
-//
-// SHA1WithRSA is allowed because there's still a fair bit of it
-// out there, but we should try to remove it soon.
 var goodSignatureAlgorithms = map[x509.SignatureAlgorithm]bool{
-	x509.SHA1WithRSA:     true, // TODO(#2988): Remove support
 	x509.SHA256WithRSA:   true,
 	x509.SHA384WithRSA:   true,
 	x509.SHA512WithRSA:   true,
@@ -34,20 +31,19 @@ var goodSignatureAlgorithms = map[x509.SignatureAlgorithm]bool{
 }
 
 var (
-	invalidPubKey        = berrors.BadCSRError("invalid public key in CSR")
-	unsupportedSigAlg    = berrors.BadCSRError("signature algorithm not supported")
-	invalidSig           = berrors.BadCSRError("invalid signature on CSR")
-	invalidEmailPresent  = berrors.BadCSRError("CSR contains one or more email address fields")
-	invalidIPPresent     = berrors.BadCSRError("CSR contains one or more IP address fields")
-	invalidNoDNS         = berrors.BadCSRError("at least one DNS name is required")
-	invalidAllSANTooLong = berrors.BadCSRError("CSR doesn't contain a SAN short enough to fit in CN")
+	invalidPubKey       = berrors.BadCSRError("invalid public key in CSR")
+	unsupportedSigAlg   = berrors.BadCSRError("signature algorithm not supported")
+	invalidSig          = berrors.BadCSRError("invalid signature on CSR")
+	invalidEmailPresent = berrors.BadCSRError("CSR contains one or more email address fields")
+	invalidURIPresent   = berrors.BadCSRError("CSR contains one or more URI fields")
+	invalidNoIdent      = berrors.BadCSRError("at least one identifier is required")
+	invalidIPCN         = berrors.BadCSRError("CSR contains IP address in Common Name")
 )
 
-// VerifyCSR checks the validity of a x509.CertificateRequest. Before doing checks it normalizes
-// the CSR which lowers the case of DNS names and subject CN, and hoist a DNS name into the CN
-// if it is empty.
+// VerifyCSR checks the validity of an x509.CertificateRequest. It uses
+// identifier.FromCSR to normalize the DNS names before checking whether we'll
+// issue for them.
 func VerifyCSR(ctx context.Context, csr *x509.CertificateRequest, maxNames int, keyPolicy *goodkey.KeyPolicy, pa core.PolicyAuthority) error {
-	normalizeCSR(csr)
 	key, ok := csr.PublicKey.(crypto.PublicKey)
 	if !ok {
 		return invalidPubKey
@@ -62,6 +58,7 @@ func VerifyCSR(ctx context.Context, csr *x509.CertificateRequest, maxNames int,
 	if !goodSignatureAlgorithms[csr.SignatureAlgorithm] {
 		return unsupportedSigAlg
 	}
+
 	err = csr.CheckSignature()
 	if err != nil {
 		return invalidSig
@@ -69,48 +66,64 @@ func VerifyCSR(ctx context.Context, csr *x509.CertificateRequest, maxNames int,
 	if len(csr.EmailAddresses) > 0 {
 		return invalidEmailPresent
 	}
-	if len(csr.IPAddresses) > 0 {
-		return invalidIPPresent
-	}
-	if len(csr.DNSNames) == 0 && csr.Subject.CommonName == "" {
-		return invalidNoDNS
+	if len(csr.URIs) > 0 {
+		return invalidURIPresent
 	}
-	if csr.Subject.CommonName == "" {
-		return invalidAllSANTooLong
-	}
-	if len(csr.Subject.CommonName) > maxCNLength {
-		return berrors.BadCSRError("CN was longer than %d bytes", maxCNLength)
+
+	// Reject all CSRs which have an IP address in the CN. We want to get rid of
+	// CNs entirely anyway, and IP addresses are a new feature, so don't let
+	// clients get in the habit of including them in the CN. We check this here
+	// directly, rather than relying on CNFromCSR (which silently drops IP
+	// address CNs), for defense in depth.
+	_, err = netip.ParseAddr(csr.Subject.CommonName)
+	if err == nil { // Inverted! Successful parsing is a bad thing in this case.
+ return invalidIPCN } - if len(csr.DNSNames) > maxNames { - return berrors.BadCSRError("CSR contains more than %d DNS names", maxNames) + + // FromCSR also performs normalization, returning values that may not match + // the literal CSR contents. + idents := identifier.FromCSR(csr) + if len(idents) == 0 { + return invalidNoIdent } - idents := make([]identifier.ACMEIdentifier, len(csr.DNSNames)) - for i, dnsName := range csr.DNSNames { - idents[i] = identifier.DNSIdentifier(dnsName) + if len(idents) > maxNames { + return berrors.BadCSRError("CSR contains more than %d identifiers", maxNames) } - err = pa.WillingToIssueWildcards(idents) + + err = pa.WillingToIssue(idents) if err != nil { return err } return nil } -// normalizeCSR deduplicates and lowers the case of dNSNames and the subject CN. -// It will also hoist a dNSName into the CN if it is empty. -func normalizeCSR(csr *x509.CertificateRequest) { - if csr.Subject.CommonName == "" { - var forcedCN string - // Promote the first SAN that is less than maxCNLength (if any) - for _, name := range csr.DNSNames { - if len(name) <= maxCNLength { - forcedCN = name - break - } +// CNFromCSR returns the lower-cased Subject Common Name from the CSR, if a +// short enough CN was provided. If it was too long or appears to be an IP, +// there will be no CN. If none was provided, the CN will be the first SAN that +// is short enough, which is done only for backwards compatibility with prior +// Let's Encrypt behaviour. +func CNFromCSR(csr *x509.CertificateRequest) string { + if len(csr.Subject.CommonName) > maxCNLength { + return "" + } + + if csr.Subject.CommonName != "" { + _, err := netip.ParseAddr(csr.Subject.CommonName) + if err == nil { // Inverted! Successful parsing is a bad thing in this case. + return "" } - csr.Subject.CommonName = forcedCN - } else if csr.Subject.CommonName != "" { - csr.DNSNames = append(csr.DNSNames, csr.Subject.CommonName) + + return strings.ToLower(csr.Subject.CommonName) } - csr.Subject.CommonName = strings.ToLower(csr.Subject.CommonName) - csr.DNSNames = core.UniqueLowerNames(csr.DNSNames) + + // If there's no CN already, but we want to set one, promote the first dnsName + // SAN which is shorter than the maximum acceptable CN length (if any). We + // will never promote an ipAddress SAN to the CN. 
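+	// For example (hypothetical names): DNSNames of
+	// ["<a-65+-byte-name>.example.com", "short.example.com"] produce the CN
+	// "short.example.com", because the first SAN is too long to promote.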
+ for _, name := range csr.DNSNames { + if len(name) <= maxCNLength { + return strings.ToLower(name) + } + } + + return "" } diff --git a/csr/csr_test.go b/csr/csr_test.go index d174cad4173..d3d3d1bc44f 100644 --- a/csr/csr_test.go +++ b/csr/csr_test.go @@ -6,35 +6,29 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "errors" "net" + "net/netip" + "net/url" "strings" "testing" "github.com/letsencrypt/boulder/core" berrors "github.com/letsencrypt/boulder/errors" + "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/test" ) -var testingPolicy = &goodkey.KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, -} - type mockPA struct{} -func (pa *mockPA) ChallengesFor(identifier identifier.ACMEIdentifier) (challenges []core.Challenge, err error) { - return -} - -func (pa *mockPA) WillingToIssue(id identifier.ACMEIdentifier) error { - return nil +func (pa *mockPA) ChallengeTypesFor(ident identifier.ACMEIdentifier) ([]core.AcmeChallenge, error) { + return []core.AcmeChallenge{}, nil } -func (pa *mockPA) WillingToIssueWildcards(idents []identifier.ACMEIdentifier) error { +func (pa *mockPA) WillingToIssue(idents identifier.ACMEIdentifiers) error { for _, ident := range idents { if ident.Value == "bad-name.com" || ident.Value == "other-bad-name.com" { return errors.New("policy forbids issuing for identifier") @@ -47,6 +41,10 @@ func (pa *mockPA) ChallengeTypeEnabled(t core.AcmeChallenge) bool { return true } +func (pa *mockPA) CheckAuthzChallenges(a *core.Authorization) error { + return nil +} + func TestVerifyCSR(t *testing.T) { private, err := rsa.GenerateKey(rand.Reader, 2048) test.AssertNotError(t, err, "error generating test key") @@ -72,118 +70,149 @@ func TestVerifyCSR(t *testing.T) { signedReqWithIPAddress := new(x509.CertificateRequest) *signedReqWithIPAddress = *signedReq signedReqWithIPAddress.IPAddresses = []net.IP{net.IPv4(1, 2, 3, 4)} + signedReqWithIPCN := new(x509.CertificateRequest) + *signedReqWithIPCN = *signedReq + signedReqWithIPCN.Subject.CommonName = "1.2.3.4" + signedReqWithURI := new(x509.CertificateRequest) + *signedReqWithURI = *signedReq + testURI, _ := url.ParseRequestURI("https://example.com/") + signedReqWithURI.URIs = []*url.URL{testURI} signedReqWithAllLongSANs := new(x509.CertificateRequest) *signedReqWithAllLongSANs = *signedReq signedReqWithAllLongSANs.DNSNames = []string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"} + keyPolicy, err := goodkey.NewPolicy(nil, nil) + test.AssertNotError(t, err, "creating test keypolicy") + cases := []struct { csr *x509.CertificateRequest maxNames int - keyPolicy *goodkey.KeyPolicy pa core.PolicyAuthority expectedError error }{ { &x509.CertificateRequest{}, 100, - testingPolicy, &mockPA{}, invalidPubKey, }, { &x509.CertificateRequest{PublicKey: &private.PublicKey}, 100, - testingPolicy, &mockPA{}, unsupportedSigAlg, }, { brokenSignedReq, 100, - testingPolicy, &mockPA{}, invalidSig, }, { signedReq, 100, - testingPolicy, &mockPA{}, - invalidNoDNS, + invalidNoIdent, }, { signedReqWithLongCN, 100, - testingPolicy, &mockPA{}, - berrors.BadCSRError("CN was longer than %d bytes", maxCNLength), + nil, }, { signedReqWithHosts, 1, - testingPolicy, &mockPA{}, - berrors.BadCSRError("CSR contains more than 1 DNS names"), + berrors.BadCSRError("CSR contains more than 1 identifiers"), }, { signedReqWithBadNames, 100, - testingPolicy, &mockPA{}, errors.New("policy 
forbids issuing for identifier"), }, { signedReqWithEmailAddress, 100, - testingPolicy, &mockPA{}, invalidEmailPresent, }, { signedReqWithIPAddress, 100, - testingPolicy, &mockPA{}, - invalidIPPresent, + nil, + }, + { + signedReqWithIPCN, + 100, + &mockPA{}, + invalidIPCN, + }, + { + signedReqWithURI, + 100, + &mockPA{}, + invalidURIPresent, }, { signedReqWithAllLongSANs, 100, - testingPolicy, &mockPA{}, - invalidAllSANTooLong, + nil, }, } for _, c := range cases { - err := VerifyCSR(context.Background(), c.csr, c.maxNames, c.keyPolicy, c.pa) + err := VerifyCSR(context.Background(), c.csr, c.maxNames, &keyPolicy, c.pa) test.AssertDeepEquals(t, c.expectedError, err) } } -func TestNormalizeCSR(t *testing.T) { +func TestCNFromCSR(t *testing.T) { tooLongString := strings.Repeat("a", maxCNLength+1) cases := []struct { - name string - csr *x509.CertificateRequest - expectedCN string - expectedNames []string + name string + csr *x509.CertificateRequest + expectedCN string }{ { "no explicit CN", &x509.CertificateRequest{DNSNames: []string{"a.com"}}, "a.com", - []string{"a.com"}, }, { "explicit uppercase CN", &x509.CertificateRequest{Subject: pkix.Name{CommonName: "A.com"}, DNSNames: []string{"a.com"}}, "a.com", - []string{"a.com"}, }, { - "no explicit CN, too long leading SANs", + "no explicit CN, uppercase SAN", + &x509.CertificateRequest{DNSNames: []string{"A.com"}}, + "a.com", + }, + { + "duplicate SANs", + &x509.CertificateRequest{DNSNames: []string{"b.com", "b.com", "a.com", "a.com"}}, + "b.com", + }, + { + "explicit CN not found in SANs", + &x509.CertificateRequest{Subject: pkix.Name{CommonName: "a.com"}, DNSNames: []string{"b.com"}}, + "a.com", + }, + { + "no explicit CN, all SANs too long to be the CN", + &x509.CertificateRequest{DNSNames: []string{ + tooLongString + ".a.com", + tooLongString + ".b.com", + }}, + "", + }, + { + "no explicit CN, leading SANs too long to be the CN", &x509.CertificateRequest{DNSNames: []string{ tooLongString + ".a.com", tooLongString + ".b.com", @@ -191,10 +220,9 @@ func TestNormalizeCSR(t *testing.T) { "b.com", }}, "a.com", - []string{"a.com", tooLongString + ".a.com", tooLongString + ".b.com", "b.com"}, }, { - "explicit CN, too long leading SANs", + "explicit CN, leading SANs too long to be the CN", &x509.CertificateRequest{ Subject: pkix.Name{CommonName: "A.com"}, DNSNames: []string{ @@ -204,14 +232,94 @@ func TestNormalizeCSR(t *testing.T) { "b.com", }}, "a.com", - []string{"a.com", tooLongString + ".a.com", tooLongString + ".b.com", "b.com"}, + }, + { + "explicit CN that's too long to be the CN", + &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: tooLongString + ".a.com"}, + }, + "", + }, + { + "explicit CN that's too long to be the CN, with a SAN", + &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: tooLongString + ".a.com"}, + DNSNames: []string{ + "b.com", + }}, + "", + }, + { + "explicit CN that's an IP", + &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "127.0.0.1"}, + }, + "", + }, + { + "no CN, only IP SANs", + &x509.CertificateRequest{ + IPAddresses: []net.IP{ + netip.MustParseAddr("127.0.0.1").AsSlice(), + }, + }, + "", }, } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - normalizeCSR(c.csr) - test.AssertEquals(t, c.expectedCN, c.csr.Subject.CommonName) - test.AssertDeepEquals(t, c.expectedNames, c.csr.DNSNames) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + test.AssertEquals(t, CNFromCSR(tc.csr), tc.expectedCN) }) } } + +func TestSHA1Deprecation(t *testing.T) { + 
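+	// SHA1WithRSA was removed from goodSignatureAlgorithms above, so a CSR
+	// signed with SHA-1 should now fail VerifyCSR with unsupportedSigAlg.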
features.Reset() + + keyPolicy, err := goodkey.NewPolicy(nil, nil) + test.AssertNotError(t, err, "creating test keypolicy") + + private, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "error generating test key") + + makeAndVerifyCsr := func(alg x509.SignatureAlgorithm) error { + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, + &x509.CertificateRequest{ + DNSNames: []string{"example.com"}, + SignatureAlgorithm: alg, + PublicKey: &private.PublicKey, + }, private) + test.AssertNotError(t, err, "creating test CSR") + + csr, err := x509.ParseCertificateRequest(csrBytes) + test.AssertNotError(t, err, "parsing test CSR") + + return VerifyCSR(context.Background(), csr, 100, &keyPolicy, &mockPA{}) + } + + err = makeAndVerifyCsr(x509.SHA256WithRSA) + test.AssertNotError(t, err, "SHA256 CSR should verify") + + err = makeAndVerifyCsr(x509.SHA1WithRSA) + test.AssertError(t, err, "SHA1 CSR should not verify") +} + +func TestDuplicateExtensionRejection(t *testing.T) { + private, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "error generating test key") + + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, + &x509.CertificateRequest{ + DNSNames: []string{"example.com"}, + SignatureAlgorithm: x509.SHA256WithRSA, + PublicKey: &private.PublicKey, + ExtraExtensions: []pkix.Extension{ + {Id: asn1.ObjectIdentifier{2, 5, 29, 1}, Value: []byte("hello")}, + {Id: asn1.ObjectIdentifier{2, 5, 29, 1}, Value: []byte("world")}, + }, + }, private) + test.AssertNotError(t, err, "creating test CSR") + + _, err = x509.ParseCertificateRequest(csrBytes) + test.AssertError(t, err, "CSR with duplicate extension OID should fail to parse") +} diff --git a/ctpolicy/ctconfig/ctconfig.go b/ctpolicy/ctconfig/ctconfig.go index d990f681206..03dfe018994 100644 --- a/ctpolicy/ctconfig/ctconfig.go +++ b/ctpolicy/ctconfig/ctconfig.go @@ -1,90 +1,31 @@ package ctconfig import ( - "errors" - "fmt" - "time" - - "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" ) -// LogShard describes a single shard of a temporally sharded -// CT log -type LogShard struct { - URI string - Key string - WindowStart time.Time - WindowEnd time.Time -} - -// TemporalSet contains a set of temporal shards of a single log -type TemporalSet struct { - Name string - Shards []LogShard -} - -// Setup initializes the TemporalSet by parsing the start and end dates -// and verifying WindowEnd > WindowStart -func (ts *TemporalSet) Setup() error { - if ts.Name == "" { - return errors.New("Name cannot be empty") - } - if len(ts.Shards) == 0 { - return errors.New("temporal set contains no shards") - } - for i := range ts.Shards { - if ts.Shards[i].WindowEnd.Before(ts.Shards[i].WindowStart) || - ts.Shards[i].WindowEnd.Equal(ts.Shards[i].WindowStart) { - return errors.New("WindowStart must be before WindowEnd") - } - } - return nil -} - -// pick chooses the correct shard from a TemporalSet to use for the given -// expiration time. In the case where two shards have overlapping windows -// the earlier of the two shards will be chosen. 
-func (ts *TemporalSet) pick(exp time.Time) (*LogShard, error) {
-	for _, shard := range ts.Shards {
-		if exp.Before(shard.WindowStart) {
-			continue
-		}
-		if !exp.Before(shard.WindowEnd) {
-			continue
-		}
-		return &shard, nil
-	}
-	return nil, fmt.Errorf("no valid shard available for temporal set %q for expiration date %q", ts.Name, exp)
-}
-
-// LogDescription contains the information needed to submit certificates
-// to a CT log and verify returned receipts. If TemporalSet is non-nil then
-// URI and Key should be empty.
-type LogDescription struct {
-	URI string
-	Key string
-	SubmitFinalCert bool
-
-	*TemporalSet
-}
-
-// Info returns the URI and key of the log, either from a plain log description
-// or from the earliest valid shard from a temporal log set
-func (ld LogDescription) Info(exp time.Time) (string, string, error) {
-	if ld.TemporalSet == nil {
-		return ld.URI, ld.Key, nil
-	}
-	shard, err := ld.TemporalSet.pick(exp)
-	if err != nil {
-		return "", "", err
-	}
-	return shard.URI, shard.Key, nil
-}
-
-type CTGroup struct {
-	Name string
-	Logs []LogDescription
-	// How long to wait for one log to accept a certificate before moving on to
-	// the next.
-	Stagger cmd.ConfigDuration
+// CTConfig is the top-level config object expected to be embedded in an
+// executable's JSON config struct.
+type CTConfig struct {
+	// Stagger is a duration (e.g. "200ms") indicating how long to wait for a log
+	// from one operator group to accept a certificate before attempting
+	// submission to a log run by a different operator instead.
+	Stagger config.Duration
+	// LogListFile is the path to a JSON file on disk containing the set of all
+	// logs trusted by Chrome. The file must match the v3 log list schema:
+	// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
+	LogListFile string `validate:"required"`
+	// SCTLogs is a list of CT log names to submit precerts to in order to get SCTs.
+	SCTLogs []string `validate:"min=1,dive,required"`
+	// InfoLogs is a list of CT log names to submit precerts to on a best-effort
+	// basis. Logs are included here for the sake of wider distribution of our
+	// precerts, and to exercise logs that are in the qualification process.
+	InfoLogs []string
+	// FinalLogs is a list of CT log names to submit final certificates to.
+	// This may include duplicates from the lists above, to submit both precerts
+	// and final certs to the same log.
+	FinalLogs []string
+	// SubmitToTestLogs enables inclusion of "test" logs when obtaining SCTs.
+	// This should only be used in test environments.
+ SubmitToTestLogs bool } diff --git a/ctpolicy/ctconfig/ctconfig_test.go b/ctpolicy/ctconfig/ctconfig_test.go deleted file mode 100644 index d8d710f3970..00000000000 --- a/ctpolicy/ctconfig/ctconfig_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package ctconfig - -import ( - "testing" - "time" - - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/test" -) - -func TestTemporalSetup(t *testing.T) { - for _, tc := range []struct { - ts TemporalSet - err string - }{ - { - ts: TemporalSet{}, - err: "Name cannot be empty", - }, - { - ts: TemporalSet{ - Name: "temporal set", - }, - err: "temporal set contains no shards", - }, - { - ts: TemporalSet{ - Name: "temporal set", - Shards: []LogShard{ - { - WindowStart: time.Time{}, - WindowEnd: time.Time{}, - }, - }, - }, - err: "WindowStart must be before WindowEnd", - }, - { - ts: TemporalSet{ - Name: "temporal set", - Shards: []LogShard{ - { - WindowStart: time.Time{}.Add(time.Hour), - WindowEnd: time.Time{}, - }, - }, - }, - err: "WindowStart must be before WindowEnd", - }, - { - ts: TemporalSet{ - Name: "temporal set", - Shards: []LogShard{ - { - WindowStart: time.Time{}, - WindowEnd: time.Time{}.Add(time.Hour), - }, - }, - }, - err: "", - }, - } { - err := tc.ts.Setup() - if err != nil && tc.err != err.Error() { - t.Errorf("got error %q, wanted %q", err, tc.err) - } else if err == nil && tc.err != "" { - t.Errorf("unexpected error %q", err) - } - } -} - -func TestLogInfo(t *testing.T) { - ld := LogDescription{ - URI: "basic-uri", - Key: "basic-key", - } - uri, key, err := ld.Info(time.Time{}) - test.AssertNotError(t, err, "Info failed") - test.AssertEquals(t, uri, ld.URI) - test.AssertEquals(t, key, ld.Key) - - fc := clock.NewFake() - ld.TemporalSet = &TemporalSet{} - _, _, err = ld.Info(fc.Now()) - test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards") - ld.TemporalSet.Shards = []LogShard{{WindowStart: fc.Now().Add(time.Hour), WindowEnd: fc.Now().Add(time.Hour * 2)}} - _, _, err = ld.Info(fc.Now()) - test.AssertError(t, err, "Info should fail with a TemporalSet with no viable shards") - - fc.Add(time.Hour * 4) - now := fc.Now() - ld.TemporalSet.Shards = []LogShard{ - { - WindowStart: now.Add(time.Hour * -4), - WindowEnd: now.Add(time.Hour * -2), - URI: "a", - Key: "a", - }, - { - WindowStart: now.Add(time.Hour * -2), - WindowEnd: now.Add(time.Hour * 2), - URI: "b", - Key: "b", - }, - { - WindowStart: now.Add(time.Hour * 2), - WindowEnd: now.Add(time.Hour * 4), - URI: "c", - Key: "c", - }, - } - uri, key, err = ld.Info(now) - test.AssertNotError(t, err, "Info failed") - test.AssertEquals(t, uri, "b") - test.AssertEquals(t, key, "b") -} diff --git a/ctpolicy/ctpolicy.go b/ctpolicy/ctpolicy.go index 1252917d34c..02e93914c0e 100644 --- a/ctpolicy/ctpolicy.go +++ b/ctpolicy/ctpolicy.go @@ -2,214 +2,255 @@ package ctpolicy import ( "context" - "errors" - "math/rand" + "encoding/base64" + "fmt" + "strings" "time" - "github.com/letsencrypt/boulder/canceled" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/ctpolicy/ctconfig" + "github.com/letsencrypt/boulder/ctpolicy/loglist" berrors "github.com/letsencrypt/boulder/errors" blog "github.com/letsencrypt/boulder/log" pubpb "github.com/letsencrypt/boulder/publisher/proto" - "github.com/prometheus/client_golang/prometheus" +) + +const ( + succeeded = "succeeded" + failed = "failed" ) // CTPolicy is used to hold information about SCTs 
required from various // groupings type CTPolicy struct { - pub pubpb.PublisherClient - groups []ctconfig.CTGroup - informational []ctconfig.LogDescription - finalLogs []ctconfig.LogDescription - log blog.Logger - - winnerCounter *prometheus.CounterVec + pub pubpb.PublisherClient + sctLogs loglist.List + infoLogs loglist.List + finalLogs loglist.List + stagger time.Duration + log blog.Logger + winnerCounter *prometheus.CounterVec + shardExpiryGauge *prometheus.GaugeVec } // New creates a new CTPolicy struct -func New(pub pubpb.PublisherClient, - groups []ctconfig.CTGroup, - informational []ctconfig.LogDescription, - log blog.Logger, - stats prometheus.Registerer, -) *CTPolicy { - var finalLogs []ctconfig.LogDescription - for _, group := range groups { - for _, log := range group.Logs { - if log.SubmitFinalCert { - finalLogs = append(finalLogs, log) - } - } - } - for _, log := range informational { - if log.SubmitFinalCert { - finalLogs = append(finalLogs, log) +func New(pub pubpb.PublisherClient, sctLogs loglist.List, infoLogs loglist.List, finalLogs loglist.List, stagger time.Duration, log blog.Logger, stats prometheus.Registerer) *CTPolicy { + winnerCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "sct_winner", + Help: "Counter of logs which are selected for sct submission, by log URL and result (succeeded or failed).", + }, []string{"url", "result"}) + + shardExpiryGauge := promauto.With(stats).NewGaugeVec(prometheus.GaugeOpts{ + Name: "ct_shard_expiration_seconds", + Help: "CT shard end_exclusive field expressed as Unix epoch time, by operator and logID.", + }, []string{"operator", "logID"}) + + for _, log := range sctLogs { + if log.EndExclusive.IsZero() { + // Handles the case for non-temporally sharded logs too. + shardExpiryGauge.WithLabelValues(log.Operator, log.Name).Set(float64(0)) + } else { + shardExpiryGauge.WithLabelValues(log.Operator, log.Name).Set(float64(log.EndExclusive.Unix())) } } - winnerCounter := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "sct_race_winner", - Help: "Counter of logs that win SCT submission races.", - }, - []string{"log", "group"}, - ) - stats.MustRegister(winnerCounter) + // Stagger must be positive for time.Ticker. + // Default to the relatively safe value of 1 second. + if stagger <= 0 { + stagger = time.Second + } return &CTPolicy{ - pub: pub, - groups: groups, - informational: informational, - finalLogs: finalLogs, - log: log, - winnerCounter: winnerCounter, + pub: pub, + sctLogs: sctLogs, + infoLogs: infoLogs, + finalLogs: finalLogs, + stagger: stagger, + log: log, + winnerCounter: winnerCounter, + shardExpiryGauge: shardExpiryGauge, } } type result struct { + log loglist.Log sct []byte - log string err error } -// race submits an SCT to each log in a group and waits for the first response back, -// once it has the first SCT it cancels all of the other submissions and returns. -// It allows up to len(group)-1 of the submissions to fail as we only care about -// getting a single SCT. -func (ctp *CTPolicy) race(ctx context.Context, cert core.CertDER, group ctconfig.CTGroup, expiration time.Time) ([]byte, error) { - results := make(chan result, len(group.Logs)) - isPrecert := true - // Randomize the order in which we send requests to the logs in a group - // so we maximize the distribution of logs we get SCTs from. 
- for i, logNum := range rand.Perm(len(group.Logs)) { - ld := group.Logs[logNum] - go func(i int, ld ctconfig.LogDescription) { - // Each submission waits a bit longer than the previous one, to give the - // previous log a chance to reply. If the context is already done by the - // time we get here, don't bother submitting. That generally means the - // context was canceled because another log returned a success already. - time.Sleep(time.Duration(i) * group.Stagger.Duration) - if ctx.Err() != nil { - return - } - uri, key, err := ld.Info(expiration) - if err != nil { - ctp.log.Errf("unable to get log info: %s", err) - return - } - sct, err := ctp.pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{ - LogURL: uri, - LogPublicKey: key, - Der: cert, - Precert: isPrecert, - }) - if err != nil { - // Only log the error if it is not a result of the context being canceled - if !canceled.Is(err) { - ctp.log.Warningf("ct submission to %q failed: %s", uri, err) - } - results <- result{err: err} - return - } - results <- result{sct: sct.Sct, log: uri} - }(i, ld) +// getOne obtains an SCT (or error), and returns it in resChan +func (ctp *CTPolicy) getOne(ctx context.Context, cert core.CertDER, l loglist.Log, resChan chan result) { + sct, err := ctp.pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{ + LogURL: l.Url, + LogPublicKey: base64.StdEncoding.EncodeToString(l.Key), + Der: cert, + Kind: pubpb.SubmissionType_sct, + }) + if err != nil { + resChan <- result{log: l, err: fmt.Errorf("ct submission to %q (%q) failed: %w", l.Name, l.Url, err)} + return } - for i := 0; i < len(group.Logs); i++ { - select { - case <-ctx.Done(): - ctp.winnerCounter.With(prometheus.Labels{"log": "timeout", "group": group.Name}).Inc() - return nil, ctx.Err() - case res := <-results: - if res.sct != nil { - ctp.winnerCounter.With(prometheus.Labels{"log": res.log, "group": group.Name}).Inc() - // Return the very first SCT we get back. Returning triggers - // the defer'd context cancellation method. - return res.sct, nil - } - // We will continue waiting for an SCT until we've seen the same number - // of errors as there are logs in the group as we may still get a SCT - // back from another log. - } - } - ctp.winnerCounter.With(prometheus.Labels{"log": "all_failed", "group": group.Name}).Inc() - return nil, errors.New("all submissions failed") + resChan <- result{log: l, sct: sct.Sct} } -// GetSCTs attempts to retrieve a SCT from each configured grouping of logs and returns -// the set of SCTs to the caller. +// GetSCTs retrieves exactly two SCTs from the total collection of configured +// log groups, with at most one SCT coming from each group. It expects that all +// logs run by a single operator (e.g. Google) are in the same group, to +// guarantee that SCTs from logs in different groups do not end up coming from +// the same operator. As such, it enforces Google's current CT Policy, which +// requires that certs have two SCTs from logs run by different operators. func (ctp *CTPolicy) GetSCTs(ctx context.Context, cert core.CertDER, expiration time.Time) (core.SCTDERs, error) { - results := make(chan result, len(ctp.groups)) + // We'll cancel this sub-context when we have the two SCTs we need, to cause + // any other ongoing submission attempts to quit. 
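+	// Rough shape of the loop below: submit to two logs immediately, start one
+	// more submission per stagger tick, and return as soon as compliantSet
+	// finds a pair of SCTs from two different operators (at least one of them
+	// from a non-tiled log).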
subCtx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	for i, g := range ctp.groups {
-		go func(i int, g ctconfig.CTGroup) {
-			sct, err := ctp.race(subCtx, cert, g, expiration)
-			// Only one of these will be non-nil
-			if err != nil {
-				results <- result{err: berrors.MissingSCTsError("CT log group %q: %s", g.Name, err)}
-			}
-			results <- result{sct: sct}
-		}(i, g)
+
+	// Identify the set of candidate logs whose temporal interval includes this
+	// cert's expiry. Randomize the order of the logs so that we're not always
+	// trying to submit to the same two.
+	logs := ctp.sctLogs.ForTime(expiration).Permute()
+	if len(logs) < 2 {
+		return nil, berrors.MissingSCTsError("Insufficient CT logs available (%d)", len(logs))
 	}
-	isPrecert := true
-	for _, log := range ctp.informational {
-		go func(l ctconfig.LogDescription) {
-			// We use a context.Background() here instead of subCtx because these
-			// submissions are running in a goroutine and we don't want them to be
-			// cancelled when the caller of CTPolicy.GetSCTs returns and cancels
-			// its RPC context.
-			uri, key, err := l.Info(expiration)
-			if err != nil {
-				ctp.log.Errf("unable to get log info: %s", err)
-				return
+
+	// Ensure that the results channel has a buffer equal to the number of
+	// goroutines we're kicking off, so that they're all guaranteed to be able to
+	// write to it and exit without blocking and leaking.
+	resChan := make(chan result, len(logs))
+
+	// Kick off the first two submissions.
+	nextLog := 0
+	for ; nextLog < 2; nextLog++ {
+		go ctp.getOne(subCtx, cert, logs[nextLog], resChan)
+	}
+
+	go ctp.submitPrecertInformational(cert, expiration)
+
+	// staggerTicker will be used to start a new submission each stagger interval
+	staggerTicker := time.NewTicker(ctp.stagger)
+	defer staggerTicker.Stop()
+
+	// Collect SCTs and errors out of the results channel into these slices.
+	results := make([]result, 0)
+	errs := make([]string, 0)
+
+loop:
+	for {
+		select {
+		case <-staggerTicker.C:
+			// Each tick from the staggerTicker, we start submitting to another log
+			if nextLog >= len(logs) {
+				// Unless we have run out of logs to submit to, in which case we
+				// don't need the ticker anymore.
+				staggerTicker.Stop()
+				continue
 			}
-			_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
-				LogURL:       uri,
-				LogPublicKey: key,
-				Der:          cert,
-				Precert:      isPrecert,
-			})
-			if err != nil {
-				ctp.log.Warningf("ct submission to informational log %q failed: %s", uri, err)
+			go ctp.getOne(subCtx, cert, logs[nextLog], resChan)
+			nextLog++
+		case res := <-resChan:
+			if res.err != nil {
+				errs = append(errs, res.err.Error())
+				ctp.winnerCounter.WithLabelValues(res.log.Url, failed).Inc()
+			} else {
+				results = append(results, res)
+				ctp.winnerCounter.WithLabelValues(res.log.Url, succeeded).Inc()
+
+				scts := compliantSet(results)
+				if scts != nil {
+					return scts, nil
+				}
 			}
-		}(log)
-	}
-	var ret core.SCTDERs
-	for i := 0; i < len(ctp.groups); i++ {
-		res := <-results
-		// If any one group fails to get a SCT then we fail out immediately
-		// cancel any other in progress work as we can't continue
-		if res.err != nil {
-			// Returning triggers the defer'd context cancellation method
-			return nil, res.err
+
+			// We can collect len(logs) results from the channel as every goroutine is
+			// guaranteed to write one result (either sct or error) to the channel.
+			if len(results)+len(errs) >= len(logs) {
+				// We have an error or result from every log, but didn't find a compliant set.
+				break loop
+			}
 		}
-		ret = append(ret, res.sct)
 	}
-	return ret, nil
+
+	// If we made it to the end of that loop, that means we never got two SCTs
+	// to return. Error out instead.
+	if ctx.Err() != nil {
+		// We timed out (the calling function returned and canceled our context),
+		// thereby causing all of our getOne sub-goroutines to be canceled.
+		return nil, berrors.MissingSCTsError("failed to get 2 SCTs before ctx finished: %s", ctx.Err())
+	}
+	return nil, berrors.MissingSCTsError("failed to get 2 SCTs, got %d error(s): %s", len(errs), strings.Join(errs, "; "))
 }
-// SubmitFinalCert submits finalized certificates created from precertificates
-// to any configured logs
-func (ctp *CTPolicy) SubmitFinalCert(cert []byte, expiration time.Time) {
-	for _, log := range ctp.finalLogs {
-		go func(l ctconfig.LogDescription) {
-			uri, key, err := l.Info(expiration)
-			if err != nil {
-				ctp.log.Errf("unable to get log info: %s", err)
-				return
+
+// compliantSet returns a slice of SCTs which complies with all relevant CT Log
+// Policy requirements, namely that the set of SCTs:
+//   - contains at least two SCTs, which
+//   - come from logs run by at least two different operators, and
+//   - includes at least one SCT from an RFC6962-compliant (i.e. non-static/tiled) log.
+//
+// If no such set of SCTs exists, returns nil.
+func compliantSet(results []result) core.SCTDERs {
+	for _, first := range results {
+		if first.err != nil {
+			continue
+		}
+		for _, second := range results {
+			if second.err != nil {
+				continue
 			}
-			_, err = ctp.pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{
-				LogURL:       uri,
-				LogPublicKey: key,
-				Der:          cert,
-				Precert:      false,
-			})
+			if first.log.Operator == second.log.Operator {
+				// The two SCTs must come from different operators.
+				continue
+			}
+			if first.log.Tiled && second.log.Tiled {
+				// At least one must come from a non-tiled log.
+				continue
+			}
+			return core.SCTDERs{first.sct, second.sct}
+		}
+	}
+	return nil
+}
+
+// submitAllBestEffort submits the given certificate or precertificate to every
+// log ("informational" for precerts, "final" for certs) configured in the policy.
+// It neither waits for these submissions to complete, nor tracks their success.
+func (ctp *CTPolicy) submitAllBestEffort(blob core.CertDER, kind pubpb.SubmissionType, expiry time.Time) {
+	logs := ctp.finalLogs
+	if kind == pubpb.SubmissionType_info {
+		logs = ctp.infoLogs
+	}
+
+	for _, log := range logs {
+		if log.StartInclusive.After(expiry) || log.EndExclusive.Equal(expiry) || log.EndExclusive.Before(expiry) {
+			continue
+		}
+
+		go func(log loglist.Log) {
+			_, err := ctp.pub.SubmitToSingleCTWithResult(
+				context.Background(),
+				&pubpb.Request{
+					LogURL:       log.Url,
+					LogPublicKey: base64.StdEncoding.EncodeToString(log.Key),
+					Der:          blob,
+					Kind:         kind,
+				},
+			)
 			if err != nil {
-				ctp.log.Warningf("ct submission of final cert to log %q failed: %s", uri, err)
+				ctp.log.Warningf("ct submission of cert to log %q failed: %s", log.Url, err)
 			}
 		}(log)
 	}
 }
+
+// submitPrecertInformational submits precertificates to any configured
+// "informational" logs, but does not care about success or returned SCTs.
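Editor's note on the pattern above: the combination of a result channel buffered to the worker count, a stagger ticker, and an early return on the first compliant pair is easy to get subtly wrong. Here is a self-contained sketch of the same shape, with every Boulder-specific type replaced by a hypothetical stand-in (`submit`, `pair`, and the operator names are invented for illustration):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type sctResult struct {
	operator string
	sct      string
	err      error
}

// submit stands in for one CT-log submission; it reports either an SCT or
// the context error, always writing exactly one result.
func submit(ctx context.Context, operator string, out chan<- sctResult) {
	select {
	case <-ctx.Done():
		out <- sctResult{operator: operator, err: ctx.Err()}
	case <-time.After(50 * time.Millisecond):
		out <- sctResult{operator: operator, sct: "sct-from-" + operator}
	}
}

// pair returns two SCTs from distinct operators, or nil if none exist yet.
func pair(got []sctResult) []string {
	for i, a := range got {
		for _, b := range got[i+1:] {
			if a.err == nil && b.err == nil && a.operator != b.operator {
				return []string{a.sct, b.sct}
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	operators := []string{"A", "B", "C", "D"}
	// Buffer equal to the number of goroutines so no sender ever blocks.
	out := make(chan sctResult, len(operators))

	// Start two submissions immediately, then one more per tick.
	next := 2
	for i := 0; i < next; i++ {
		go submit(ctx, operators[i], out)
	}
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()

	var got []sctResult
	for len(got) < len(operators) {
		select {
		case <-tick.C:
			if next < len(operators) {
				go submit(ctx, operators[next], out)
				next++
			}
		case res := <-out:
			got = append(got, res)
			if scts := pair(got); scts != nil {
				fmt.Println("compliant pair:", scts)
				return
			}
		}
	}
	fmt.Println("no compliant pair")
}
```

Because the channel buffer matches the number of goroutines, the early return cannot strand a blocked sender; that is the same leak-avoidance argument the comment in GetSCTs makes.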
+func (ctp *CTPolicy) submitPrecertInformational(cert core.CertDER, expiration time.Time) { + ctp.submitAllBestEffort(cert, pubpb.SubmissionType_info, expiration) +} + +// SubmitFinalCert submits finalized certificates created from precertificates +// to any configured "final" logs, but does not care about success. +func (ctp *CTPolicy) SubmitFinalCert(cert core.CertDER, expiration time.Time) { + ctp.submitAllBestEffort(cert, pubpb.SubmissionType_final, expiration) +} diff --git a/ctpolicy/ctpolicy_test.go b/ctpolicy/ctpolicy_test.go index bb1da73661b..30e9b2840a7 100644 --- a/ctpolicy/ctpolicy_test.go +++ b/ctpolicy/ctpolicy_test.go @@ -1,22 +1,24 @@ package ctpolicy import ( + "bytes" "context" "errors" - "regexp" + "strings" "testing" "time" - "github.com/letsencrypt/boulder/cmd" + "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/ctpolicy/ctconfig" + "github.com/letsencrypt/boulder/ctpolicy/loglist" berrors "github.com/letsencrypt/boulder/errors" blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/metrics" pubpb "github.com/letsencrypt/boulder/publisher/proto" "github.com/letsencrypt/boulder/test" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" ) type mockPub struct{} @@ -25,12 +27,19 @@ func (mp *mockPub) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Reques return &pubpb.Result{Sct: []byte{0}}, nil } -type alwaysFail struct{} +type mockFailPub struct{} -func (mp *alwaysFail) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { +func (mp *mockFailPub) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { return nil, errors.New("BAD") } +type mockSlowPub struct{} + +func (mp *mockSlowPub) SubmitToSingleCTWithResult(ctx context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { + <-ctx.Done() + return nil, errors.New("timed out") +} + func TestGetSCTs(t *testing.T) { expired, cancel := context.WithDeadline(context.Background(), time.Now()) defer cancel() @@ -38,90 +47,61 @@ func TestGetSCTs(t *testing.T) { testCases := []struct { name string mock pubpb.PublisherClient - groups []ctconfig.CTGroup + logs loglist.List ctx context.Context result core.SCTDERs - errRegexp *regexp.Regexp + expectErr string berrorType *berrors.ErrorType }{ { name: "basic success case", mock: &mockPub{}, - groups: []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, - { - Name: "b", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, + logs: loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")}, + {Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")}, }, ctx: context.Background(), result: core.SCTDERs{[]byte{0}, []byte{0}}, }, { name: "basic failure case", - mock: &alwaysFail{}, - groups: []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, - { - Name: "b", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, + mock: &mockFailPub{}, + 
logs: loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")}, + {Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")}, }, ctx: context.Background(), - errRegexp: regexp.MustCompile("CT log group \".\": all submissions failed"), + expectErr: "failed to get 2 SCTs, got 4 error(s)", berrorType: &missingSCTErr, }, { name: "parent context timeout failure case", - mock: &alwaysFail{}, - groups: []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, - { - Name: "b", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, + mock: &mockSlowPub{}, + logs: loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")}, + {Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")}, }, - ctx: expired, - errRegexp: regexp.MustCompile("CT log group \".\": context deadline exceeded"), + ctx: expired, + expectErr: "failed to get 2 SCTs before ctx finished", + berrorType: &missingSCTErr, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - ctp := New(tc.mock, tc.groups, nil, blog.NewMock(), metrics.NoopRegisterer) + ctp := New(tc.mock, tc.logs, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer) ret, err := ctp.GetSCTs(tc.ctx, []byte{0}, time.Time{}) if tc.result != nil { test.AssertDeepEquals(t, ret, tc.result) - } else if tc.errRegexp != nil { - if !tc.errRegexp.MatchString(err.Error()) { - t.Errorf("Error %q did not match expected regexp %q", err, tc.errRegexp) + } else if tc.expectErr != "" { + if !strings.Contains(err.Error(), tc.expectErr) { + t.Errorf("Error %q did not match expected %q", err, tc.expectErr) } if tc.berrorType != nil { test.AssertErrorIs(t, err, *tc.berrorType) @@ -131,108 +111,146 @@ func TestGetSCTs(t *testing.T) { } } -type failOne struct { +type mockFailOnePub struct { badURL string } -func (mp *failOne) SubmitToSingleCTWithResult(_ context.Context, req *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { +func (mp *mockFailOnePub) SubmitToSingleCTWithResult(_ context.Context, req *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { if req.LogURL == mp.badURL { return nil, errors.New("BAD") } return &pubpb.Result{Sct: []byte{0}}, nil } -type slowPublisher struct{} - -func (sp *slowPublisher) SubmitToSingleCTWithResult(_ context.Context, req *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { - time.Sleep(time.Second) - return &pubpb.Result{Sct: []byte{0}}, nil -} - func TestGetSCTsMetrics(t *testing.T) { - ctp := New(&failOne{badURL: "abc"}, []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, - { - Name: "b", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, - }, - }, - }, nil, blog.NewMock(), metrics.NoopRegisterer) + ctp := New(&mockFailOnePub{badURL: "UrlA1"}, loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")}, + {Name: "LogC1", Operator: "OperC", Url: "UrlC1", Key: []byte("KeyC1")}, + }, nil, 
nil, 0, blog.NewMock(), metrics.NoopRegisterer) _, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{}) test.AssertNotError(t, err, "GetSCTs failed") - test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"log": "ghi", "group": "a"}, 1) - test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"log": "ghi", "group": "b"}, 1) + test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlB1", "result": succeeded}, 1) + test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlC1", "result": succeeded}, 1) } func TestGetSCTsFailMetrics(t *testing.T) { - // When an entire log group fails, we should increment the "winner of SCT - // race" stat for that group under the fictional log "all_failed". - ctp := New(&failOne{badURL: "abc"}, []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - }, - }, - }, nil, blog.NewMock(), metrics.NoopRegisterer) + // Ensure the proper metrics are incremented when GetSCTs fails. + ctp := New(&mockFailOnePub{badURL: "UrlA1"}, loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")}, + }, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer) _, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{}) - if err == nil { - t.Fatal("GetSCTs should have failed") - } - test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"log": "all_failed", "group": "a"}, 1) + test.AssertError(t, err, "GetSCTs should have failed") + test.AssertErrorIs(t, err, berrors.MissingSCTs) + test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlA1", "result": failed}, 1) - // Same thing, but for when an entire log group times out. + // Ensure the proper metrics are incremented when GetSCTs times out. ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() - ctp = New(&slowPublisher{}, []ctconfig.CTGroup{ - { - Name: "a", - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - }, - }, - }, nil, blog.NewMock(), metrics.NoopRegisterer) + ctp = New(&mockSlowPub{}, loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2")}, + }, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer) _, err = ctp.GetSCTs(ctx, []byte{0}, time.Time{}) - if err == nil { - t.Fatal("GetSCTs should have failed") - } - test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"log": "timeout", "group": "a"}, 1) + test.AssertError(t, err, "GetSCTs should have timed out") + test.AssertErrorIs(t, err, berrors.MissingSCTs) + test.AssertContains(t, err.Error(), context.DeadlineExceeded.Error()) + test.AssertMetricWithLabelsEquals(t, ctp.winnerCounter, prometheus.Labels{"url": "UrlA1", "result": failed}, 1) } -// A mock publisher that counts submissions -type countEm struct { - count int -} +func TestLogListMetrics(t *testing.T) { + fc := clock.NewFake() + Tomorrow := fc.Now().Add(24 * time.Hour) + NextWeek := fc.Now().Add(7 * 24 * time.Hour) -func (ce *countEm) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { - ce.count++ - return &pubpb.Result{Sct: []byte{0}}, nil + // Multiple operator groups with configured logs. 
+ ctp := New(&mockPub{}, loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1"), EndExclusive: Tomorrow}, + {Name: "LogA2", Operator: "OperA", Url: "UrlA2", Key: []byte("KeyA2"), EndExclusive: NextWeek}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1"), EndExclusive: Tomorrow}, + }, nil, nil, 0, blog.NewMock(), metrics.NoopRegisterer) + test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperA", "logID": "LogA1"}, 86400) + test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperA", "logID": "LogA2"}, 604800) + test.AssertMetricWithLabelsEquals(t, ctp.shardExpiryGauge, prometheus.Labels{"operator": "OperB", "logID": "LogB1"}, 86400) } -func TestStagger(t *testing.T) { - countingPub := &countEm{} - ctp := New(countingPub, []ctconfig.CTGroup{ +func TestCompliantSet(t *testing.T) { + for _, tc := range []struct { + name string + results []result + want core.SCTDERs + }{ + { + name: "nil input", + results: nil, + want: nil, + }, + { + name: "zero length input", + results: []result{}, + want: nil, + }, { - Name: "a", - Stagger: cmd.ConfigDuration{Duration: 500 * time.Millisecond}, - Logs: []ctconfig.LogDescription{ - {URI: "abc", Key: "def"}, - {URI: "ghi", Key: "jkl"}, + name: "only one result", + results: []result{ + {log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")}, }, + want: nil, }, - }, nil, blog.NewMock(), metrics.NoopRegisterer) - _, err := ctp.GetSCTs(context.Background(), []byte{0}, time.Time{}) - test.AssertNotError(t, err, "GetSCTs failed") - if countingPub.count != 1 { - t.Errorf("wrong number of requests to publisher. got %d, expected 1", countingPub.count) + { + name: "only one good result", + results: []result{ + {log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")}, + {log: loglist.Log{Operator: "B", Tiled: false}, err: errors.New("oops")}, + }, + want: nil, + }, + { + name: "only one operator", + results: []result{ + {log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct1")}, + {log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct2")}, + }, + want: nil, + }, + { + name: "all tiled", + results: []result{ + {log: loglist.Log{Operator: "A", Tiled: true}, sct: []byte("sct1")}, + {log: loglist.Log{Operator: "B", Tiled: true}, sct: []byte("sct2")}, + }, + want: nil, + }, + { + name: "happy path", + results: []result{ + {log: loglist.Log{Operator: "A", Tiled: false}, err: errors.New("oops")}, + {log: loglist.Log{Operator: "A", Tiled: true}, sct: []byte("sct2")}, + {log: loglist.Log{Operator: "A", Tiled: false}, sct: []byte("sct3")}, + {log: loglist.Log{Operator: "B", Tiled: false}, err: errors.New("oops")}, + {log: loglist.Log{Operator: "B", Tiled: true}, sct: []byte("sct4")}, + {log: loglist.Log{Operator: "B", Tiled: false}, sct: []byte("sct6")}, + {log: loglist.Log{Operator: "C", Tiled: false}, err: errors.New("oops")}, + {log: loglist.Log{Operator: "C", Tiled: true}, sct: []byte("sct8")}, + {log: loglist.Log{Operator: "C", Tiled: false}, sct: []byte("sct9")}, + }, + // The second and sixth results should be picked, because first and fourth + // are skipped for being errors, and fifth is skipped for also being tiled. 
+ want: core.SCTDERs{[]byte("sct2"), []byte("sct6")}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + got := compliantSet(tc.results) + if len(got) != len(tc.want) { + t.Fatalf("compliantSet(%#v) returned %d SCTs, but want %d", tc.results, len(got), len(tc.want)) + } + for i, sct := range tc.want { + if !bytes.Equal(got[i], sct) { + t.Errorf("compliantSet(%#v) returned unexpected SCT at index %d", tc.results, i) + } + } + }) } } diff --git a/ctpolicy/loglist/lintlist.go b/ctpolicy/loglist/lintlist.go new file mode 100644 index 00000000000..da361775c0c --- /dev/null +++ b/ctpolicy/loglist/lintlist.go @@ -0,0 +1,43 @@ +package loglist + +import "sync" + +var lintlist struct { + sync.Once + list List + err error +} + +// InitLintList creates and stores a loglist intended for linting (i.e. with +// purpose Validation). Test logs are included only when submitToTestLogs is +// true. We have to store this in a global because the zlint framework doesn't +// (yet) support configuration, so the e_scts_from_same_operator lint cannot +// load a log list on its own. Instead, we have the CA call this initialization +// function at startup, and have the lint call the getter below to get access to +// the cached list. +func InitLintList(path string, submitToTestLogs bool) error { + lintlist.Do(func() { + l, err := New(path) + if err != nil { + lintlist.err = err + return + } + + l, err = l.forPurpose(Validation, submitToTestLogs) + if err != nil { + lintlist.err = err + return + } + + lintlist.list = l + }) + + return lintlist.err +} + +// GetLintList returns the log list initialized by InitLintList. This must +// only be called after InitLintList has been called on the same (or parent) +// goroutine. +func GetLintList() List { + return lintlist.list +} diff --git a/ctpolicy/loglist/loglist.go b/ctpolicy/loglist/loglist.go new file mode 100644 index 00000000000..be6f97bb49a --- /dev/null +++ b/ctpolicy/loglist/loglist.go @@ -0,0 +1,248 @@ +package loglist + +import ( + _ "embed" + "encoding/base64" + "errors" + "fmt" + "math/rand/v2" + "os" + "slices" + "time" + + "github.com/google/certificate-transparency-go/loglist3" +) + +// purpose is the use to which a log list will be put. This type exists to allow +// the following consts to be declared for use by LogList consumers. +type purpose string + +// Issuance means that the new log list should only contain Usable logs, which +// can issue SCTs that will be trusted by all Chrome clients. +const Issuance purpose = "scts" + +// Informational means that the new log list can contain Usable, Qualified, and +// Pending logs, which will all accept submissions but not necessarily be +// trusted by Chrome clients. +const Informational purpose = "info" + +// Validation means that the new log list should only contain Usable and +// Readonly logs, whose SCTs will be trusted by all Chrome clients but aren't +// necessarily still issuing SCTs today. +const Validation purpose = "lint" + +// List represents a list of logs arranged by the "v3" schema as published by +// Chrome: https://www.gstatic.com/ct/log_list/v3/log_list_schema.json +type List []Log + +// Log represents a single log run by an operator. It contains just the info +// necessary to determine whether we want to submit to that log, and how to +// do so. 
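The `sync.Once`-in-a-struct idiom used by `lintlist.go` above deserves a minimal sketch. Everything below is hypothetical (the names and the loaded value), illustrating only the set-once/read-often shape and its one sharp edge: a failed first `Do` is sticky, so later calls with different arguments still return the first error, just as repeated `InitLintList` calls would.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errEmptyPath = errors.New("empty path")

// cached holds the one-time-initialized value; embedding sync.Once in the
// same struct keeps the guard and the guarded data together.
var cached struct {
	sync.Once
	value string
	err   error
}

// initOnce loads the value exactly once; every later call returns the
// outcome of the first call, regardless of its arguments.
func initOnce(path string) error {
	cached.Do(func() {
		if path == "" {
			cached.err = errEmptyPath
			return
		}
		// Stand-in for the expensive parse-and-filter step.
		cached.value = "loaded:" + path
	})
	return cached.err
}

func main() {
	fmt.Println(initOnce("a.json"), cached.value) // <nil> loaded:a.json
	fmt.Println(initOnce("b.json"), cached.value) // still <nil> loaded:a.json
}
```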
+type Log struct {
+	Operator       string
+	Name           string
+	Id             string
+	Key            []byte
+	Url            string
+	StartInclusive time.Time
+	EndExclusive   time.Time
+	State          loglist3.LogStatus
+	Tiled          bool
+	Type           string
+}
+
+// usableForPurpose returns true if the log state is acceptable for the given
+// log list purpose, and false otherwise.
+func usableForPurpose(s loglist3.LogStatus, p purpose) bool {
+	switch p {
+	case Issuance:
+		return s == loglist3.UsableLogStatus
+	case Informational:
+		return s == loglist3.UsableLogStatus || s == loglist3.QualifiedLogStatus || s == loglist3.PendingLogStatus
+	case Validation:
+		return s == loglist3.UsableLogStatus || s == loglist3.ReadOnlyLogStatus
+	}
+	return false
+}
+
+// isTestLog returns true if the log's type is "test" or "monitoring_only".
+// The schema documents a third option, "prod", which does not currently appear in Google's lists.
+func isTestLog(log Log) bool {
+	return log.Type == "test" || log.Type == "monitoring_only"
+}
+
+// New returns a List of all operators and all logs parsed from the file at
+// the given path. The file must conform to the JSON Schema published by Google:
+// https://www.gstatic.com/ct/log_list/v3/log_list_schema.json
+func New(path string) (List, error) {
+	file, err := os.ReadFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read CT Log List: %w", err)
+	}
+
+	return newHelper(file)
+}
+
+// newHelper is a helper to allow the core logic of `New()` to be unit tested
+// without having to write files to disk.
+func newHelper(file []byte) (List, error) {
+	parsed, err := loglist3.NewFromJSON(file)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse CT Log List: %w", err)
+	}
+
+	result := make(List, 0)
+	for _, op := range parsed.Operators {
+		for _, log := range op.Logs {
+			info := Log{
+				Operator: op.Name,
+				Name:     log.Description,
+				Id:       base64.StdEncoding.EncodeToString(log.LogID),
+				Key:      log.Key,
+				Url:      log.URL,
+				State:    log.State.LogStatus(),
+				Tiled:    false,
+				Type:     log.Type,
+			}
+
+			if log.TemporalInterval != nil {
+				info.StartInclusive = log.TemporalInterval.StartInclusive
+				info.EndExclusive = log.TemporalInterval.EndExclusive
+			}
+
+			result = append(result, info)
+		}
+
+		for _, log := range op.TiledLogs {
+			info := Log{
+				Operator: op.Name,
+				Name:     log.Description,
+				Id:       base64.StdEncoding.EncodeToString(log.LogID),
+				Key:      log.Key,
+				Url:      log.SubmissionURL,
+				State:    log.State.LogStatus(),
+				Tiled:    true,
+				Type:     log.Type,
+			}
+
+			if log.TemporalInterval != nil {
+				info.StartInclusive = log.TemporalInterval.StartInclusive
+				info.EndExclusive = log.TemporalInterval.EndExclusive
+			}
+
+			result = append(result, info)
+		}
+	}
+
+	return result, nil
+}
+
+// SubsetForPurpose returns a new log list containing only those logs whose
+// names match those in the given list, and whose state is acceptable for the
+// given purpose. It returns an error if any of the given names are not found
+// in the starting list, or if the resulting list is too small to satisfy the
+// Chrome "two operators" policy.
+func (ll List) SubsetForPurpose(names []string, p purpose, submitToTestLogs bool) (List, error) {
+	sub, err := ll.subset(names)
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := sub.forPurpose(p, submitToTestLogs)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// subset returns a new log list containing only those logs whose names match
+// those in the given list. It returns an error if any of the given names are
+// not found.
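Putting `New` and `SubsetForPurpose` together, a consumer of this package would presumably wire things up roughly as follows; the file path and log names here are hypothetical placeholders, not real entries from Google's list:

```go
package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/ctpolicy/loglist"
)

func main() {
	// Hypothetical path; a real deployment points at a copy of Google's
	// published v3 log list JSON.
	list, err := loglist.New("path/to/log_list.json")
	if err != nil {
		log.Fatal(err)
	}

	// Keep only the named logs whose state permits issuance, excluding test
	// logs; this errors if fewer than two operators remain.
	sctLogs, err := list.SubsetForPurpose(
		[]string{"Example Log Alpha", "Example Log Beta"},
		loglist.Issuance,
		false,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("submitting to %d logs\n", len(sctLogs))
}
```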
+func (ll List) subset(names []string) (List, error) { + res := make(List, 0) + for _, name := range names { + found := false + for _, log := range ll { + if log.Name == name { + if found { + return nil, fmt.Errorf("found multiple logs matching name %q", name) + } + found = true + res = append(res, log) + } + } + if !found { + return nil, fmt.Errorf("no log found matching name %q", name) + } + } + return res, nil +} + +// forPurpose returns a new log list containing only those logs whose states are +// acceptable for the given purpose. Test logs are included only when +// submitToTestLogs is true. It returns an error if the purpose is Issuance or +// Validation and the set of remaining logs is too small to satisfy the Google +// "two operators" log policy. +func (ll List) forPurpose(p purpose, submitToTestLogs bool) (List, error) { + res := make(List, 0) + operators := make(map[string]struct{}) + + // Test logs in Chrome's all_logs_list.json omit the "state" field. loglist3 + // interprets this as "UndefinedLogStatus", which causes usableForPurpose() + // to return false. To account for this, we skip this check for test logs. + for _, log := range ll { + // Only consider test logs if we are submitting to test logs: + if isTestLog(log) && !submitToTestLogs { + continue + } + // Check the log is usable for a purpose. + // But test logs aren't ever marked Usable. + if !isTestLog(log) && !usableForPurpose(log.State, p) { + continue + } + res = append(res, log) + operators[log.Operator] = struct{}{} + } + + if len(operators) < 2 && p != Informational { + return nil, errors.New("log list does not have enough groups to satisfy Chrome policy") + } + + return res, nil +} + +// ForTime returns a new log list containing only those logs whose temporal +// intervals include the given certificate expiration timestamp. +func (ll List) ForTime(expiry time.Time) List { + res := slices.Clone(ll) + res = slices.DeleteFunc(res, func(l Log) bool { + if (l.StartInclusive.IsZero() || l.StartInclusive.Equal(expiry) || l.StartInclusive.Before(expiry)) && + (l.EndExclusive.IsZero() || l.EndExclusive.After(expiry)) { + return false + } + return true + }) + return res +} + +// Permute returns a new log list containing the exact same logs, but in a +// randomly-shuffled order. +func (ll List) Permute() List { + res := slices.Clone(ll) + rand.Shuffle(len(res), func(i int, j int) { + res[i], res[j] = res[j], res[i] + }) + return res +} + +// GetByID returns the Log matching the given ID, or an error if no such +// log can be found. 
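The temporal filtering in `ForTime` above is half-open: `StartInclusive` admits an expiry exactly at the shard's start, `EndExclusive` rejects one exactly at its end, and a zero time on either bound means unbounded. A standalone check of those boundary cases, with the shard dates invented for illustration:

```go
package main

import (
	"fmt"
	"time"
)

// accepts mirrors ForTime's half-open check: [start, end), with zero times
// treated as unbounded on that side.
func accepts(start, end, expiry time.Time) bool {
	return (start.IsZero() || !start.After(expiry)) &&
		(end.IsZero() || end.After(expiry))
}

func main() {
	start := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)

	fmt.Println(accepts(start, end, start))                 // true: start is inclusive
	fmt.Println(accepts(start, end, end))                   // false: end is exclusive
	fmt.Println(accepts(start, end, end.Add(-time.Second))) // true: just inside
	fmt.Println(accepts(time.Time{}, time.Time{}, start))   // true: fully open shard
}
```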
+func (ll List) GetByID(logID string) (Log, error) { + for _, log := range ll { + if log.Id == logID { + return log, nil + } + } + return Log{}, fmt.Errorf("no log with ID %q found", logID) +} diff --git a/ctpolicy/loglist/loglist_test.go b/ctpolicy/loglist/loglist_test.go new file mode 100644 index 00000000000..9eb1e6fa2b4 --- /dev/null +++ b/ctpolicy/loglist/loglist_test.go @@ -0,0 +1,207 @@ +package loglist + +import ( + "testing" + "time" + + "github.com/google/certificate-transparency-go/loglist3" + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/test" +) + +func TestNew(t *testing.T) { + +} + +func TestSubset(t *testing.T) { + input := List{ + Log{Name: "Log A1"}, + Log{Name: "Log A2"}, + Log{Name: "Log B1"}, + Log{Name: "Log B2"}, + Log{Name: "Log C1"}, + Log{Name: "Log C2"}, + } + + actual, err := input.subset(nil) + test.AssertNotError(t, err, "nil names should not error") + test.AssertEquals(t, len(actual), 0) + + actual, err = input.subset([]string{}) + test.AssertNotError(t, err, "empty names should not error") + test.AssertEquals(t, len(actual), 0) + + actual, err = input.subset([]string{"Other Log"}) + test.AssertError(t, err, "wrong name should result in error") + test.AssertEquals(t, len(actual), 0) + + expected := List{ + Log{Name: "Log B1"}, + Log{Name: "Log A1"}, + Log{Name: "Log A2"}, + } + actual, err = input.subset([]string{"Log B1", "Log A1", "Log A2"}) + test.AssertNotError(t, err, "normal usage should not error") + test.AssertDeepEquals(t, actual, expected) +} + +func TestForPurpose(t *testing.T) { + input := List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log A2", Operator: "A", State: loglist3.RejectedLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus}, + Log{Name: "Log B2", Operator: "B", State: loglist3.RetiredLogStatus}, + Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus}, + Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus}, + } + expected := List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus}, + } + actual, err := input.forPurpose(Issuance, false) + test.AssertNotError(t, err, "should have two acceptable logs") + test.AssertDeepEquals(t, actual, expected) + + input = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log A2", Operator: "A", State: loglist3.RejectedLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.QualifiedLogStatus}, + Log{Name: "Log B2", Operator: "B", State: loglist3.RetiredLogStatus}, + Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus}, + Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus}, + } + _, err = input.forPurpose(Issuance, false) + test.AssertError(t, err, "should only have one acceptable log") + + expected = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log C2", Operator: "C", State: loglist3.ReadOnlyLogStatus}, + } + actual, err = input.forPurpose(Validation, false) + test.AssertNotError(t, err, "should have two acceptable logs") + test.AssertDeepEquals(t, actual, expected) + + expected = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.QualifiedLogStatus}, + Log{Name: "Log C1", Operator: "C", State: loglist3.PendingLogStatus}, + } + actual, err = input.forPurpose(Informational, false) + 
test.AssertNotError(t, err, "should have three acceptable logs") + test.AssertDeepEquals(t, actual, expected) + + input = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus}, + Log{Name: "Log T1", Operator: "T", Type: "test", State: loglist3.UndefinedLogStatus}, + Log{Name: "Log M1", Operator: "M", Type: "monitoring_only", State: loglist3.UndefinedLogStatus}, + } + expected = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus}, + } + actual, err = input.forPurpose(Issuance, false) + test.AssertNotError(t, err, "should have two acceptable logs with submitToTestLogs=[false]") + test.AssertDeepEquals(t, actual, expected) + + expected = List{ + Log{Name: "Log A1", Operator: "A", State: loglist3.UsableLogStatus}, + Log{Name: "Log B1", Operator: "B", State: loglist3.UsableLogStatus}, + Log{Name: "Log T1", Operator: "T", Type: "test", State: loglist3.UndefinedLogStatus}, + Log{Name: "Log M1", Operator: "M", Type: "monitoring_only", State: loglist3.UndefinedLogStatus}, + } + actual, err = input.forPurpose(Issuance, true) + test.AssertNotError(t, err, "should have two acceptable logs with submitToTestLogs=[true]") + test.AssertDeepEquals(t, actual, expected) +} + +func TestForTime(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + input := List{ + Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)}, + Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Fully Open"}, + } + + expected := List{ + Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)}, + Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Fully Open"}, + } + actual := input.ForTime(fc.Now()) + test.AssertDeepEquals(t, actual, expected) + + expected = List{ + Log{Name: "Fully Bound", StartInclusive: fc.Now().Add(-time.Hour), EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)}, + Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Fully Open"}, + } + actual = input.ForTime(fc.Now().Add(-time.Hour)) + test.AssertDeepEquals(t, actual, expected) + + expected = List{ + Log{Name: "Open Start", EndExclusive: fc.Now().Add(time.Hour)}, + Log{Name: "Fully Open"}, + } + actual = input.ForTime(fc.Now().Add(-2 * time.Hour)) + test.AssertDeepEquals(t, actual, expected) + + expected = List{ + Log{Name: "Open End", StartInclusive: fc.Now().Add(-time.Hour)}, + Log{Name: "Fully Open"}, + } + actual = input.ForTime(fc.Now().Add(time.Hour)) + test.AssertDeepEquals(t, actual, expected) +} + +func TestPermute(t *testing.T) { + input := List{ + Log{Name: "Log A1"}, + Log{Name: "Log A2"}, + Log{Name: "Log B1"}, + Log{Name: "Log B2"}, + Log{Name: "Log C1"}, + Log{Name: "Log C2"}, + } + + foundIndices := make(map[string]map[int]int) + for _, log := range input { + foundIndices[log.Name] = make(map[int]int) + } + + for range 100 { + actual := input.Permute() + for index, log := range actual { + foundIndices[log.Name][index]++ + } + } + + for name, counts := range foundIndices { + for index, count := range counts { + if count == 0 { + t.Errorf("Log %s appeared at index %d too few times", name, index) + 
} + } + } +} + +func TestGetByID(t *testing.T) { + input := List{ + Log{Name: "Log A1", Id: "ID A1"}, + Log{Name: "Log B1", Id: "ID B1"}, + } + + expected := Log{Name: "Log A1", Id: "ID A1"} + actual, err := input.GetByID("ID A1") + test.AssertNotError(t, err, "should have found log") + test.AssertDeepEquals(t, actual, expected) + + _, err = input.GetByID("Other ID") + test.AssertError(t, err, "should not have found log") +} diff --git a/db/gorm.go b/db/gorm.go new file mode 100644 index 00000000000..de2f5db509d --- /dev/null +++ b/db/gorm.go @@ -0,0 +1,224 @@ +package db + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "regexp" + "strings" +) + +// Characters allowed in an unquoted identifier by MariaDB. +// https://mariadb.com/kb/en/identifier-names/#unquoted +var mariaDBUnquotedIdentifierRE = regexp.MustCompile("^[0-9a-zA-Z$_]+$") + +func validMariaDBUnquotedIdentifier(s string) error { + if !mariaDBUnquotedIdentifierRE.MatchString(s) { + return fmt.Errorf("invalid MariaDB identifier %q", s) + } + + allNumeric := true + startsNumeric := false + for i, c := range []byte(s) { + if c < '0' || c > '9' { + if startsNumeric && len(s) > i && s[i] == 'e' { + return fmt.Errorf("MariaDB identifier looks like floating point: %q", s) + } + allNumeric = false + break + } + startsNumeric = true + } + if allNumeric { + return fmt.Errorf("MariaDB identifier contains only numerals: %q", s) + } + return nil +} + +// NewMappedSelector returns an object which can be used to automagically query +// the provided type-mapped database for rows of the parameterized type. +func NewMappedSelector[T any](executor MappedExecutor) (MappedSelector[T], error) { + var throwaway T + t := reflect.TypeOf(throwaway) + + // We use a very strict mapping of struct fields to table columns here: + // - The struct must not have any embedded structs, only named fields. + // - The struct field names must be case-insensitively identical to the + // column names (no struct tags necessary). + // - The struct field names must be case-insensitively unique. + // - Every field of the struct must correspond to a database column. + // - Note that the reverse is not true: it's perfectly okay for there to be + // database columns which do not correspond to fields in the struct; those + // columns will be ignored. + // TODO: In the future, when we replace borp's TableMap with our own, this + // check should be performed at the time the mapping is declared. + columns := make([]string, 0) + seen := make(map[string]struct{}) + for i := range t.NumField() { + field := t.Field(i) + if field.Anonymous { + return nil, fmt.Errorf("struct contains anonymous embedded struct %q", field.Name) + } + column := strings.ToLower(t.Field(i).Name) + err := validMariaDBUnquotedIdentifier(column) + if err != nil { + return nil, fmt.Errorf("struct field maps to unsafe db column name %q", column) + } + if _, found := seen[column]; found { + return nil, fmt.Errorf("struct fields map to duplicate column name %q", column) + } + seen[column] = struct{}{} + columns = append(columns, column) + } + + return &mappedSelector[T]{wrapped: executor, columns: columns}, nil +} + +type mappedSelector[T any] struct { + wrapped MappedExecutor + columns []string +} + +// QueryContext performs a SELECT on the appropriate table for T. It combines the best +// features of borp, the go stdlib, and generics, using the type parameter of +// the typeSelector object to automatically look up the proper table name and +// columns to select. 
It returns an iterable which yields fully-populated
+// objects of the parameterized type directly. The given clauses MUST be only
+// the bits of a sql query from "WHERE ..." onwards; if they contain any of the
+// "SELECT ... FROM ..." portion of the query, it will result in an error. The
+// args take the same kinds of values as borp's SELECT: either one argument per
+// positional placeholder, or a map of placeholder names to their arguments
+// (see https://pkg.go.dev/github.com/letsencrypt/borp#readme-ad-hoc-sql).
+//
+// The caller is responsible for calling `Rows.Close()` when they are done with
+// the query. The caller is also responsible for ensuring that the clauses
+// argument does not contain any user-influenced input.
+func (ts mappedSelector[T]) QueryContext(ctx context.Context, clauses string, args ...any) (Rows[T], error) {
+	// Look up the table to use based on the type of this TypeSelector.
+	var throwaway T
+	tableMap, err := ts.wrapped.TableFor(reflect.TypeOf(throwaway), false)
+	if err != nil {
+		return nil, fmt.Errorf("database model type not mapped to table name: %w", err)
+	}
+
+	return ts.QueryFrom(ctx, tableMap.TableName, clauses, args...)
+}
+
+// QueryFrom is the same as QueryContext, but it additionally takes a table
+// name to select from, rather than automatically computing the table name
+// from borp's DbMap.
+//
+// The caller is responsible for calling `Rows.Close()` when they are done with
+// the query. The caller is also responsible for ensuring that the clauses
+// argument does not contain any user-influenced input.
+func (ts mappedSelector[T]) QueryFrom(ctx context.Context, tablename string, clauses string, args ...any) (Rows[T], error) {
+	err := validMariaDBUnquotedIdentifier(tablename)
+	if err != nil {
+		return nil, err
+	}
+
+	// Construct the query from the column names, table name, and given clauses.
+	// Note that the column names here are in the order given by T's struct
+	// fields (as computed in NewMappedSelector), which is also the order that
+	// Get relies on when scanning row values.
+	query := fmt.Sprintf(
+		"SELECT %s FROM %s %s",
+		strings.Join(ts.columns, ", "),
+		tablename,
+		clauses,
+	)
+
+	r, err := ts.wrapped.QueryContext(ctx, query, args...)
+	if err != nil {
+		return nil, fmt.Errorf("reading db: %w", err)
+	}
+
+	return &rows[T]{wrapped: r, numCols: len(ts.columns)}, nil
+}
+
+// rows is a wrapper around the stdlib's sql.Rows, but with a more
+// type-safe method to get actual row content.
+type rows[T any] struct {
+	wrapped *sql.Rows
+	numCols int
+}
+
+// ForEach calls the given function with each model object retrieved by
+// repeatedly calling .Get(). It closes the rows object when it hits an error
+// or finishes iterating over the rows, so it can only be called once. This is
+// the intended way to use the result of QueryContext or QueryFrom; the other
+// methods on this type are lower-level and intended for advanced use only.
+func (r rows[T]) ForEach(do func(*T) error) (err error) {
+	defer func() {
+		// Close the row reader when we exit. Use the named error return to combine
+		// any error from normal execution with any error from closing.
+		closeErr := r.Close()
+		if closeErr != nil && err != nil {
+			err = fmt.Errorf("%w; also while closing the row reader: %w", err, closeErr)
+		} else if closeErr != nil {
+			err = closeErr
+		}
+		// If closeErr is nil, then just leaving the existing named return alone
+		// will do the right thing.
+	}()
+
+	for r.Next() {
+		row, err := r.Get()
+		if err != nil {
+			return fmt.Errorf("reading row: %w", err)
+		}
+
+		err = do(row)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = r.Err()
+	if err != nil {
+		return fmt.Errorf("iterating over row reader: %w", err)
+	}
+
+	return nil
+}
+
+// Next is a wrapper around sql.Rows.Next(). It must be called before every call
+// to Get(), including the first.
+func (r rows[T]) Next() bool {
+	return r.wrapped.Next()
+}
+
+// Get is a wrapper around sql.Rows.Scan(). Rather than populating an arbitrary
+// number of &interface{} arguments, it returns a populated object of the
+// parameterized type.
+func (r rows[T]) Get() (*T, error) {
+	result := new(T)
+	v := reflect.ValueOf(result)
+
+	// Because sql.Rows.Scan(...) takes a variadic number of individual targets to
+	// read values into, build a slice that can be splatted into the call. Use the
+	// pre-computed list of in-order column names to populate it.
+	scanTargets := make([]any, r.numCols)
+	for i := range scanTargets {
+		field := v.Elem().Field(i)
+		scanTargets[i] = field.Addr().Interface()
+	}
+
+	err := r.wrapped.Scan(scanTargets...)
+	if err != nil {
+		return nil, fmt.Errorf("reading db row: %w", err)
+	}
+
+	return result, nil
+}
+
+// Err is a wrapper around sql.Rows.Err(). It should be checked immediately
+// after Next() returns false for any reason.
+func (r rows[T]) Err() error {
+	return r.wrapped.Err()
+}
+
+// Close is a wrapper around sql.Rows.Close(). It must be called when the caller
+// is done reading rows, regardless of success or error.
+func (r rows[T]) Close() error {
+	return r.wrapped.Close()
+}
diff --git a/db/gorm_test.go b/db/gorm_test.go
new file mode 100644
index 00000000000..c0a179bbce0
--- /dev/null
+++ b/db/gorm_test.go
@@ -0,0 +1,16 @@
+package db
+
+import (
+	"testing"
+
+	"github.com/letsencrypt/boulder/test"
+)
+
+func TestValidMariaDBUnquotedIdentifier(t *testing.T) {
+	test.AssertError(t, validMariaDBUnquotedIdentifier("12345"), "expected error for 12345")
+	test.AssertError(t, validMariaDBUnquotedIdentifier("12345e"), "expected error for 12345e")
+	test.AssertError(t, validMariaDBUnquotedIdentifier("1e10"), "expected error for 1e10")
+	test.AssertError(t, validMariaDBUnquotedIdentifier("foo\\bar"), "expected error for foo\\bar")
+	test.AssertError(t, validMariaDBUnquotedIdentifier("zoom "), "expected error for identifier ending in space")
+	test.AssertNotError(t, validMariaDBUnquotedIdentifier("hi"), "expected no error for 'hi'")
+}
diff --git a/db/interfaces.go b/db/interfaces.go
new file mode 100644
index 00000000000..60c6f3583cf
--- /dev/null
+++ b/db/interfaces.go
@@ -0,0 +1,152 @@
+package db
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"reflect"
+
+	"github.com/letsencrypt/borp"
+)
+
+// These interfaces exist to aid in mocking database operations for unit tests.
+//
+// By convention, any function that takes a OneSelector, Selector,
+// Inserter, Execer, or SelectExecer as an argument expects
+// that a context has already been applied to the relevant DbMap or
+// Transaction object.
+
+// A OneSelector is anything that provides a `SelectOne` function.
+type OneSelector interface {
+	SelectOne(context.Context, any, string, ...any) error
+}
+
+// A Selector is anything that provides a `Select` function.
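Stepping back to the generic selector and `Rows` API defined above, a caller would typically compose them like the sketch below; the model struct, table contents, and WHERE clause are hypothetical, and this assumes the executor's table map already knows the model type:

```go
package dbexample

import (
	"context"
	"time"

	"github.com/letsencrypt/boulder/db"
)

// certStatusModel is a hypothetical model: its lowercased field names must
// match the mapped table's column names exactly.
type certStatusModel struct {
	Serial  string
	Expires time.Time
}

// expiringSerials collects serials for rows expiring by the cutoff.
func expiringSerials(ctx context.Context, dbMap db.MappedExecutor, cutoff time.Time) ([]string, error) {
	selector, err := db.NewMappedSelector[certStatusModel](dbMap)
	if err != nil {
		return nil, err
	}

	// Clauses must begin at "WHERE ..."; the SELECT/FROM parts are generated.
	rows, err := selector.QueryContext(ctx, "WHERE expires <= ?", cutoff)
	if err != nil {
		return nil, err
	}

	var serials []string
	// ForEach closes the row reader for us, on both success and failure.
	err = rows.ForEach(func(m *certStatusModel) error {
		serials = append(serials, m.Serial)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return serials, nil
}
```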
+type Selector interface {
+	Select(context.Context, any, string, ...any) ([]any, error)
+}
+
+// An Inserter is anything that provides an `Insert` function.
+type Inserter interface {
+	Insert(context.Context, ...any) error
+}
+
+// An Execer is anything that provides an `ExecContext` function.
+type Execer interface {
+	ExecContext(context.Context, string, ...any) (sql.Result, error)
+}
+
+// SelectExecer offers a subset of borp.SqlExecutor's methods: Select and
+// ExecContext.
+type SelectExecer interface {
+	Selector
+	Execer
+}
+
+// DatabaseMap offers the full combination of OneSelector, Inserter,
+// SelectExecer, and a Begin function for creating a Transaction.
+type DatabaseMap interface {
+	OneSelector
+	Inserter
+	SelectExecer
+	BeginTx(context.Context) (Transaction, error)
+}
+
+// Executor offers the full combination of OneSelector, Inserter, SelectExecer
+// and adds a handful of other high level borp methods we use in Boulder.
+type Executor interface {
+	OneSelector
+	Inserter
+	SelectExecer
+	Delete(context.Context, ...any) (int64, error)
+	Get(context.Context, any, ...any) (any, error)
+	Update(context.Context, ...any) (int64, error)
+	QueryContext(context.Context, string, ...any) (*sql.Rows, error)
+}
+
+// Transaction extends an Executor and adds Rollback and Commit.
+type Transaction interface {
+	Executor
+	Rollback() error
+	Commit() error
+}
+
+// MappedExecutor is anything that can map types to tables.
+type MappedExecutor interface {
+	TableFor(reflect.Type, bool) (*borp.TableMap, error)
+	QueryContext(ctx context.Context, clauses string, args ...any) (*sql.Rows, error)
+}
+
+// MappedSelector is anything that can execute various kinds of SQL statements
+// against a table automatically determined from the parameterized type.
+type MappedSelector[T any] interface {
+	QueryContext(ctx context.Context, clauses string, args ...any) (Rows[T], error)
+	QueryFrom(ctx context.Context, tablename string, clauses string, args ...any) (Rows[T], error)
+}
+
+// Rows is anything which lets you iterate over the result rows of a SELECT
+// query. It is similar to sql.Rows, but generic.
+type Rows[T any] interface {
+	ForEach(func(*T) error) error
+	Next() bool
+	Get() (*T, error)
+	Err() error
+	Close() error
+}
+
+// MockSqlExecutor implements SqlExecutor by returning errors from every call.
+//
+// TODO: To mock out WithContext, we needed to be able to return objects that satisfy
+// borp.SqlExecutor. That's a pretty big interface, so we specify one no-op mock
+// that we can embed everywhere we need to satisfy it.
+// Note: MockSqlExecutor does *not* implement WithContext. The expectation is
+// that structs that embed MockSqlExecutor will define their own WithContext
+// that returns a reference to themselves. That makes it easy for those structs
+// to override the specific methods they need to implement (e.g. SelectOne).
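Concretely, the embedding pattern that comment describes looks something like this hypothetical test double, which inherits every no-op method and shadows only `SelectOne`:

```go
package mocks

import (
	"context"
	"fmt"

	"github.com/letsencrypt/boulder/db"
)

// mockOneCert satisfies the big executor interface via the embedded
// MockSqlExecutor, overriding only the method the test cares about.
type mockOneCert struct {
	db.MockSqlExecutor
}

// SelectOne fills the holder with a canned serial instead of erroring.
func (m mockOneCert) SelectOne(_ context.Context, holder any, _ string, _ ...any) error {
	serial, ok := holder.(*string)
	if !ok {
		return fmt.Errorf("unexpected holder type %T", holder)
	}
	*serial = "00deadbeef"
	return nil
}
```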
+type MockSqlExecutor struct{} + +func (mse MockSqlExecutor) Get(ctx context.Context, i any, keys ...any) (any, error) { + return nil, errors.New("unimplemented") +} +func (mse MockSqlExecutor) Insert(ctx context.Context, list ...any) error { + return errors.New("unimplemented") +} +func (mse MockSqlExecutor) Update(ctx context.Context, list ...any) (int64, error) { + return 0, errors.New("unimplemented") +} +func (mse MockSqlExecutor) Delete(ctx context.Context, list ...any) (int64, error) { + return 0, errors.New("unimplemented") +} +func (mse MockSqlExecutor) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + return nil, errors.New("unimplemented") +} +func (mse MockSqlExecutor) Select(ctx context.Context, i any, query string, args ...any) ([]any, error) { + return nil, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectInt(ctx context.Context, query string, args ...any) (int64, error) { + return 0, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectNullInt(ctx context.Context, query string, args ...any) (sql.NullInt64, error) { + return sql.NullInt64{}, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectFloat(ctx context.Context, query string, args ...any) (float64, error) { + return 0, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectNullFloat(ctx context.Context, query string, args ...any) (sql.NullFloat64, error) { + return sql.NullFloat64{}, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectStr(ctx context.Context, query string, args ...any) (string, error) { + return "", errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectNullStr(ctx context.Context, query string, args ...any) (sql.NullString, error) { + return sql.NullString{}, errors.New("unimplemented") +} +func (mse MockSqlExecutor) SelectOne(ctx context.Context, holder any, query string, args ...any) error { + return errors.New("unimplemented") +} +func (mse MockSqlExecutor) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + return nil, errors.New("unimplemented") +} +func (mse MockSqlExecutor) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + return nil +} diff --git a/db/map.go b/db/map.go index 9c8942225f3..758a21e7c03 100644 --- a/db/map.go +++ b/db/map.go @@ -5,10 +5,11 @@ import ( "database/sql" "errors" "fmt" + "reflect" "regexp" - "strings" - gorp "github.com/go-gorp/gorp/v3" + "github.com/go-sql-driver/mysql" + "github.com/letsencrypt/borp" ) // ErrDatabaseOp wraps an underlying err with a description of the operation @@ -20,21 +21,6 @@ type ErrDatabaseOp struct { Err error } -// noRows returns true when the underlying error is sql.ErrNoRows and indicates -// that the error was that no results were found. -func (e ErrDatabaseOp) noRows() bool { - return e.Err == sql.ErrNoRows -} - -// duplicate returns true when the underlying error has a message with a prefix -// matching "Error 1062: Duplicate entry". This is the error prefixed returned -// by MariaDB when a duplicate row is to be inserted. -func (e ErrDatabaseOp) duplicate() bool { - return strings.HasPrefix( - e.Err.Error(), - "Error 1062: Duplicate entry") -} - // Error for an ErrDatabaseOp composes a message with context about the // operation and table as well as the underlying Err's error message. 
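The hunks below replace string-prefix matching with inspection of the error chain via `errors.Is`/`errors.As`. The practical difference is that arbitrary wrapping no longer hides the underlying condition. A standalone illustration using the real go-sql-driver error type (the messages and wrapping layers are invented):

```go
package main

import (
	"database/sql"
	"errors"
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// An error as the driver might return it, wrapped twice by callers.
	inner := &mysql.MySQLError{Number: 1062, Message: "Duplicate entry 'x' for key 'serial'"}
	wrapped := fmt.Errorf("insert cert: %w", fmt.Errorf("db op: %w", inner))

	// errors.As walks the chain, so the wrapping doesn't hide the error code.
	var mysqlErr *mysql.MySQLError
	fmt.Println(errors.As(wrapped, &mysqlErr) && mysqlErr.Number == 1062) // true

	// errors.Is does the same for sentinel errors like sql.ErrNoRows.
	noRows := fmt.Errorf("select one: %w", sql.ErrNoRows)
	fmt.Println(errors.Is(noRows, sql.ErrNoRows)) // true
}
```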
 func (e ErrDatabaseOp) Error() string {
@@ -52,73 +38,86 @@
 		e.Err)
 }
 
-// IsNoRows is a utility function for casting an error to ErrDatabaseOp and
-// returning true if its wrapped err is sql.ErrNoRows. If the error is not an
-// ErrDatabaseOp the return value of IsNoRows will always be false.
+// Unwrap returns the inner error to allow inspection of error chains.
+func (e ErrDatabaseOp) Unwrap() error {
+	return e.Err
+}
+
+// IsNoRows is a utility function for determining if an error wraps the Go sql
+// package's ErrNoRows, which is returned when a Scan operation has no more
+// results to return, and as such is returned by many borp methods.
 func IsNoRows(err error) bool {
-	// if the err is an ErrDatabaseOp instance, return its noRows() result to see
-	// if the inner err is sql.ErrNoRows
-	var dbErr ErrDatabaseOp
-	if errors.As(err, &dbErr) {
-		return dbErr.noRows()
-	}
-	return false
+	return errors.Is(err, sql.ErrNoRows)
 }
 
-// IsDuplicate is a utility function for casting an error to ErrDatabaseOp and
-// returning a boolean indicating if it is a duplicate error or not. If the
-// error is not an ErrDatabaseOp the return value of IsDuplicate will always be
-// false.
+// IsDuplicate is a utility function for determining if an error wraps MySQL's
+// Error 1062: Duplicate entry. This error is returned when inserting a row
+// would violate a unique key constraint.
 func IsDuplicate(err error) bool {
-	// if the err is an ErrDatabaseOp instance, return its duplicate() result to
-	// see if the inner err indicates a duplicate row error.
-	var dbErr ErrDatabaseOp
-	if errors.As(err, &dbErr) {
-		return dbErr.duplicate()
-	}
-	return false
+	var dbErr *mysql.MySQLError
+	return errors.As(err, &dbErr) && dbErr.Number == 1062
 }
 
-// WrappedMap wraps a *gorp.DbMap such that its major functions wrap error
+// WrappedMap wraps a *borp.DbMap such that its major functions wrap error
 // results in ErrDatabaseOp instances before returning them to the caller.
 type WrappedMap struct {
-	*gorp.DbMap
+	dbMap *borp.DbMap
 }
 
-func (m *WrappedMap) Get(holder interface{}, keys ...interface{}) (interface{}, error) {
-	return WrappedExecutor{SqlExecutor: m.DbMap}.Get(holder, keys...)
+func NewWrappedMap(dbMap *borp.DbMap) *WrappedMap {
+	return &WrappedMap{dbMap: dbMap}
 }
 
-func (m *WrappedMap) Insert(list ...interface{}) error {
-	return WrappedExecutor{SqlExecutor: m.DbMap}.Insert(list...)
+func (m *WrappedMap) TableFor(t reflect.Type, checkPK bool) (*borp.TableMap, error) {
+	return m.dbMap.TableFor(t, checkPK)
 }
 
-func (m *WrappedMap) Update(list ...interface{}) (int64, error) {
-	return WrappedExecutor{SqlExecutor: m.DbMap}.Update(list...)
+func (m *WrappedMap) Get(ctx context.Context, holder any, keys ...any) (any, error) {
+	return WrappedExecutor{sqlExecutor: m.dbMap}.Get(ctx, holder, keys...)
 }
 
-func (m *WrappedMap) Delete(list ...interface{}) (int64, error) {
-	return WrappedExecutor{SqlExecutor: m.DbMap}.Delete(list...)
+func (m *WrappedMap) Insert(ctx context.Context, list ...any) error {
+	return WrappedExecutor{sqlExecutor: m.dbMap}.Insert(ctx, list...)
 }
 
-func (m *WrappedMap) Select(holder interface{}, query string, args ...interface{}) ([]interface{}, error) {
-	return WrappedExecutor{SqlExecutor: m.DbMap}.Select(holder, query, args...)
+func (m *WrappedMap) Update(ctx context.Context, list ...any) (int64, error) {
+	return WrappedExecutor{sqlExecutor: m.dbMap}.Update(ctx, list...)
} -func (m *WrappedMap) SelectOne(holder interface{}, query string, args ...interface{}) error { - return WrappedExecutor{SqlExecutor: m.DbMap}.SelectOne(holder, query, args...) +func (m *WrappedMap) Delete(ctx context.Context, list ...any) (int64, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.Delete(ctx, list...) } -func (m *WrappedMap) Exec(query string, args ...interface{}) (sql.Result, error) { - return WrappedExecutor{SqlExecutor: m.DbMap}.Exec(query, args...) +func (m *WrappedMap) Select(ctx context.Context, holder any, query string, args ...any) ([]any, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.Select(ctx, holder, query, args...) } -func (m *WrappedMap) WithContext(ctx context.Context) gorp.SqlExecutor { - return WrappedExecutor{SqlExecutor: m.DbMap.WithContext(ctx)} +func (m *WrappedMap) SelectOne(ctx context.Context, holder any, query string, args ...any) error { + return WrappedExecutor{sqlExecutor: m.dbMap}.SelectOne(ctx, holder, query, args...) } -func (m *WrappedMap) Begin() (Transaction, error) { - tx, err := m.DbMap.Begin() +func (m *WrappedMap) SelectNullInt(ctx context.Context, query string, args ...any) (sql.NullInt64, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.SelectNullInt(ctx, query, args...) +} + +func (m *WrappedMap) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.QueryContext(ctx, query, args...) +} + +func (m *WrappedMap) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + return WrappedExecutor{sqlExecutor: m.dbMap}.QueryRowContext(ctx, query, args...) +} + +func (m *WrappedMap) SelectStr(ctx context.Context, query string, args ...any) (string, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.SelectStr(ctx, query, args...) +} + +func (m *WrappedMap) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + return WrappedExecutor{sqlExecutor: m.dbMap}.ExecContext(ctx, query, args...) +} + +func (m *WrappedMap) BeginTx(ctx context.Context) (Transaction, error) { + tx, err := m.dbMap.BeginTx(ctx) if err != nil { return tx, ErrDatabaseOp{ Op: "begin transaction", @@ -126,65 +125,77 @@ func (m *WrappedMap) Begin() (Transaction, error) { } } return WrappedTransaction{ - Transaction: tx, + transaction: tx, }, err } -// WrappedTransaction wraps a *gorp.Transaction such that its major functions +func (m *WrappedMap) ColumnsForModel(model any) ([]string, error) { + tbl, err := m.dbMap.TableFor(reflect.TypeOf(model), true) + if err != nil { + return nil, err + } + var columns []string + for _, col := range tbl.Columns { + columns = append(columns, col.ColumnName) + } + return columns, nil +} + +// WrappedTransaction wraps a *borp.Transaction such that its major functions // wrap error results in ErrDatabaseOp instances before returning them to the // caller. 
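A typical call site for this transaction wrapper would presumably follow the usual begin/defer-rollback/commit shape. A sketch against the `DatabaseMap` interface defined earlier (`insertThing` and its argument are hypothetical):

```go
package dbexample

import (
	"context"

	"github.com/letsencrypt/boulder/db"
)

// insertThing shows the begin/rollback/commit shape for a single insert.
func insertThing(ctx context.Context, dbMap db.DatabaseMap, thing any) error {
	tx, err := dbMap.BeginTx(ctx)
	if err != nil {
		return err
	}
	// Rollback after a successful Commit returns sql.ErrTxDone, which is
	// safe to ignore, so one defer covers every early-return path.
	defer func() { _ = tx.Rollback() }()

	err = tx.Insert(ctx, thing)
	if err != nil {
		return err
	}
	return tx.Commit()
}
```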
type WrappedTransaction struct { - *gorp.Transaction -} - -func (tx WrappedTransaction) WithContext(ctx context.Context) gorp.SqlExecutor { - return WrappedExecutor{SqlExecutor: tx.Transaction.WithContext(ctx)} + transaction *borp.Transaction } func (tx WrappedTransaction) Commit() error { - return tx.Transaction.Commit() + return tx.transaction.Commit() } func (tx WrappedTransaction) Rollback() error { - return tx.Transaction.Rollback() + return tx.transaction.Rollback() +} + +func (tx WrappedTransaction) Get(ctx context.Context, holder any, keys ...any) (any, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).Get(ctx, holder, keys...) } -func (tx WrappedTransaction) Get(holder interface{}, keys ...interface{}) (interface{}, error) { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Get(holder, keys...) +func (tx WrappedTransaction) Insert(ctx context.Context, list ...any) error { + return (WrappedExecutor{sqlExecutor: tx.transaction}).Insert(ctx, list...) } -func (tx WrappedTransaction) Insert(list ...interface{}) error { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Insert(list...) +func (tx WrappedTransaction) Update(ctx context.Context, list ...any) (int64, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).Update(ctx, list...) } -func (tx WrappedTransaction) Update(list ...interface{}) (int64, error) { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Update(list...) +func (tx WrappedTransaction) Delete(ctx context.Context, list ...any) (int64, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).Delete(ctx, list...) } -func (tx WrappedTransaction) Delete(list ...interface{}) (int64, error) { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Delete(list...) +func (tx WrappedTransaction) Select(ctx context.Context, holder any, query string, args ...any) ([]any, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).Select(ctx, holder, query, args...) } -func (tx WrappedTransaction) Select(holder interface{}, query string, args ...interface{}) ([]interface{}, error) { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Select(holder, query, args...) +func (tx WrappedTransaction) SelectOne(ctx context.Context, holder any, query string, args ...any) error { + return (WrappedExecutor{sqlExecutor: tx.transaction}).SelectOne(ctx, holder, query, args...) } -func (tx WrappedTransaction) SelectOne(holder interface{}, query string, args ...interface{}) error { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).SelectOne(holder, query, args...) +func (tx WrappedTransaction) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).QueryContext(ctx, query, args...) } -func (tx WrappedTransaction) Exec(query string, args ...interface{}) (sql.Result, error) { - return (WrappedExecutor{SqlExecutor: tx.Transaction}).Exec(query, args...) +func (tx WrappedTransaction) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + return (WrappedExecutor{sqlExecutor: tx.transaction}).ExecContext(ctx, query, args...) } -// WrappedExecutor wraps a gorp.SqlExecutor such that its major functions +// WrappedExecutor wraps a borp.SqlExecutor such that its major functions // wrap error results in ErrDatabaseOp instances before returning them to the // caller. 
type WrappedExecutor struct { - gorp.SqlExecutor + sqlExecutor borp.SqlExecutor } -func errForOp(operation string, err error, list []interface{}) ErrDatabaseOp { +func errForOp(operation string, err error, list []any) ErrDatabaseOp { table := "unknown" if len(list) > 0 { table = fmt.Sprintf("%T", list[0]) @@ -196,7 +207,7 @@ func errForOp(operation string, err error, list []interface{}) ErrDatabaseOp { } } -func errForQuery(query, operation string, err error, list []interface{}) ErrDatabaseOp { +func errForQuery(query, operation string, err error, list []any) ErrDatabaseOp { // Extract the table from the query table := tableFromQuery(query) if table == "" && len(list) > 0 { @@ -217,54 +228,84 @@ func errForQuery(query, operation string, err error, list []interface{}) ErrData } } -func (we WrappedExecutor) Get(holder interface{}, keys ...interface{}) (interface{}, error) { - res, err := we.SqlExecutor.Get(holder, keys...) +func (we WrappedExecutor) Get(ctx context.Context, holder any, keys ...any) (any, error) { + res, err := we.sqlExecutor.Get(ctx, holder, keys...) if err != nil { - return res, errForOp("get", err, []interface{}{holder}) + return res, errForOp("get", err, []any{holder}) } return res, err } -func (we WrappedExecutor) Insert(list ...interface{}) error { - err := we.SqlExecutor.Insert(list...) +func (we WrappedExecutor) Insert(ctx context.Context, list ...any) error { + err := we.sqlExecutor.Insert(ctx, list...) if err != nil { return errForOp("insert", err, list) } return nil } -func (we WrappedExecutor) Update(list ...interface{}) (int64, error) { - updatedRows, err := we.SqlExecutor.Update(list...) +func (we WrappedExecutor) Update(ctx context.Context, list ...any) (int64, error) { + updatedRows, err := we.sqlExecutor.Update(ctx, list...) if err != nil { return updatedRows, errForOp("update", err, list) } return updatedRows, err } -func (we WrappedExecutor) Delete(list ...interface{}) (int64, error) { - deletedRows, err := we.SqlExecutor.Delete(list...) +func (we WrappedExecutor) Delete(ctx context.Context, list ...any) (int64, error) { + deletedRows, err := we.sqlExecutor.Delete(ctx, list...) if err != nil { return deletedRows, errForOp("delete", err, list) } return deletedRows, err } -func (we WrappedExecutor) Select(holder interface{}, query string, args ...interface{}) ([]interface{}, error) { - result, err := we.SqlExecutor.Select(holder, query, args...) +func (we WrappedExecutor) Select(ctx context.Context, holder any, query string, args ...any) ([]any, error) { + result, err := we.sqlExecutor.Select(ctx, holder, query, args...) if err != nil { - return result, errForQuery(query, "select", err, []interface{}{holder}) + return result, errForQuery(query, "select", err, []any{holder}) } return result, err } -func (we WrappedExecutor) SelectOne(holder interface{}, query string, args ...interface{}) error { - err := we.SqlExecutor.SelectOne(holder, query, args...) +func (we WrappedExecutor) SelectOne(ctx context.Context, holder any, query string, args ...any) error { + err := we.sqlExecutor.SelectOne(ctx, holder, query, args...) if err != nil { - return errForQuery(query, "select one", err, []interface{}{holder}) + return errForQuery(query, "select one", err, []any{holder}) } return nil } +func (we WrappedExecutor) SelectNullInt(ctx context.Context, query string, args ...any) (sql.NullInt64, error) { + rows, err := we.sqlExecutor.SelectNullInt(ctx, query, args...) 
+ if err != nil { + return sql.NullInt64{}, errForQuery(query, "select", err, nil) + } + return rows, nil +} + +func (we WrappedExecutor) QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row { + // Note: we can't do error wrapping here because the error is passed via the `*sql.Row` + // object, and we can't produce a `*sql.Row` object with a custom error because it is unexported. + return we.sqlExecutor.QueryRowContext(ctx, query, args...) +} + +func (we WrappedExecutor) SelectStr(ctx context.Context, query string, args ...any) (string, error) { + str, err := we.sqlExecutor.SelectStr(ctx, query, args...) + if err != nil { + return "", errForQuery(query, "select", err, nil) + } + return str, nil +} + +func (we WrappedExecutor) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + rows, err := we.sqlExecutor.QueryContext(ctx, query, args...) + if err != nil { + return nil, errForQuery(query, "select", err, nil) + } + return rows, nil +} + var ( // selectTableRegexp matches the table name from an SQL select statement selectTableRegexp = regexp.MustCompile(`(?i)^\s*select\s+[a-z\d:\.\(\), \_\*` + "`" + `]+\s+from\s+([a-z\d\_,` + "`" + `]+)`) @@ -277,10 +318,10 @@ var ( // tableRegexps is a list of regexps that tableFromQuery will try to use in // succession to find the table name for an SQL query. While tableFromQuery - // isn't used by the higher level gorp Insert/Update/Select/etc functions we + // isn't used by the higher level borp Insert/Update/Select/etc functions we // include regexps for matching inserts, updates, selects, etc because we want // to match the correct table when these types of queries are run through - // Exec(). + // ExecContext(). tableRegexps = []*regexp.Regexp{ selectTableRegexp, insertTableRegexp, @@ -301,8 +342,8 @@ func tableFromQuery(query string) string { return "" } -func (we WrappedExecutor) Exec(query string, args ...interface{}) (sql.Result, error) { - res, err := we.SqlExecutor.Exec(query, args...) +func (we WrappedExecutor) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + res, err := we.sqlExecutor.ExecContext(ctx, query, args...) if err != nil { return res, errForQuery(query, "exec", err, args) } diff --git a/db/map_test.go b/db/map_test.go index 297b3494eb3..11b4675bc39 100644 --- a/db/map_test.go +++ b/db/map_test.go @@ -7,9 +7,10 @@ import ( "fmt" "testing" - gorp "github.com/go-gorp/gorp/v3" + "github.com/letsencrypt/borp" "github.com/go-sql-driver/mysql" + "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/test" "github.com/letsencrypt/boulder/test/vars" @@ -48,7 +49,7 @@ func TestErrDatabaseOpError(t *testing.T) { } } -func TestErrDatabaseOpNoRows(t *testing.T) { +func TestIsNoRows(t *testing.T) { testCases := []struct { name string err ErrDatabaseOp @@ -59,7 +60,7 @@ func TestErrDatabaseOpNoRows(t *testing.T) { err: ErrDatabaseOp{ Op: "test", Table: "testTable", - Err: sql.ErrNoRows, + Err: fmt.Errorf("some wrapper around %w", sql.ErrNoRows), }, expectedNoRows: true, }, @@ -68,7 +69,7 @@ func TestErrDatabaseOpNoRows(t *testing.T) { err: ErrDatabaseOp{ Op: "test", Table: "testTable", - Err: errors.New("lots of rows. too many rows."), + Err: fmt.Errorf("some wrapper around %w", errors.New("lots of rows. 
too many rows.")), }, expectedNoRows: false, }, @@ -76,12 +77,12 @@ func TestErrDatabaseOpNoRows(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - test.AssertEquals(t, tc.err.noRows(), tc.expectedNoRows) + test.AssertEquals(t, IsNoRows(tc.err), tc.expectedNoRows) }) } } -func TestErrDatabaseOpDuplicate(t *testing.T) { +func TestIsDuplicate(t *testing.T) { testCases := []struct { name string err ErrDatabaseOp @@ -92,7 +93,7 @@ func TestErrDatabaseOpDuplicate(t *testing.T) { err: ErrDatabaseOp{ Op: "test", Table: "testTable", - Err: errors.New("Error 1062: Duplicate entry detected!!!!!!!"), + Err: fmt.Errorf("some wrapper around %w", &mysql.MySQLError{Number: 1062}), }, expectDuplicate: true, }, @@ -101,7 +102,7 @@ func TestErrDatabaseOpDuplicate(t *testing.T) { err: ErrDatabaseOp{ Op: "test", Table: "testTable", - Err: errors.New("DB forgot to save your data."), + Err: fmt.Errorf("some wrapper around %w", &mysql.MySQLError{Number: 1234}), }, expectDuplicate: false, }, @@ -109,7 +110,7 @@ func TestErrDatabaseOpDuplicate(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - test.AssertEquals(t, tc.err.duplicate(), tc.expectDuplicate) + test.AssertEquals(t, IsDuplicate(tc.err), tc.expectDuplicate) }) } } @@ -122,7 +123,7 @@ func TestTableFromQuery(t *testing.T) { expectedTable string }{ { - query: "SELECT id, jwk, jwk_sha256, contact, agreement, initialIP, createdAt, LockCol, status FROM registrations WHERE jwk_sha256 = ?", + query: "SELECT id, jwk, jwk_sha256, contact, agreement, createdAt, status FROM registrations WHERE jwk_sha256 = ?", expectedTable: "registrations", }, { @@ -134,19 +135,19 @@ func TestTableFromQuery(t *testing.T) { expectedTable: "authz2", }, { - query: "insert into `registrations` (`id`,`jwk`,`jw k_sha256`,`contact`,`agreement`,`initialIp`,`createdAt`,`LockCol`,`status`) values (null,?,?,?,?,?,?,?,?);", + query: "insert into `registrations` (`id`,`jwk`,`jwk_sha256`,`contact`,`agreement`,`createdAt`,`status`) values (null,?,?,?,?,?,?,?);", expectedTable: "`registrations`", }, { - query: "update `registrations` set `jwk`=?, `jwk_sh a256`=?, `contact`=?, `agreement`=?, `initialIp`=?, `createdAt`=?, `LockCol` =?, `status`=? where `id`=? and `LockCol`=?;", + query: "update `registrations` set `jwk`=?, `jwk_sha256`=?, `contact`=?, `agreement`=?, `createdAt`=?, `status`=? where `id`=?;", expectedTable: "`registrations`", }, { - query: "SELECT COUNT(1) FROM registrations WHERE initialIP = ? AND ? < createdAt AND createdAt <= ?", + query: "SELECT COUNT(*) FROM registrations WHERE ? < createdAt AND createdAt <= ?", expectedTable: "registrations", }, { - query: "SELECT count(1) FROM orders WHERE registrationID = ? AND created >= ? AND created < ?", + query: "SELECT COUNT(*) FROM orders WHERE registrationID = ? AND created >= ? 
AND created < ?", expectedTable: "orders", }, { @@ -161,14 +162,6 @@ func TestTableFromQuery(t *testing.T) { query: "insert into `orders` (`ID`,`RegistrationID`,`Expires`,`Created`,`Error`,`CertificateSerial`,`BeganProcessing`) values (null,?,?,?,?,?,?)", expectedTable: "`orders`", }, - { - query: "insert into `orderToAuthz2` (`OrderID`,`AuthzID`) values (?,?);", - expectedTable: "`orderToAuthz2`", - }, - { - query: "insert into `requestedNames` (`ID`,`OrderID`,`ReversedName`) values (?,?,?);", - expectedTable: "`requestedNames`", - }, { query: "UPDATE authz2 SET status = :status, attempted = :attempted, validationRecord = :validationRecord, validationError = :validationError, expires = :expires WHERE id = :id AND status = :pending", expectedTable: "authz2", @@ -189,10 +182,6 @@ func TestTableFromQuery(t *testing.T) { query: "insert into `certificates` (`registrationID`,`serial`,`digest`,`der`,`issued`,`expires`) values (?,?,?,?,?,?);", expectedTable: "`certificates`", }, - { - query: "INSERT INTO certificatesPerName (eTLDPlusOne, time, count) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE count=count+1;", - expectedTable: "certificatesPerName", - }, { query: "insert into `fqdnSets` (`ID`,`SetHash`,`Serial`,`Issued`,`Expires`) values (null,?,?,?,?);", expectedTable: "`fqdnSets`", @@ -236,25 +225,28 @@ func testDbMap(t *testing.T) *WrappedMap { dbConn, err := sql.Open("mysql", config.FormatDSN()) test.AssertNotError(t, err, "opening DB connection") - dialect := gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"} + dialect := borp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"} // NOTE(@cpu): We avoid giving a sa.BoulderTypeConverter to the DbMap field to // avoid the cyclic dep. We don't need to convert any types in the db tests. - dbMap := &gorp.DbMap{Db: dbConn, Dialect: dialect, TypeConverter: nil} - return &WrappedMap{DbMap: dbMap} + dbMap := &borp.DbMap{Db: dbConn, Dialect: dialect, TypeConverter: nil} + return &WrappedMap{dbMap: dbMap} } func TestWrappedMap(t *testing.T) { mustDbErr := func(err error) ErrDatabaseOp { + t.Helper() var dbOpErr ErrDatabaseOp test.AssertErrorWraps(t, err, &dbOpErr) return dbOpErr } + ctx := context.Background() + testWrapper := func(dbMap Executor) { reg := &core.Registration{} // Test wrapped Get - _, err := dbMap.Get(reg) + _, err := dbMap.Get(ctx, reg) test.AssertError(t, err, "expected err Getting Registration w/o type converter") dbOpErr := mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "get") @@ -262,7 +254,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Insert - err = dbMap.Insert(reg) + err = dbMap.Insert(ctx, reg) test.AssertError(t, err, "expected err Inserting Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "insert") @@ -270,7 +262,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Update - _, err = dbMap.Update(reg) + _, err = dbMap.Update(ctx, reg) test.AssertError(t, err, "expected err Updating Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "update") @@ -278,7 +270,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Delete - _, err = dbMap.Delete(reg) + _, err = dbMap.Delete(ctx, reg) test.AssertError(t, err, "expected err Deleting Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "delete") @@ 
-286,7 +278,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Select with a bogus query - _, err = dbMap.Select(reg, "blah") + _, err = dbMap.Select(ctx, reg, "blah") test.AssertError(t, err, "expected err Selecting Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "select") @@ -294,7 +286,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Select with a valid query - _, err = dbMap.Select(reg, "SELECT id, contact FROM registrationzzz WHERE id > 1;") + _, err = dbMap.Select(ctx, reg, "SELECT id, contact FROM registrationzzz WHERE id > 1;") test.AssertError(t, err, "expected err Selecting Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "select") @@ -302,7 +294,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped SelectOne with a bogus query - err = dbMap.SelectOne(reg, "blah") + err = dbMap.SelectOne(ctx, reg, "blah") test.AssertError(t, err, "expected err SelectOne-ing Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "select one") @@ -310,7 +302,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped SelectOne with a valid query - err = dbMap.SelectOne(reg, "SELECT contact FROM doesNotExist WHERE id=1;") + err = dbMap.SelectOne(ctx, reg, "SELECT contact FROM doesNotExist WHERE id=1;") test.AssertError(t, err, "expected err SelectOne-ing Registration w/o type converter") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "select one") @@ -318,7 +310,7 @@ func TestWrappedMap(t *testing.T) { test.AssertError(t, dbOpErr.Err, "expected non-nil underlying err") // Test wrapped Exec - _, err = dbMap.Exec("INSERT INTO whatever (id) VALUES (?) WHERE id = ?", 10) + _, err = dbMap.ExecContext(ctx, "INSERT INTO whatever (id) VALUES (?) WHERE id = ?", 10) test.AssertError(t, err, "expected err Exec-ing bad query") dbOpErr = mustDbErr(err) test.AssertEquals(t, dbOpErr.Op, "exec") @@ -333,24 +325,10 @@ func TestWrappedMap(t *testing.T) { // database errors. testWrapper(dbMap) - // Using WithContext on the WrappedMap should return a map that continues to - // operate in the expected fashion. - dbMapWithCtx := dbMap.WithContext(context.Background()) - testWrapper(dbMapWithCtx) - // Using Begin to start a transaction with the dbMap should return a // transaction that continues to operate in the expected fashion. - tx, err := dbMap.Begin() + tx, err := dbMap.BeginTx(ctx) defer func() { _ = tx.Rollback() }() test.AssertNotError(t, err, "unexpected error beginning transaction") testWrapper(tx) - - // Using Begin to start a transaction with the dbMap and then using - // WithContext should return a transaction that continues to operate in the - // expected fashion. - tx, err = dbMap.Begin() - defer func() { _ = tx.Rollback() }() - test.AssertNotError(t, err, "unexpected error beginning transaction") - txWithContext := tx.WithContext(context.Background()) - testWrapper(txWithContext) } diff --git a/db/mocks.go b/db/mocks.go deleted file mode 100644 index 759233d2be5..00000000000 --- a/db/mocks.go +++ /dev/null @@ -1,71 +0,0 @@ -package db - -import ( - "context" - "database/sql" - - "github.com/go-gorp/gorp/v3" -) - -// These interfaces exist to aid in mocking database operations for unit tests. 
-// -// By convention, any function that takes a OneSelector, Selector, -// Inserter, Execer, or SelectExecer as as an argument expects -// that a context has already been applied to the relevant DbMap or -// Transaction object. - -// A OneSelector is anything that provides a `SelectOne` function. -type OneSelector interface { - SelectOne(interface{}, string, ...interface{}) error -} - -// A Selector is anything that provides a `Select` function. -type Selector interface { - Select(interface{}, string, ...interface{}) ([]interface{}, error) -} - -// A Inserter is anything that provides an `Insert` function -type Inserter interface { - Insert(list ...interface{}) error -} - -// A Execer is anything that provides an `Exec` function -type Execer interface { - Exec(string, ...interface{}) (sql.Result, error) -} - -// SelectExecer offers a subset of gorp.SqlExecutor's methods: Select and -// Exec. -type SelectExecer interface { - Selector - Execer -} - -// DatabaseMap offers the full combination of OneSelector, Inserter, -// SelectExecer, and a Begin function for creating a Transaction. -type DatabaseMap interface { - OneSelector - Inserter - SelectExecer - Begin() (Transaction, error) -} - -// Executor offers the full combination of OneSelector, Inserter, SelectExecer -// and adds a handful of other high level Gorp methods we use in Boulder. -type Executor interface { - OneSelector - Inserter - SelectExecer - Delete(...interface{}) (int64, error) - Get(interface{}, ...interface{}) (interface{}, error) - Update(...interface{}) (int64, error) - Query(string, ...interface{}) (*sql.Rows, error) -} - -// Transaction extends an Executor and adds Rollback, Commit, and WithContext. -type Transaction interface { - Executor - Rollback() error - Commit() error - WithContext(ctx context.Context) gorp.SqlExecutor -} diff --git a/db/multi.go b/db/multi.go index 8f9b6a6814a..9906326e0a1 100644 --- a/db/multi.go +++ b/db/multi.go @@ -1,103 +1,107 @@ package db import ( + "context" "fmt" "strings" ) // MultiInserter makes it easy to construct a -// `INSERT INTO table (...) VALUES ... RETURNING id;` +// `INSERT INTO table (...) VALUES ...;` // query which inserts multiple rows into the same table. It can also execute // the resulting query. type MultiInserter struct { - table string - fields string - retCol string - numFields int - values [][]interface{} + // These are validated by the constructor as containing only characters + // that are allowed in an unquoted identifier. + // https://mariadb.com/kb/en/identifier-names/#unquoted + table string + fields []string + + values [][]any } // NewMultiInserter creates a new MultiInserter, checking for reasonable table // name and list of fields. -func NewMultiInserter(table string, fields string, retCol string) (*MultiInserter, error) { - numFields := len(strings.Split(fields, ",")) - if len(table) == 0 || len(fields) == 0 || numFields == 0 { +// Safety: `table` and `fields` must contain only strings that are known at +// compile time. They must not contain user-controlled strings. 
+func NewMultiInserter(table string, fields []string) (*MultiInserter, error) { + if len(table) == 0 || len(fields) == 0 { return nil, fmt.Errorf("empty table name or fields list") } - if strings.Contains(retCol, ",") { - return nil, fmt.Errorf("return column must be singular, but got %q", retCol) + + err := validMariaDBUnquotedIdentifier(table) + if err != nil { + return nil, err + } + for _, field := range fields { + err := validMariaDBUnquotedIdentifier(field) + if err != nil { + return nil, err + } } return &MultiInserter{ - table: table, - fields: fields, - retCol: retCol, - numFields: numFields, - values: make([][]interface{}, 0), + table: table, + fields: fields, + values: make([][]any, 0), }, nil } // Add registers another row to be included in the Insert query. -func (mi *MultiInserter) Add(row []interface{}) error { - if len(row) != mi.numFields { - return fmt.Errorf("field count mismatch, got %d, expected %d", len(row), mi.numFields) +func (mi *MultiInserter) Add(row []any) error { + if len(row) != len(mi.fields) { + return fmt.Errorf("field count mismatch, got %d, expected %d", len(row), len(mi.fields)) } mi.values = append(mi.values, row) return nil } // query returns the formatted query string, and the slice of arguments for -// for gorp to use in place of the query's question marks. Currently only +// for borp to use in place of the query's question marks. Currently only // used by .Insert(), below. -func (mi *MultiInserter) query() (string, []interface{}) { - questionsRow := strings.TrimRight(strings.Repeat("?,", mi.numFields), ",") - +func (mi *MultiInserter) query() (string, []any) { var questionsBuf strings.Builder - var queryArgs []interface{} + var queryArgs []any for _, row := range mi.values { - fmt.Fprintf(&questionsBuf, "(%s),", questionsRow) + // Safety: We are interpolating a string that will be used in a SQL + // query, but we constructed that string in this function and know it + // consists only of question marks joined with commas. + fmt.Fprintf(&questionsBuf, "(%s),", QuestionMarks(len(mi.fields))) queryArgs = append(queryArgs, row...) } questions := strings.TrimRight(questionsBuf.String(), ",") - returning := "" - if mi.retCol != "" { - returning = fmt.Sprintf(" RETURNING %s", mi.retCol) - } - query := fmt.Sprintf("INSERT INTO %s (%s) VALUES %s%s;", mi.table, mi.fields, questions, returning) + // Safety: we are interpolating `mi.table` and `mi.fields` into an SQL + // query. We know they contain, respectively, a valid unquoted identifier + // and a slice of valid unquoted identifiers because we verified that in + // the constructor. We know the query overall has valid syntax because we + // generate it entirely within this function. + query := fmt.Sprintf("INSERT INTO %s (%s) VALUES %s", mi.table, strings.Join(mi.fields, ","), questions) return query, queryArgs } -// Insert performs the action represented by .query() on the provided database, -// which is assumed to already have a context attached. If a non-empty retCol -// was provided, then it returns the list of values from that column returned -// by the query. -func (mi *MultiInserter) Insert(exec Executor) ([]int64, error) { - query, queryArgs := mi.query() - rows, err := exec.Query(query, queryArgs...) - if err != nil { - return nil, err +// Insert inserts all the collected rows into the database represented by +// `queryer`. 
+func (mi *MultiInserter) Insert(ctx context.Context, db Execer) error { + if len(mi.values) == 0 { + return nil } - ids := make([]int64, 0, len(mi.values)) - if mi.retCol != "" { - for rows.Next() { - var id int64 - err = rows.Scan(&id) - if err != nil { - rows.Close() - return nil, err - } - ids = append(ids, id) - } + query, queryArgs := mi.query() + res, err := db.ExecContext(ctx, query, queryArgs...) + if err != nil { + return err } - err = rows.Close() + affected, err := res.RowsAffected() if err != nil { - return nil, err + return err + } + if affected != int64(len(mi.values)) { + return fmt.Errorf("unexpected number of rows inserted: %d != %d", affected, len(mi.values)) } - return ids, nil + return nil } diff --git a/db/multi_test.go b/db/multi_test.go index 61724ca402c..e9a54461318 100644 --- a/db/multi_test.go +++ b/db/multi_test.go @@ -7,64 +7,59 @@ import ( ) func TestNewMulti(t *testing.T) { - _, err := NewMultiInserter("", "colA", "") + _, err := NewMultiInserter("", []string{"colA"}) test.AssertError(t, err, "Empty table name should fail") - _, err = NewMultiInserter("myTable", "", "") - test.AssertError(t, err, "Empty fields string should fail") + _, err = NewMultiInserter("myTable", nil) + test.AssertError(t, err, "Empty fields list should fail") - mi, err := NewMultiInserter("myTable", "colA", "") + mi, err := NewMultiInserter("myTable", []string{"colA"}) test.AssertNotError(t, err, "Single-column construction should not fail") - test.AssertEquals(t, mi.numFields, 1) + test.AssertEquals(t, len(mi.fields), 1) - mi, err = NewMultiInserter("myTable", "colA,colB, colC", "") + mi, err = NewMultiInserter("myTable", []string{"colA", "colB", "colC"}) test.AssertNotError(t, err, "Multi-column construction should not fail") - test.AssertEquals(t, mi.numFields, 3) + test.AssertEquals(t, len(mi.fields), 3) + + _, err = NewMultiInserter("foo\"bar", []string{"colA"}) + test.AssertError(t, err, "expected error for invalid table name") + + _, err = NewMultiInserter("myTable", []string{"colA", "foo\"bar"}) + test.AssertError(t, err, "expected error for invalid column name") } func TestMultiAdd(t *testing.T) { - mi, err := NewMultiInserter("table", "a,b,c", "") + mi, err := NewMultiInserter("table", []string{"a", "b", "c"}) test.AssertNotError(t, err, "Failed to create test MultiInserter") - err = mi.Add([]interface{}{}) + err = mi.Add([]any{}) test.AssertError(t, err, "Adding empty row should fail") - err = mi.Add([]interface{}{"foo"}) + err = mi.Add([]any{"foo"}) test.AssertError(t, err, "Adding short row should fail") - err = mi.Add([]interface{}{"foo", "bar", "baz", "bing", "boom"}) + err = mi.Add([]any{"foo", "bar", "baz", "bing", "boom"}) test.AssertError(t, err, "Adding long row should fail") - err = mi.Add([]interface{}{"one", "two", "three"}) + err = mi.Add([]any{"one", "two", "three"}) test.AssertNotError(t, err, "Adding correct-length row shouldn't fail") test.AssertEquals(t, len(mi.values), 1) - err = mi.Add([]interface{}{1, "two", map[string]int{"three": 3}}) + err = mi.Add([]any{1, "two", map[string]int{"three": 3}}) test.AssertNotError(t, err, "Adding heterogeneous row shouldn't fail") test.AssertEquals(t, len(mi.values), 2) // Note that .Add does *not* enforce that each row is of the same types. 
} func TestMultiQuery(t *testing.T) { - mi, err := NewMultiInserter("table", "a,b,c", "") + mi, err := NewMultiInserter("table", []string{"a", "b", "c"}) test.AssertNotError(t, err, "Failed to create test MultiInserter") - err = mi.Add([]interface{}{"one", "two", "three"}) + err = mi.Add([]any{"one", "two", "three"}) test.AssertNotError(t, err, "Failed to insert test row") - err = mi.Add([]interface{}{"egy", "kettö", "három"}) + err = mi.Add([]any{"egy", "kettö", "három"}) test.AssertNotError(t, err, "Failed to insert test row") query, queryArgs := mi.query() - test.AssertEquals(t, query, "INSERT INTO table (a,b,c) VALUES (?,?,?),(?,?,?);") - test.AssertDeepEquals(t, queryArgs, []interface{}{"one", "two", "three", "egy", "kettö", "három"}) - - mi, err = NewMultiInserter("table", "a,b,c", "id") - test.AssertNotError(t, err, "Failed to create test MultiInserter") - err = mi.Add([]interface{}{"one", "two", "three"}) - test.AssertNotError(t, err, "Failed to insert test row") - err = mi.Add([]interface{}{"egy", "kettö", "három"}) - test.AssertNotError(t, err, "Failed to insert test row") - - query, queryArgs = mi.query() - test.AssertEquals(t, query, "INSERT INTO table (a,b,c) VALUES (?,?,?),(?,?,?) RETURNING id;") - test.AssertDeepEquals(t, queryArgs, []interface{}{"one", "two", "three", "egy", "kettö", "három"}) + test.AssertEquals(t, query, "INSERT INTO table (a,b,c) VALUES (?,?,?),(?,?,?)") + test.AssertDeepEquals(t, queryArgs, []any{"one", "two", "three", "egy", "kettö", "három"}) } diff --git a/db/qmarks.go b/db/qmarks.go new file mode 100644 index 00000000000..d69cc52209d --- /dev/null +++ b/db/qmarks.go @@ -0,0 +1,21 @@ +package db + +import "strings" + +// QuestionMarks returns a string consisting of N question marks, joined by +// commas. If n is <= 0, panics. +func QuestionMarks(n int) string { + if n <= 0 { + panic("db.QuestionMarks called with n <=0") + } + var qmarks strings.Builder + qmarks.Grow(2 * n) + for i := range n { + if i == 0 { + qmarks.WriteString("?") + } else { + qmarks.WriteString(",?") + } + } + return qmarks.String() +} diff --git a/db/qmarks_test.go b/db/qmarks_test.go new file mode 100644 index 00000000000..f76ee4f4fa0 --- /dev/null +++ b/db/qmarks_test.go @@ -0,0 +1,19 @@ +package db + +import ( + "testing" + + "github.com/letsencrypt/boulder/test" +) + +func TestQuestionMarks(t *testing.T) { + test.AssertEquals(t, QuestionMarks(1), "?") + test.AssertEquals(t, QuestionMarks(2), "?,?") + test.AssertEquals(t, QuestionMarks(3), "?,?,?") +} + +func TestQuestionMarksPanic(t *testing.T) { + defer func() { _ = recover() }() + QuestionMarks(0) + t.Errorf("calling QuestionMarks(0) did not panic as expected") +} diff --git a/db/rollback_test.go b/db/rollback_test.go index 7dcf48b80cf..99df5431c5e 100644 --- a/db/rollback_test.go +++ b/db/rollback_test.go @@ -1,6 +1,7 @@ package db import ( + "context" "testing" berrors "github.com/letsencrypt/boulder/errors" @@ -8,9 +9,10 @@ import ( ) func TestRollback(t *testing.T) { + ctx := context.Background() dbMap := testDbMap(t) - tx, _ := dbMap.Begin() + tx, _ := dbMap.BeginTx(ctx) // Commit the transaction so that a subsequent rollback will always fail. _ = tx.Commit() @@ -28,7 +30,7 @@ func TestRollback(t *testing.T) { // Create a new transaction and don't commit it this time. The rollback should // succeed. - tx, _ = dbMap.Begin() + tx, _ = dbMap.BeginTx(ctx) result = rollback(tx, innerErr) // We expect that the err is returned unwrapped. 
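
Taken together, the `MultiInserter` changes above (db/multi.go) give it a context-aware, identifier-validated API: construct it with a slice of column names, `Add` each row, then `Insert` with a context and an `Execer` (the interface named in the new `Insert` signature). A minimal usage sketch follows; the table and column names are invented for illustration, not taken from Boulder's schema:

```go
package example

import (
	"context"

	"github.com/letsencrypt/boulder/db"
)

// insertHats collects rows and inserts them in one statement using the
// refactored MultiInserter. "hats", "size", and "color" are invented names.
func insertHats(ctx context.Context, dbMap db.Execer) error {
	mi, err := db.NewMultiInserter("hats", []string{"size", "color"})
	if err != nil {
		return err
	}
	for _, row := range [][]any{{7, "blue"}, {8, "red"}} {
		err = mi.Add(row)
		if err != nil {
			return err
		}
	}
	// Insert is a no-op when no rows were added, and errors if the number
	// of rows reported affected doesn't match the number added.
	return mi.Insert(ctx, dbMap)
}
```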
diff --git a/db/transaction.go b/db/transaction.go index 1bd302916fe..c57cf9e0c93 100644 --- a/db/transaction.go +++ b/db/transaction.go @@ -3,19 +3,18 @@ package db import "context" // txFunc represents a function that does work in the context of a transaction. -type txFunc func(txWithCtx Executor) (interface{}, error) +type txFunc func(tx Executor) (any, error) // WithTransaction runs the given function in a transaction, rolling back if it // returns an error and committing if not. The provided context is also attached // to the transaction. WithTransaction also passes through a value returned by // `f`, if there is no error. -func WithTransaction(ctx context.Context, dbMap DatabaseMap, f txFunc) (interface{}, error) { - tx, err := dbMap.Begin() +func WithTransaction(ctx context.Context, dbMap DatabaseMap, f txFunc) (any, error) { + tx, err := dbMap.BeginTx(ctx) if err != nil { return nil, err } - txWithCtx := tx.WithContext(ctx) - result, err := f(txWithCtx) + result, err := f(tx) if err != nil { return nil, rollback(tx, err) } diff --git a/docker-compose.next.yml b/docker-compose.next.yml index f635c50583d..ae4f2333ddc 100644 --- a/docker-compose.next.yml +++ b/docker-compose.next.yml @@ -1,7 +1,6 @@ -version: '3' services: boulder: environment: - FAKE_DNS: 10.77.77.77 + FAKE_DNS: 64.112.117.122 BOULDER_CONFIG_DIR: test/config-next - GOFLAGS: -mod=vendor + GOCACHE: /boulder/.gocache/go-build-next diff --git a/docker-compose.yml b/docker-compose.yml index b0c235a91d2..6af833b6d36 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,169 +1,203 @@ -version: '3' services: boulder: - # CAUTION: Changing the Go version in this tag changes the version of Go - # used for release builds. make-deb.sh relies on being able to parse the - # numeric version between 'go' and the underscore-prefixed date. If you make - # changes to these tokens, please update this parsing logic. - image: &boulder_image letsencrypt/boulder-tools:${BOULDER_TOOLS_TAG:-go1.17.7_2022-02-10} + # The `letsencrypt/boulder-tools:latest` tag is automatically built in local + # dev environments. In CI a specific BOULDER_TOOLS_TAG is passed, and it is + # pulled with `docker compose pull`. + image: &boulder_tools_image letsencrypt/boulder-tools:${BOULDER_TOOLS_TAG:-latest} + build: + context: test/boulder-tools/ + # Should match one of the GO_CI_VERSIONS in test/boulder-tools/tag_and_upload.sh. + args: + GO_VERSION: 1.25.5 environment: - FAKE_DNS: 10.77.77.77 + # To solve HTTP-01 and TLS-ALPN-01 challenges, change the IP in FAKE_DNS + # to the IP address where your ACME client's solver is listening. This is + # pointing at the boulder service's "public" IP, where challtestsrv is. + FAKE_DNS: 64.112.117.122 BOULDER_CONFIG_DIR: test/config - GOFLAGS: -mod=vendor - # Go 1.18 turns off SHA-1 validation on CSRs (and certs, but that doesn't - # affect us). It also turns off TLS 1.0 and TLS 1.1. Temporarily go back - # to allowing these so we can upgrade to Go 1.18 while doing a deprecation - # window. These overrides will stop working in Go 1.19. 
- GODEBUG: x509sha1=1,tls10default=1 + USE_VITESS: false + GOCACHE: /boulder/.gocache/go-build volumes: - .:/boulder:cached - ./.gocache:/root/.cache/go-build:cached - - ./.hierarchy:/hierarchy/:cached - - ./.softhsm-tokens/:/var/lib/softhsm/tokens/:cached + - ./test/certs/.softhsm-tokens/:/var/lib/softhsm/tokens/:cached networks: - bluenet: + bouldernet: ipv4_address: 10.77.77.77 - rednet: - ipv4_address: 10.88.88.88 - redisnet: - ipv4_address: 10.33.33.33 - # Use sd-test-srv as a backup to Docker's embedded DNS server + publicnet: + ipv4_address: 64.112.117.122 + publicnet2: + ipv4_address: 64.112.117.134 + # Use consul as a backup to Docker's embedded DNS server. If there's a name + # Docker's DNS server doesn't know about, it will forward the query to this + # IP (running consul). # (https://docs.docker.com/config/containers/container-networking/#dns-services). - # If there's a name Docker's DNS server doesn't know about, it will - # forward the query to this IP (running sd-test-srv). We have - # special logic there that will return multiple IP addresses for - # service names. - dns: 10.77.77.77 + # This is used to look up service names via A records (like ra.service.consul) that + # are configured via the ServerAddress field of cmd.GRPCClientConfig. + # TODO: Remove this when ServerAddress is deprecated in favor of SRV records + # and DNSAuthority. + dns: 10.77.77.10 + extra_hosts: + # Allow the boulder container to be reached as "ca.example.org", so we + # can put that name inside our integration test certs (e.g. as a crl + # url) and have it look like a publicly-accessible name. + # TODO(#8215): Move s3-test-srv to a separate service. + - "ca.example.org:64.112.117.122" + # Allow the boulder container to be reached as "integration.trust", for + # similar reasons, but intended for use as a SAN rather than a CRLDP. + # TODO(#8215): Move observer's probe target to a separate service. + - "integration.trust:64.112.117.122" ports: - 4001:4001 # ACMEv2 - - 4002:4002 # OCSP - - 4003:4003 # OCSP + - 4003:4003 # SFE depends_on: - - bmysql - - bredis_clusterer + - bmariadb + - bproxysql + - bvitess + - bredis_1 + - bredis_2 + - bconsul + - bjaeger + - bpkimetal entrypoint: test/entrypoint.sh working_dir: &boulder_working_dir /boulder - bmysql: - image: mariadb:10.5 + bsetup: + image: *boulder_tools_image + volumes: + - .:/boulder:cached + - ./.gocache:/root/.cache/go-build:cached + - ./test/certs/.softhsm-tokens/:/var/lib/softhsm/tokens/:cached + entrypoint: test/certs/generate.sh + working_dir: *boulder_working_dir + profiles: + # Adding a profile to this container means that it won't be started by a + # normal "docker compose up/run boulder", only when specifically invoked + # with a "docker compose up bsetup". + - setup + + bmariadb: + image: mariadb:10.11.13 networks: - bluenet: + bouldernet: aliases: - - boulder-mysql + - boulder-mariadb environment: - MYSQL_ALLOW_EMPTY_PASSWORD: "yes" - # Send slow queries to a table so we can check for them in the - # integration tests. For now we ignore queries not using indexes, - # because that seems to trigger based on the optimizer's choice to not - # use an index for certain queries, particularly when tables are still - # small. - command: mysqld --bind-address=0.0.0.0 --slow-query-log --log-output=TABLE --log-queries-not-using-indexes=ON - logging: - driver: none + MYSQL_ALLOW_EMPTY_PASSWORD: "yes" - bredis_1: - image: redis:latest + bproxysql: + image: proxysql/proxysql:2.7.2 + # The --initial flag force resets the ProxySQL database on startup. 
By + # default, ProxySQL ignores new configuration if the database already + # exists. Without this flag, new configuration wouldn't be applied until you + # ran `docker compose down`. + entrypoint: proxysql -f --idle-threads -c /test/proxysql/proxysql.cnf --initial volumes: - ./test/:/test/:cached - command: redis-server /test/redis.config - networks: - redisnet: - ipv4_address: 10.33.33.2 - - bredis_2: - image: redis:latest - volumes: - - ./test/:/test/:cached - command: redis-server /test/redis.config + depends_on: + - bmariadb networks: - redisnet: - ipv4_address: 10.33.33.3 + bouldernet: + aliases: + - boulder-proxysql - bredis_3: - image: redis:latest + bredis_1: + image: redis:7.0.15 volumes: - ./test/:/test/:cached - command: redis-server /test/redis.config + command: redis-server /test/redis-ratelimits.config networks: - redisnet: - ipv4_address: 10.33.33.4 + bouldernet: + ipv4_address: 10.77.77.4 - bredis_4: - image: redis:latest + bredis_2: + image: redis:7.0.15 volumes: - ./test/:/test/:cached - command: redis-server /test/redis.config + command: redis-server /test/redis-ratelimits.config networks: - redisnet: - ipv4_address: 10.33.33.5 + bouldernet: + ipv4_address: 10.77.77.5 - bredis_5: - image: redis:latest + bconsul: + image: hashicorp/consul:1.19.2 volumes: - - ./test/:/test/:cached - command: redis-server /test/redis.config + - ./test/:/test/:cached networks: - redisnet: - ipv4_address: 10.33.33.6 + bouldernet: + ipv4_address: 10.77.77.10 + command: "consul agent -dev -config-format=hcl -config-file=/test/consul/config.hcl" - bredis_6: - image: redis:latest - volumes: - - ./test/:/test/:cached - command: redis-server /test/redis.config + bjaeger: + image: jaegertracing/all-in-one:1.50 networks: - redisnet: - ipv4_address: 10.33.33.7 + - bouldernet - bredis_clusterer: - image: redis:latest - volumes: - - ./test/:/test/:cached - - ./cluster/:/cluster/:cached - command: /test/wait-for-it.sh 10.33.33.2 4218 /test/redis-create.sh - depends_on: - - bredis_1 - - bredis_2 - - bredis_3 - - bredis_4 - - bredis_5 - - bredis_6 + bpkimetal: + image: ghcr.io/pkimetal/pkimetal:v1.20.0 networks: - redisnet: - ipv4_address: 10.33.33.10 - aliases: - - boulder-redis-clusterer + - bouldernet - netaccess: - image: *boulder_image + bvitess: + # The `letsencrypt/boulder-vtcomboserver:latest` tag is automatically built + # in local dev environments. In CI a specific BOULDER_VTCOMBOSERVER_TAG is + # passed, and it is pulled with `docker compose pull`. + image: letsencrypt/boulder-vtcomboserver:${BOULDER_VTCOMBOSERVER_TAG:-latest} + build: + context: test/vtcomboserver/ environment: - GO111MODULE: "on" - GOFLAGS: -mod=vendor - BOULDER_CONFIG_DIR: test/config + # By specifying KEYSPACES vttestserver will create the corresponding + # databases on startup. + KEYSPACES: boulder_sa_test,boulder_sa_integration,incidents_sa_test,incidents_sa_integration + NUM_SHARDS: 1,1,1,1 networks: - - bluenet - volumes: - - .:/boulder - working_dir: *boulder_working_dir - entrypoint: test/entrypoint-netaccess.sh + bouldernet: + aliases: + - boulder-vitess networks: - bluenet: + # This network represents the data-center internal network. It is used for + # boulder services and their infrastructure, such as consul, mariadb, and + # redis. + bouldernet: driver: bridge ipam: driver: default config: - subnet: 10.77.77.0/24 - rednet: + # Only issue DHCP addresses in the top half of the range, to avoid + # conflict with static addresses. + ip_range: 10.77.77.128/25 + + # This network represents the public internet. 
It uses a real public IP space + # (that Let's Encrypt controls) so that our integration tests are happy to + # validate and issue for it. It is used by challtestsrv, which binds to + # 64.112.117.122:80 and :443 for its HTTP-01 challenge responder. + # + # TODO(#8215): Put s3-test-srv on this network. + publicnet: driver: bridge ipam: driver: default config: - - subnet: 10.88.88.0/24 + - subnet: 64.112.117.0/25 - redisnet: + # This network is used for two things in the integration tests: + # - challtestsrv binds to 64.112.117.134:443 for its tls-alpn-01 challenge + # responder, to avoid interfering with the HTTPS port used for testing + # HTTP->HTTPS redirects during http-01 challenges. Note: this could + # probably be updated in the future so that challtestsrv can handle + # both tls-alpn-01 and HTTPS on the same port. + # - test/v2_integration.py has some test cases that start their own HTTP + # server instead of relying on challtestsrv, because they want very + # specific behavior. For these cases, v2_integration.py creates a Python + # HTTP server and binds it to 64.112.117.134:80. + # + # TODO(#8215): Deprecate this network, replacing it with individual IPs within + # the existing publicnet. + publicnet2: driver: bridge ipam: driver: default config: - - subnet: 10.33.33.0/24 + - subnet: 64.112.117.128/25 diff --git a/docs/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..f5121d46f00 --- /dev/null +++ b/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +The code of conduct for everyone participating in this community in any capacity +is available for reference +[on the community forum](https://community.letsencrypt.org/guidelines). diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 00000000000..cae3364ba4c --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,444 @@ +Thanks for helping us build Boulder! This page contains requirements and +guidelines for Boulder contributions. + +# Patch Requirements + +* All new functionality and fixed bugs must be accompanied by tests. +* All patches must meet the deployability requirements listed below. +* We prefer pull requests from external forks be created with the ["Allow edits + from + maintainers"](https://github.com/blog/2247-improving-collaboration-with-forks) + checkbox selected. + +# Review Requirements + +* All pull requests must receive at least one approval by a [CODEOWNER](../CODEOWNERS) other than the author. This is enforced by GitHub itself. +* All pull requests should receive at least two approvals by [Trusted Contributors](https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#161-definitions). + This requirement may be waived when: + * the change only modifies documentation; + * the change only modifies tests; + * in exceptional circumstances, such as when no second reviewer is available at all. + + This requirement should not be waived when: + * the change is not written by a Trusted Contributor, to ensure that at least two TCs have eyes on it. +* New commits pushed to a branch invalidate previous reviews. In other words, a + reviewer must give positive reviews of a branch after its most recent pushed + commit. +* If a branch contains commits from multiple authors, it needs a reviewer who + is not an author of commits on that branch. +* Review changes to or addition of tests just as rigorously as you review code + changes. Consider: Do tests actually test what they mean to test? Is this the + best way to test the functionality in question? 
Do the tests cover all the
+  functionality in the patch, including error cases?
+* Are there new RPCs or config fields? Make sure the patch meets the
+  Deployability rules below.
+
+# Merge Requirements
+
+We have a bot that will comment on some PRs indicating there are:
+
+ 1. configuration changes
+ 2. SQL schema changes
+ 3. feature flag changes
+
+These may require either a CP/CPS review or filing of a ticket to make matching changes
+in production. It is the responsibility of the person merging the PR to make sure
+the required action has been performed before merging. Usually this will be confirmed
+in a comment or in the PR description.
+
+# Patch Guidelines
+
+* Please include helpful comments. No need to gratuitously comment clear code,
+  but make sure it's clear why things are being done. Include information in
+  your pull request about what you're trying to accomplish with your patch.
+* Avoid named return values. See
+  [#3017](https://github.com/letsencrypt/boulder/pull/3017) for an example of a
+  subtle problem they can cause.
+* Do not include `XXX`s or naked `TODO`s. Use the formats:
+
+  ```go
+  // TODO(<email-address>): Hoverboard + Time-machine unsupported until upstream patch.
+  // TODO(#<issue-number>): Pending hoverboard/time-machine interface.
+  // TODO(@githubusername): Enable hoverboard kickflips once interface is stable.
+  ```
+
+# Squash merging
+
+Once a pull request is approved and the tests are passing, the author or any
+other committer can merge it. We always use [squash
+merges](https://github.com/blog/2141-squash-your-commits) via GitHub's web
+interface. That means that during the course of your review you should
+generally not squash or amend commits, or force push. Even if the changes in
+each commit are small, keeping them separate makes it easier for us to review
+incremental changes to a pull request. Rest assured that those tiny changes
+will get squashed into a nice meaningful-size commit when we merge.
+
+If the CI tests are failing on your branch, you should look at the logs
+to figure out why. Sometimes (though rarely) they fail spuriously, in which
+case you can post a comment requesting that a project owner kick the build.
+
+# Error handling
+
+All errors must be addressed in some way: That may be simply by returning an
+error up the stack, or by handling it in some intelligent way where it is
+generated, or by explicitly ignoring it and assigning to `_`. We use the
+`errcheck` tool in our integration tests to make sure all errors are
+addressed. Note that ignoring errors, even in tests, should be rare, since
+they may generate hard-to-debug problems.
+
+When handling errors, always do the operation which creates the error (usually
+a function call) and the error checking on separate lines:
+```
+err := someOperation(args)
+if err != nil {
+  return nil, fmt.Errorf("some operation failed: %w", err)
+}
+```
+We avoid the `if err := someOperation(args); err != nil {...}` style as we find
+it to be less readable and it can give rise to surprising scoping behavior.
+
+We define two special types of error. `BoulderError`, defined in
+errors/errors.go, is used specifically when a typed error needs to be passed
+across an RPC boundary. For instance, if the SA returns "not found", callers
+need to be able to distinguish that from a network error. Not every error that
+may pass across an RPC boundary needs to be a BoulderError, only those errors
+that need to be handled by type elsewhere.
+Handling by type may be as simple as
+turning a BoulderError into a specific type of ProblemDetail.
+
+The other special type of error is `ProblemDetails`. We try to treat these as a
+presentation-layer detail, and use them only in parts of the system that are
+responsible for rendering errors to end-users, i.e. WFE2. Note
+one exception: The VA RPC layer defines its own `ProblemDetails` type, which is
+returned to the RA and stored as part of a challenge (to eventually be rendered
+to the user).
+
+Within WFE2, ProblemDetails are sent to the client by calling
+`sendError()`, which also logs the error. For internal errors like timeout,
+or any error type that we haven't specifically turned into a ProblemDetail, we
+return a ServerInternal error. This avoids unnecessarily exposing internals.
+It's possible to add additional errors to a logEvent using `.AddError()`, but
+this should only be done when there is internal-only information to log
+that isn't redundant with the ProblemDetails sent to the user. Note that the
+final argument to `sendError()`, `ierr`, will automatically get added to the
+logEvent for ServerInternal errors, so when sending a ServerInternal error it's
+not necessary to separately call `.AddError`.
+
+# Deployability
+
+We want to ensure that a new Boulder revision can be deployed to the
+currently running Boulder production instance without requiring config
+changes first. We also want to ensure that during a deploy, services can be
+restarted in any order. That means two things:
+
+## Good zero values for config fields
+
+Any newly added config field must have a usable [zero
+value](https://tour.golang.org/basics/12). That is to say, if a config field
+is absent, Boulder shouldn't crash or misbehave. If that config field names a
+file to be read, Boulder should be able to proceed without that file being
+read.
+
+Note that there are some config fields that we want to be a hard requirement.
+To handle such a field, first add it as optional, then file an issue to make
+it required after the next deploy is complete.
+
+In general, we would like our deploy process to be: deploy new code + old
+config; then immediately after deploy the same code + new config. This makes
+deploys cheaper so we can do them more often, and allows us to more readily
+separate deploy-triggered problems from config-triggered problems.
+
+## Flag-gating features
+
+When adding significant new features or replacing existing RPCs the
+`boulder/features` package should be used to gate its usage. To add a flag, a
+new field of the `features.Config` struct should be added. All flags default
+to false.
+
+In order to test if the flag is enabled elsewhere in the codebase you can use
+`features.Get().ExampleFeatureName` which gets the `bool` value from a global
+config.
+
+Each service should include a `map[string]bool` named `Features` in its
+configuration object at the top level and call `features.Set` with that map
+immediately after parsing the configuration. For example, to enable
+`UseNewMetrics` and disable `AccountRevocation` you would add this object:
+
+```json
+{
+  ...
+  "features": {
+    "UseNewMetrics": true,
+    "AccountRevocation": false
+  }
+}
+```
+
+Feature flags are meant to be used temporarily and should not be used for
+permanent boolean configuration options.
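
As a sketch of the flag-gating pattern just described: `UseNewMetrics` is the flag from the JSON example above, the config struct and function names are invented, and we assume `features.Set` accepts the parsed map and returns an error (check features/features.go for the actual signatures):

```go
package example

import (
	"github.com/letsencrypt/boulder/features"
)

type config struct {
	// Features is parsed from the service's top-level JSON configuration,
	// as in the object shown above.
	Features map[string]bool
}

// setup applies feature flags immediately after the configuration is parsed.
// Assumption: features.Set accepts the parsed map and returns an error.
func setup(c config) error {
	return features.Set(c.Features)
}

func doWork() {
	if features.Get().UseNewMetrics {
		// New, flag-gated code path.
		return
	}
	// Existing behavior, kept until the flag is enabled in production.
}
```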
+
+### Deprecating a feature flag
+
+Once a feature has been enabled in both staging and production, someone on the
+team should deprecate it:
+
+ - Remove any instances of `features.Get().ExampleFeatureName`, adjusting code
+   as needed.
+ - Move the field to the top of the `features.Config` struct, under a comment
+   saying it's deprecated.
+ - Remove all references to the feature flag from `test/config-next`.
+ - Add the feature flag to `test/config`. This serves to check that we still
+   tolerate parsing the flag at startup, even though it is ineffective.
+ - File a ticket to remove the feature flag in staging and production.
+ - Once the feature flag is removed in staging and production, delete it from
+   `test/config` and `features.Config`.
+
+### Gating RPCs
+
+When you add a new RPC to a Boulder service (e.g. `SA.GetFoo()`), all
+components that call that RPC should gate those calls using a feature flag.
+Since the feature's zero value is false, a deploy with the existing config
+will not call `SA.GetFoo()`. Then, once the deploy is complete and we know
+that all SA instances support the `GetFoo()` RPC, we do a followup config
+deploy that sets the default value to true, and finally remove the flag
+entirely once we are confident the functionality it gates behaves correctly.
+
+### Gating migrations
+
+We use [database migrations](https://en.wikipedia.org/wiki/Schema_migration)
+to modify the existing schema. These migrations will be run on live data
+while Boulder is still running, so we need Boulder code at any given commit
+to be capable of running without depending on any changes in schemas that
+have not yet been applied.
+
+For instance, if we're adding a new column to an existing table, Boulder should
+run correctly in three states:
+
+1. Migration not yet applied.
+2. Migration applied, flag not yet flipped.
+3. Migration applied, flag flipped.
+
+Specifically, that means that all of our `SELECT` statements should enumerate
+columns to select, and not use `*`. Also, generally speaking, we will need a
+separate model `struct` for serializing and deserializing data before and
+after the migration. This is because the ORM package we use,
+[`borp`](https://github.com/letsencrypt/borp), expects every field in a struct to
+map to a column in the table. If we add a new field to a model struct and
+Boulder attempts to write that struct to a table that doesn't yet have the
+corresponding column (case 1), borp will fail with `Insert failed table posts
+has no column named Foo`. There are examples of such models in sa/model.go,
+along with code to turn a model into a `struct` used internally.
+
+An example of a flag-gated migration, adding a new `IsWizard` field to Person
+controlled by an `AllowWizards` feature flag:
+
+```go
+// features/features.go:
+
+type Config struct {
+	// ...
+
+	AllowWizards bool // Added!
+}
+```
+
+```go
+// sa/sa.go:
+
+type Person struct {
+	HatSize  int
+	IsWizard bool // Added!
+}
+
+type personModelv1 struct {
+	HatSize int
+}
+
+// Added!
+type personModelv2 struct {
+	personModelv1
+	IsWizard bool
+}
+
+func (ssa *SQLStorageAuthority) GetPerson() (Person, error) {
+	if features.Get().AllowWizards { // Added!
+		var model personModelv2
+		err := ssa.dbMap.SelectOne(context.Background(), &model, "SELECT hatSize, isWizard FROM people")
+		if err != nil {
+			return Person{}, err
+		}
+		return Person{
+			HatSize:  model.HatSize,
+			IsWizard: model.IsWizard,
+		}, nil
+	} else {
+		var model personModelv1
+		err := ssa.dbMap.SelectOne(context.Background(), &model, "SELECT hatSize FROM people")
+		if err != nil {
+			return Person{}, err
+		}
+		return Person{
+			HatSize: model.HatSize,
+		}, nil
+	}
+}
+
+func (ssa *SQLStorageAuthority) AddPerson(p Person) error {
+	if features.Get().AllowWizards { // Added!
+		return ssa.dbMap.Insert(context.Background(), personModelv2{
+			personModelv1: personModelv1{
+				HatSize: p.HatSize,
+			},
+			IsWizard: p.IsWizard,
+		})
+	} else {
+		return ssa.dbMap.Insert(context.Background(), personModelv1{
+			HatSize: p.HatSize,
+			// p.IsWizard ignored
+		})
+	}
+}
+```
+
+You will also need to update the `initTables` function from `sa/database.go` to
+tell borp which table to use for your versioned model structs. Make sure to
+consult the flag you defined so that only **one** of the table maps is added at
+any given time, otherwise borp will error. Depending on your table you may also
+need to add `SetKeys` and `SetVersionCol` entries for your versioned models.
+Example:
+
+```go
+func initTables(dbMap *borp.DbMap) {
+	// < unrelated lines snipped for brevity >
+
+	if features.Get().AllowWizards {
+		dbMap.AddTableWithName(personModelv2{}, "people")
+	} else {
+		dbMap.AddTableWithName(personModelv1{}, "people")
+	}
+}
+```
+
+New migrations should be added at `./sa/db-next`:
+
+```shell
+$ cd sa/db-next
+$ sql-migrate new -env="boulder_sa_test" AddWizards
+Created migration boulder_sa/20220906165519-AddWizards.sql
+```
+
+Finally, edit the resulting file
+(`sa/db-next/boulder_sa/20220906165519-AddWizards.sql`) to define your migration:
+
+```mysql
+-- +migrate Up
+ALTER TABLE people ADD COLUMN isWizard BOOLEAN DEFAULT false;
+
+-- +migrate Down
+ALTER TABLE people DROP COLUMN isWizard;
+```
+
+# Expressing "optional" Timestamps
+
+Timestamps in protocol buffers must always be expressed as
+[timestamppb.Timestamp](https://pkg.go.dev/google.golang.org/protobuf/types/known/timestamppb).
+Timestamps must never contain their zero value, in the sense of
+`timestamp.AsTime().IsZero()`. When a timestamp field is optional, absence must
+be expressed through the absence of the field, rather than through a field
+that is present with a zero value. The `core.IsAnyNilOrZero` function can check
+these cases.
+
+Senders must check that timestamps are non-zero before sending them. Receivers
+must check that timestamps are non-zero before accepting them. (A short sketch
+of such a check appears below.)
+
+# Rounding time in DB
+
+All times that we send to the database are truncated to one second's worth of
+precision. This reduces the size of indexes that include timestamps, and makes
+querying them more efficient. The Storage Authority (SA) is responsible for this
+truncation, and performs it for SELECT queries as well as INSERT and UPDATE.
+
+# Release Process
+
+The current Boulder release process is described in
+[release.md](https://github.com/letsencrypt/boulder/blob/main/docs/release.md). New
+releases are tagged weekly, and artifacts are automatically produced for each
+release by GitHub Actions.
+
+# Dependencies
+
+We use [go modules](https://github.com/golang/go/wiki/Modules) and vendor our dependencies.
+
+To add a dependency, add the import statement to your .go file, then run
+`go build` on it. This will automatically add the dependency to go.mod. Next,
+run `go mod vendor && git add vendor/` to save a copy in the vendor folder.
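
Here is the short sketch promised in the timestamps section above. The request type and field name are invented for illustration; `core.IsAnyNilOrZero` is the real helper the section names:

```go
package example

import (
	"errors"

	"google.golang.org/protobuf/types/known/timestamppb"

	"github.com/letsencrypt/boulder/core"
)

// renewRequest is a hypothetical message with an optional timestamp field.
type renewRequest struct {
	NotBefore *timestamppb.Timestamp
}

// validate rejects a timestamp that is absent (nil) or zero-valued: optional
// timestamps express "no value" by being absent, never by being zero.
func validate(req *renewRequest) error {
	if core.IsAnyNilOrZero(req.NotBefore) {
		return errors.New("notBefore: timestamp absent or zero")
	}
	return nil
}
```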
+
+When vendorizing dependencies, it's important to make sure tests pass on the
+version you are vendorizing. Currently we enforce this by requiring that pull
+requests containing a dependency update to any version other than a tagged
+release include a comment indicating that you ran the tests and that they
+succeeded, preferably with the command line you ran them with. Note that you
+may have to get a separate checkout of the dependency (using `go get` outside
+of the boulder repository) in order to run its tests, as some vendored
+modules do not bring their tests with them.
+
+## Updating Dependencies
+
+To upgrade a dependency, [see the Go
+docs](https://github.com/golang/go/wiki/Modules#how-to-upgrade-and-downgrade-dependencies).
+Typically you want `go get <dependency>` rather than `go get -u
+<dependency>`, which can introduce a lot of unexpected updates. After running
+`go get`, make sure to run `go mod vendor && git add vendor/` to update the
+vendor directory. If you forget, CI tests will catch this.
+
+If you are updating a dependency to a version which is not a tagged release,
+see the note above about how to run all of a dependency's tests and note that
+you have done so in the PR.
+
+Note that updating dependencies can introduce new, transitive dependencies. In
+general we try to keep our dependencies as narrow as possible in order to
+minimize the number of people and organizations whose code we need to trust.
+As a rule of thumb: If an update introduces new packages or modules that are
+inside a repository where we already depend on other packages or modules, it's
+not a big deal. If it introduces a new dependency in a different repository,
+please try to figure out where that dependency came from and why (for instance:
+"package X, which we depend on, started supporting XML config files, so now we
+depend on an XML parser") and include that in the PR description. When there are
+a large number of new dependencies introduced, and we don't need the
+functionality they provide, we should consider asking the relevant upstream
+repository for a refactoring to reduce the number of transitive dependencies.
+
+# Go Version
+
+The [Boulder development
+environment](https://github.com/letsencrypt/boulder/blob/main/README.md#setting-up-boulder)
+does not use the Go version installed on the host machine, and instead uses a
+Go environment baked into a "boulder-tools" Docker image. We build a separate
+boulder-tools container for each supported Go version. Please see [the
+Boulder-tools
+README](https://github.com/letsencrypt/boulder/blob/main/test/boulder-tools/README.md)
+for more information on upgrading Go versions.
+
+# ACME Protocol Divergences
+
+While Boulder attempts to implement the ACME specification as strictly as
+possible, there are places at which we will diverge from the letter of the
+specification for various reasons. We detail these divergences in the [ACME
+divergences
+doc](https://github.com/letsencrypt/boulder/blob/main/docs/acme-divergences.md).
+
+# ACME Protocol Implementation Details
+
+The ACME specification allows developers to make certain decisions as to how
+various elements in the RFC are implemented. Some of these fully conformant
+decisions are listed in the [ACME implementation details
+doc](https://github.com/letsencrypt/boulder/blob/main/docs/acme-implementation_details.md).
+
+## Code of Conduct
+
+The code of conduct for everyone participating in this community in any capacity
+is available for reference
+[on the community forum](https://community.letsencrypt.org/guidelines).
+
+## Problems or questions?
+
+The best place to ask dev related questions is on the [Community
+Forums](https://community.letsencrypt.org/).
diff --git a/docs/CRLS.md b/docs/CRLS.md
new file mode 100644
index 00000000000..1837e574269
--- /dev/null
+++ b/docs/CRLS.md
@@ -0,0 +1,45 @@
+# CRLs
+
+For each issuer certificate, Boulder generates several sharded CRLs.
+The responsibility is shared across these components:
+
+- crl-updater
+- sa
+- ca
+- crl-storer
+
+The crl-updater starts the process: for each shard of each issuer,
+it requests revoked certificate information from the SA. It sends
+that information to the CA for signing, and receives back a signed
+CRL. It sends the signed CRL to the crl-storer for upload to an
+S3-compatible data store.
+
+The crl-storer uploads the CRLs to the filename `<issuerID>/<shardIdx>.crl`,
+where `issuerID` is an integer that uniquely identifies the Subject of
+the issuer certificate (based on hashing the Subject's encoded bytes) and
+`shardIdx` identifies the shard.
+
+There's one more component that's not in this repository: an HTTP server
+to serve objects from the S3-compatible data store. For Let's Encrypt, this
+role is served by a CDN. Note that the CA must be carefully configured so
+that the CRLBaseURL for each issuer matches the publicly accessible URL
+where that issuer's CRLs will be served.
+
+## Shard assignment
+
+Certificates are assigned to shards explicitly at issuance time, with the
+selected shard baked into the certificate as part of its CRLDistributionPoints
+extension. The shard is selected based on taking the (random) low bytes of the
+serial number modulo the number of shards produced by that certificate's issuer.
+
+## Storage
+
+When a certificate is revoked, the new status is written to both the
+`certificateStatus` table and the `revokedCertificates` table. The former
+contains an entry for every certificate, explicitly recording that newly-issued
+certificates are not revoked. The latter is less explicit but more scalable,
+containing rows only for certificates which have been revoked.
+
+The SA only exposes the latter of these two mechanisms via the
+`GetRevokedCertsByShard` method, which returns revoked certificates whose
+`shardIdx` matches the requested shard. The `certificateStatus` table will be
+removed in the near future.
diff --git a/docs/DESIGN.md b/docs/DESIGN.md
index e183e0efbe2..032c92f0ca2 100644
--- a/docs/DESIGN.md
+++ b/docs/DESIGN.md
@@ -17,7 +17,7 @@ A couple of notes:
 * For simplicity, we do not show interactions with the Storage Authority. The
   SA simply acts as a common data store for the various components. It is
   written to by the RA (registrations and authorizations) and the CA
-  (certificates), and read by WFE, RA, and CA.
+  (certificates), and read by WFEv2, RA, and CA.
 
 * The interactions shown in the diagrams are the calls that go between
   components. These calls are done via [gRPC](https://grpc.io/).
@@ -39,15 +39,6 @@ A couple of notes: ## New Account/Registration -ACME v1: - -``` -1: Client ---new-reg--> WFE -2: WFE ---NewRegistration--> RA -3: WFE <-------return------- RA -4: Client <------------ WFE -``` - ACME v2: ``` @@ -59,7 +50,7 @@ ACME v2: Notes: -* 1-2: WFE/WFEv2 do the following: +* 1-2: WFEv2 does the following: * Verify that the request is a POST * Verify the JWS signature on the POST body * Parse the registration/account object @@ -73,21 +64,12 @@ Notes: * Store the registration/account (which gives it an ID) * Return the registration/account as stored -* 3-4: WFE/WFEv2 do the following: +* 3-4: WFEv2 does the following: * Return the registration/account, with a unique URL ## Updated Registration -ACME v1: - -``` -1: Client ---reg--> WFE -2: WFE ---UpdateRegistration--> RA -3: WFE <--------return--------- RA -4: Client <-------- WFE -``` - ACME v2: ``` @@ -97,7 +79,7 @@ ACME v2: 4: Client <--------- WFEv2 ``` -* 1-2: WFE/WFEv2 do the following: +* 1-2: WFEv2 does the following: * Verify that the request is a POST * Verify the JWS signature on the POST body * Verify that the JWS signature is by a registered key @@ -111,25 +93,16 @@ ACME v2: * Store the updated registration/account * Return the updated registration/account -* 3-4: WFE/WFEv2 do the following: +* 3-4: WFEv2 does the following: * Return the updated registration/account ## New Authorization (ACME v1 Only) -ACME v1: - -``` -1: Client ---new-authz--> WFE -2: WFE ---NewAuthorization--> RA -3: WFE <-------return-------- RA -4: Client <-------------- WFE -``` - ACME v2: We do not implement "pre-authorization" and the newAuthz endpoint for ACME v2. Clients are expected to get authorizations by way of creating orders. -* 1-2: WFE does the following: +* 1-2: WFEv2 does the following: * Verify that the request is a POST * Verify the JWS signature on the POST body * Verify that the JWS signature is by a registered key @@ -143,15 +116,11 @@ Clients are expected to get authorizations by way of creating orders. * Construct URIs for the challenges * Store the authorization -* 3-4: WFE does the following: +* 3-4: WFEv2 does the following: * Return the authorization, with a unique URL ## New Order (ACME v2 Only) -ACME v1: -This version of the protocol does not use order objects or provide the newOrder -endpoint. 
- ACME v2: ``` 1: Client ---newOrder---> WFEv2 @@ -178,26 +147,25 @@ ACME v2: ## Challenge Response -ACME v1/ACME v2: +ACME v2: ``` -1: Client ---chal--> WFE/WFEv2 -2: WFE/WFEv2 ---UpdateAuthorization--> RA -3: RA ---PerformValidation--> VA -4: Client <~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~> VA -5: RA <-------return--------- VA -6: WFE/WFEv2 <--------return---------- RA -7: Client <--------- WFE/WFEv2 +1: Client ---chal--> WFEv2 +2: WFEv2 ---UpdateAuthorization--> RA +3: RA ---PerformValidation--> VA +4: Client <~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~> VA +5: RA <-------return--------- VA +6: WFEv2 <--------return---------- RA +7: Client <--------- WFEv2 ``` -* 1-2: WFE/WFEv2 do the following: +* 1-2: WFEv2 does the following: * Look up the referenced authorization object * Look up the referenced challenge within the authorization object * Verify that the request is a POST * Verify the JWS signature on the POST body * Verify that the JWS signature is by a registered key * Verify that the JWS key corresponds to the authorization - * WFEv1: Verify that the client has indicated agreement to terms * 2-3: RA does the following: * Store the updated authorization object @@ -208,7 +176,7 @@ ACME v1/ACME v2: * 4-5: RA does the following: * Return the updated authorization object -* 5-6: WFE/WFEv2 does the following: +* 5-6: WFEv2 does the following: * Return the updated authorization object * 6: VA does the following: @@ -220,19 +188,19 @@ ACME v1/ACME v2: * Mark the authorization as valid or invalid * Store the updated authorization object -* 6-7: WFE/WFEv2 do the following: +* 6-7: WFEv2 does the following: * Return the updated challenge object ## Authorization Poll -ACME v1/ACME v2: +ACME v2: ``` -1: Client ---authz--> WFE -2: Client <---------- WFE +1: Client ---authz--> WFEv2 +2: Client <---------- WFEv2 ``` -* 1-2: WFE/WFEv2 do the following: +* 1-2: WFEv2 does the following: * Look up the referenced authorization * Verify that the request is a GET * Return the authorization object @@ -255,24 +223,11 @@ ACME v2: ## New Certificate (ACME v1 Only) -ACME v1: - -``` -1: Client ---new-cert--> WFE -2: WFE ---NewCertificate--> RA -3: RA ----------IssuePreCertificate---------> CA -4: RA <---------------return----------------- CA -5: RA ---IssueCertificateForPrecertificate--> CA -6: RA <---------------return----------------- CA -7: WFE <------return------- RA -8: Client <------------- WFE -``` - ACME v2: This version of the protocol expects certificate issuance to occur only through order finalization and does not offer the new-cert endpoint. -* 1-2: WFE does the following: +* 1-2: WFEv2 does the following: * Verify that the request is a POST * Verify the JWS signature on the POST body * Verify that the JWS signature is by a registered key @@ -281,7 +236,7 @@ order finalization and does not offer the new-cert endpoint. * 3-4: RA does the following: * Verify the PKCS#10 CSR in the certificate request object - * Verify that the CSR has a non-zero number of domain names + * Verify that the CSR has a non-zero number of identifiers * Verify that the public key in the CSR is different from the account key * For each authorization referenced in the certificate request * Retrieve the authorization from the database @@ -302,7 +257,7 @@ order finalization and does not offer the new-cert endpoint. 
   * Verify that the issued cert will not be valid longer than the CA cert
   * Verify that the issued cert will not be valid longer than the underlying authorizations
   * Open a CA DB transaction and allocate a new serial number
-  * Sign a poisoned precertificate with the CFSSL library
+  * Sign a poisoned precertificate
 
 * 5-6: RA does the following:
   * Collect the SCTs needed to satisfy the ctpolicy
@@ -311,7 +266,7 @@ order finalization and does not offer the new-cert endpoint.
 * 5-6: CA does the following:
   * Remove the precertificate poison and sign a final certificate with SCTs provided by the RA
   * Create the first OCSP response for the final certificate
-  * Sign the final certificate and the first OCSP response with the CFSSL library
+  * Sign the final certificate and the first OCSP response
   * Store the final certificate
   * Commit the CA DB transaction if everything worked
   * Return the final certificate serial number
@@ -320,15 +275,12 @@ order finalization and does not offer the new-cert endpoint.
   * Log the success or failure of the request
   * Return the certificate object
 
-* 7-8: WFE does the following:
+* 7-8: WFEv2 does the following:
   * Create a URL from the certificate's serial number
   * Return the certificate with its URL
 
 ## Order Finalization (ACME v2 Only)
 
-ACME v1:
-This version of the protocol does not use order objects.
-
 ACME v2:
 
 ```
@@ -351,7 +303,7 @@ ACME v2:
 
 * 2-4: RA does the following:
   * Verify the PKCS#10 CSR in the certificate request object
-  * Verify that the CSR has a non-zero number of domain names
+  * Verify that the CSR has a non-zero number of identifiers
   * Verify that the public key in the CSR is different from the account key
   * Retrieve and verify the status and expiry of the order object
   * For each identifier referenced in the order request
@@ -372,7 +324,7 @@ ACME v2:
   * Verify that the issued cert will not be valid longer than the CA cert
   * Verify that the issued cert will not be valid longer than the underlying authorizations
   * Open a CA DB transaction and allocate a new serial number
-  * Sign a poisoned precertificate with the CFSSL library
+  * Sign a poisoned precertificate
 
 * 5-6: RA does the following
   * Collect the SCTs needed to satisfy the ctpolicy
@@ -381,7 +333,7 @@ ACME v2:
 * 5-6: CA does the following:
   * Sign a final certificate with SCTs provided by the RA
   * Create the first OCSP response for the final certificate
-  * Sign the final certificate and the first OCSP response with the CFSSL library
+  * Sign the final certificate and the first OCSP response
   * Store the final certificate
   * Commit the CA DB transaction if everything worked
   * Return the final certificate serial number
@@ -397,16 +349,24 @@ ACME v2:
 
 ## Revoke Certificate
 
-ACME v1/v2:
+ACME v2:
 
 ```
-1: Client ---cert--> WFE/WFEv2
-2: WFE/WFEv2 ---RevokeCertificateWithReg--> RA
-3: WFE/WFEv2 <------------return----------- RA
-4: Client <--------- WFE/WFEv2
+1: Client ---cert--> WFEv2
+2: WFEv2 ---RevokeCertByApplicant--> RA
+3: WFEv2 <-----------return--------- RA
+4: Client <--------- WFEv2
+```
+or
+```
+1: Client ---cert--> WFEv2
+2: WFEv2 ------RevokeCertByKey-----> RA
+3: WFEv2 <-----------return--------- RA
+4: Client <--------- WFEv2
 ```
 
-* 1-2: WFE/WFEv2 do the following:
+
+* 1-2: WFEv2 does the following:
   * Verify that the request is a POST
   * Verify the JWS signature on the POST body
   * Verify that the JWS signature is either:
@@ -424,5 +384,5 @@ ACME v1/v2:
   * Sign an OCSP response indicating revoked status for this certificate
   * Store the OCSP response in the database
 
-* 3-4: WFE/WFEv2 do
the following: +* 3-4: WFEv2 does the following: * Return an indication of the success or failure of the revocation diff --git a/docs/ISSUANCE-CYCLE.md b/docs/ISSUANCE-CYCLE.md new file mode 100644 index 00000000000..1dab719aeef --- /dev/null +++ b/docs/ISSUANCE-CYCLE.md @@ -0,0 +1,19 @@ +# The Issuance Cycle + +What happens during an ACME finalize request? + +At a high level: + +1. Check that all authorizations are good. +2. Recheck CAA for hostnames that need it. +3. Allocate and store a serial number. +4. Select a certificate profile. +5. Generate and store linting precertificate. +6. Sign, log (and don't store) precertificate. +7. Submit precertificate to CT. +8. Generate linting final certificate. Not logged or stored. +9. Sign, log, and store final certificate. + +Revocation can happen at any time after (5), whether or not step (6) was successful. We do things this way so that even in the event of a power failure or error storing data, we have a record of what we planned to sign (the tbsCertificate bytes of the linting certificate). + +Note that to avoid needing a migration, we chose to store the linting certificate from (5) in the "precertificates" table, which is now a bit of a misnomer. diff --git a/docs/acme-divergences-v1.md b/docs/acme-divergences-v1.md deleted file mode 100644 index 23550d34889..00000000000 --- a/docs/acme-divergences-v1.md +++ /dev/null @@ -1,115 +0,0 @@ -# ACMEv1 divergences from ACME draft versions - -Boulder primarily supports the final version of ACME, informally called ACMEv2 -and offered by Let's Encrypt at https://acme-v02.api.letsencrypt.org/. During a -transition period, Boulder still supports ACMEv1, which was developed alongside -the ACME standard as it developed. ACMEv1 doesn't exactly match any draft -version, but here we will define the ACMEv1 implementation in terms of its -divergences from draft 7 of ACME. - -If you're developing a new client, or updating an existing client, you should -target the ACMEv2 API. See acme-divergences.md for details of that API. - -**ACME v1 divergences from [`draft-ietf-acme-acme-07`](https://tools.ietf.org/html/draft-ietf-acme-acme-07).** - -## [Section 6](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-6) - -Boulder does not implement the [general JWS syntax](https://tools.ietf.org/html/rfc7515#page-20), but only accepts the [flattened syntax](https://tools.ietf.org/html/rfc7515#page-21). - -## [Section 6.2](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-6.2) - -Boulder enforces the presence of the `jwk` field in JWS objects, and does not support the `kid` field. - -## [Section 6.3.1](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-6.3.1) - -Boulder does not use the `url` field from the JWS protected resource. Instead Boulder will validate the `resource` field from the JWS payload matches the resource being requested. Boulder implements the resource types described in [draft-ietf-acme-02 Section 6.1](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.1) plus the additional "KeyChange" resource. Boulder verifies the `resource` field contains the `/directory` URI for the requested resource. - -## [Section 6.5](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-6.5) - -Boulder does not provide a `Retry-After` header when a user hits a rate-limit, nor does it provide `Link` headers to further documentation on rate-limiting. 
- -## [Section 6.6](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-6.6) - -Boulder doesn't return errors under the `urn:ietf:params:acme:error:` namespace but instead uses the `urn:acme:error:` namespace from [draft-ietf-acme-01 Section 5.4](https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-5.4). - -Boulder uses `invalidEmail` in place of the error `invalidContact` defined in [draft-ietf-acme-01 Section 5.4](https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-5.4). - -Boulder does not implement the `unsupportedContact` and `accountDoesNotExist` errors. - -Boulder does not implement the `caa` and `dnssec` errors. - -## [Section 7.1](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.1) - -Boulder does not implement the `new-order` resource (previously referred to as `new-application`). Instead of `new-order` Boulder implements the `new-cert` resource that is defined in [draft-ietf-acme-02 Section 6.5](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5). - -Boulder also doesn't implement the `new-nonce` endpoint. - -Boulder implements the `new-account` resource only under the `new-reg` key. - -Boulder implements Link: rel="next" headers from new-reg to new-authz, and -new-authz to new-cert, as specified in -[draft-02](https://tools.ietf.org/html/draft-ietf-acme-acme-02#page-15), but -these links are not provided in the latest draft, and clients should use URLs -from the directory instead. - -Boulder does not provide the "index" link relation pointing at the directory URL. - -## [Section 7.1.2](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.1.2) - -Boulder does not implement the `terms-of-service-agreed` or `orders` fields in the registration object (nor the endpoints the latter links to). - -## [Section 7.1.3](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.1.3) - -Boulder does not implement orders (previously called `applications`), instead it implements the `new-cert` flow from [draft-ietf-acme-02 Section 6.5](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5). Instead of authorizations in the order response, Boulder currently uses authorizations that are created using the `new-authz` flow from [draft-ietf-acme-02 Section 6.4](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4). - -## [Section 7.1.4](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.1.4) - -Boulder does not implement the `scope` field in authorization objects. - -## [Section 7.2](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.2) - -Boulder doesn't implement the `new-nonce` endpoint, instead it responds to `HEAD` requests with a valid `Replay-Nonce` header per [draft-ietf-acme-03 Section 5.4](https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-5.4). - -## [Section 7.3](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.3) - -Boulder only allows `mailto` URIs in the registrations `contact` list. - -Boulder uses an HTTP status code 409 (Conflict) response for an already existing registration instead of 200 (OK). Boulder returns the URI of the already existing registration in a `Location` header field instead of a `Content-Location` header field. - -Boulder does not return the `status` field. - -Boulder does not implement the `only-return-existing` field. 
- -## [Section 7.3.1](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.3.1) - -Boulder does not implement the `only-return-existing` behaviour and will always create a new account if an account for the given key does not exist. - -## [Section 7.3.6](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.3.6) - -Boulder implements draft-05 style key roll-over with a few divergences. Since Boulder doesn't currently use the registration URL to identify users we do not check for that field in the JWS protected headers but do check for it in the inner payload. Boulder also requires the outer JWS payload contains the `"resource": "key-change"` field. - -## [Section 7.4](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.4) - -Boulder does not implement orders (previously called `applications`), instead it implements the `new-cert` flow from [draft-ietf-acme-02 Section 6.5](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5). Instead of authorizations in the order response, Boulder currently uses authorizations that are created using the `new-authz` flow from [draft-ietf-acme-02 Section 6.4](https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.4). Certificates are not proactively issued, a user must request issuance via the `new-cert` endpoint instead of assuming a certificate will be created once all required authorizations are validated. - -## [Section 7.4.2](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.4.2) - -Boulder does not process `Accept` headers for `Content-Type` negotiation when retrieving certificates. Boulder returns certificates with the `Content-Type` value `application/pkix-cert` instead of `application/pem-certificate-chain`. - -## [Section 7.5](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.5) - -Boulder returns an `uri` instead of an `url` field in challenge objects. - -Boulder uses an HTTP status code 202 (Accepted) response for correct challenge responses instead of 200 (OK) as defined in [Section 7.1](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-7.1). - -## [Section 8.2](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-8.2) - -Boulder does not implement the ability to retry challenges or the `Retry-After` header. - -## [Section 8.6](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-8.6) - -Boulder does not implement the `oob-01` validation method. - -## [Section 9.5](https://tools.ietf.org/html/draft-ietf-acme-acme-07#section-9.6) - -Boulder uses the `urn:acme:` namespace from [draft-ietf-acme-01 Section 5.4](https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-5.4) for errors instead of `urn:ietf:params:acme:`. diff --git a/docs/acme-divergences.md b/docs/acme-divergences.md index 9256019416f..60f41d4d20d 100644 --- a/docs/acme-divergences.md +++ b/docs/acme-divergences.md @@ -1,26 +1,13 @@ # Boulder divergences from ACME -While Boulder attempts to implement the ACME specification ([RFC 8555]) as strictly as possible there are places at which we will diverge from the letter of the specification for various reasons. This document describes the difference between RFC 8555 and Boulder's implementation of ACME, informally called ACMEv2 and available at https://acme-v02.api.letsencrypt.org/directory. Boulder's implementation of ACMEv1 differs substantially from the final RFC. Documentation for Boulder's ACMEv1 support is available in [acme-divergences-v1.md](acme-divergences-v1.md). 
A listing of RFC conformant design decisions that may differ from other ACME servers is listed in [implementation_details](https://github.com/letsencrypt/boulder/blob/main/docs/acme-implementation_details.md).
+While Boulder attempts to implement the ACME specification ([RFC 8555]) as strictly as possible, there are places at which we will diverge from the letter of the specification for various reasons. This document describes the differences between [RFC 8555] and Boulder's implementation of ACME, informally called ACMEv2 and available at https://acme-v02.api.letsencrypt.org/directory. A listing of RFC conformant design decisions that may differ from other ACME servers is listed in [implementation_details](https://github.com/letsencrypt/boulder/blob/main/docs/acme-implementation_details.md).
 
-
-Presently, Boulder diverges from the RFC 8555 ACME spec in the following ways:
+Presently, Boulder diverges from the [RFC 8555] ACME spec in the following ways:
 
 ## [Section 6.3](https://tools.ietf.org/html/rfc8555#section-6.3)
 
-Boulder supports POST-as-GET but does not mandate it by default for requests
+Boulder supports POST-as-GET but does not mandate it for requests
 that simply fetch a resource (certificate, order, authorization, or challenge).
-This behavior is configurable with a flag: Let's Encrypt's Staging environment
-does mandate POST-as-GET, while the Production environment does not.
-
-## [Section 6.6](https://tools.ietf.org/html/rfc8555#section-6.6)
-
-Boulder does not provide a `Retry-After` header when a user hits a rate-limit, nor does it provide `Link` headers to further documentation on rate-limiting.
-
-## [Section 6.7](https://tools.ietf.org/html/rfc8555#section-6.7)
-
-Boulder uses `invalidEmail` in place of the error `invalidContact`.
-
-Boulder does not implement the `unsupportedContact` and `dnssec` errors.
 
 ## [Section 7.1.2](https://tools.ietf.org/html/rfc8555#section-7.1.2)
 
@@ -31,7 +18,7 @@ support this non-essential feature in the future. Please follow Boulder Issue
 
 ## [Section 7.4](https://tools.ietf.org/html/rfc8555#section-7.4)
 
 Boulder does not accept the optional `notBefore` and `notAfter` fields of a
-`newOrder` request paylod.
+`newOrder` request payload.
 
 ## [Section 7.4.1](https://tools.ietf.org/html/rfc8555#section-7.4.1)
 
diff --git a/docs/config-validation.md b/docs/config-validation.md
new file mode 100644
index 00000000000..6f22e169e12
--- /dev/null
+++ b/docs/config-validation.md
@@ -0,0 +1,183 @@
+# Configuration Validation
+
+We use a fork of https://github.com/go-playground/validator which can be found
+at https://github.com/letsencrypt/validator.
+
+## Usage
+
+By default Boulder validates config files for all components with a registered
+validator.
Validating a config file for a given component is as simple as
+running the component directly:
+
+```shell
+$ ./bin/boulder-observer -config test/config-next/observer.yml
+Error validating config file "test/config-next/observer.yml": Key: 'ObsConf.MonConfs[1].Kind' Error:Field validation for 'Kind' failed on the 'oneof' tag
+```
+
+or by running the `boulder` binary and passing the component name as a
+subcommand:
+
+```shell
+$ ./bin/boulder boulder-observer -config test/config-next/observer.yml
+Error validating config file "test/config-next/observer.yml": Key: 'ObsConf.MonConfs[1].Kind' Error:Field validation for 'Kind' failed on the 'oneof' tag
+```
+
+## Struct Tag Tips
+
+You can find the full list of struct tags supported by the validator
+[here](https://pkg.go.dev/github.com/go-playground/validator/v10#section-documentation).
+The following are some tips for struct tags that are commonly used in our
+configuration files.
+
+### `required`
+
+The required tag means that the field is not allowed to take its zero value, or
+equivalently, is not allowed to be omitted. Note that this does not validate
+that slices or maps have contents, it simply guarantees that they are not nil.
+For fields of those types, you should use `min=1` or similar to ensure they are
+not empty.
+
+There are also "conditional" required tags, such as `required_with`,
+`required_with_all`, `required_without`, `required_without_all`, and
+`required_unless`. These behave exactly like the basic required tag, but only if
+their conditional (usually the presence or absence of one or more other named
+fields) is met.
+
+### `omitempty`
+
+The omitempty tag allows a field to be empty, or equivalently, to take its zero
+value. If the field is omitted, none of the other validation tags on the field
+will be enforced. This can be useful for tags like `validate:"omitempty,url"`,
+for a field which is optional, but must be a URL if it is present.
+
+The omitempty tag can be "overruled" by the various conditional required tags.
+For example, a field with tag `validate:"omitempty,url,required_with=Foo"` is
+allowed to be empty when field Foo is not present, but if field Foo is present,
+then this field must be present and must be a URL.
+
+### `-`
+
+Normally, config validation descends into all struct-type fields, recursively
+validating their fields all the way down. Sometimes this can pose a problem,
+when a nested struct declares one of its fields as required, but a parent struct
+wants to treat the whole nested struct as optional. The "-" tag tells the
+validation not to recurse, marking the tagged field as optional, and therefore
+making all of its sub-fields optional as well. We use this tag for many config
+duration and password file struct valued fields which are optional in some
+configs but required in others.
+
+### `structonly`
+
+The structonly tag allows a struct valued field to be empty, or equivalently, to
+take its zero value, if it's not "overruled" by various conditional tags. If the
+field is omitted, the recursive validation of the struct's fields will be skipped.
+This can be useful for tags like `validate:"required_without=Foo,structonly"`
+for a struct valued field which is only required, and thus should only be
+validated, if field `Foo` is not present.
+
+### `min=1`, `gte=1`
+
+These validate that the value of an integer valued field is greater than zero and
+that the length of a slice or map is greater than zero.
+
+For instance, the following would be valid config for a slice valued field
+tagged with `required`:
+```json
+{
+  "foo": []
+}
+```
+
+But only the following would be valid config for a slice valued field tagged
+with `min=1`:
+```json
+{
+  "foo": ["bar"]
+}
+```
+
+### `len`
+
+Same as `eq` (equal to) but can also be used to validate the length of
+strings.
+
+### `hostname_port`
+
+The
+[docs](https://pkg.go.dev/github.com/go-playground/validator/v10#hdr-HostPort)
+for this tag are scant on detail, but it validates that the value is a valid
+RFC 1123 hostname and port. It is used to validate many of the
+`ListenAddress` and `DebugAddr` fields of our components.
+
+#### Future Work
+
+This tag is compatible with IPv4 addresses, but not IPv6 addresses. We should
+consider fixing this in our fork of the validator.
+
+### `dive`
+
+This tag is used to validate the values of a slice or map. For instance, the
+following would be valid config for a slice valued field (`[]string`) tagged
+with `min=1,dive,oneof=bar baz`:
+
+```json
+{
+  "foo": ["bar", "baz"]
+}
+```
+
+Note that the `dive` tag introduces an order-dependence in writing tags: tags
+that come before `dive` apply to the current field, while tags that come after
+`dive` apply to the current field's child values. In the example above: `min=1`
+applies to the length of the slice (`[]string`), while `oneof=bar baz` applies
+to the value of each string in the slice.
+
+We can also use `dive` to validate the values of a map. For instance, the
+following would be valid config for a map valued field (`map[string]string`)
+tagged with `min=1,dive,oneof=one two`:
+
+```json
+{
+  "foo": {
+    "bar": "one",
+    "baz": "two"
+  }
+}
+```
+
+`dive` can also be invoked multiple times to validate the values of nested
+slices or maps. For instance, the following would be valid config for a slice of
+slice valued field (`[][]string`) tagged with `min=1,dive,min=2,dive,oneof=bar
+baz`:
+
+```json
+{
+  "foo": [
+    ["bar", "baz"],
+    ["baz", "bar"]
+  ]
+}
+```
+
+- `min=1` will be applied to the outer slice (`[][]string`).
+- `min=2` will be applied to the inner slice (`[]string`).
+- `oneof=bar baz` will be applied to each string in the inner slice.
+
+### `keys` and `endkeys`
+
+These tags are used to validate the keys of a map. For instance, the following
+would be valid config for a map valued field (`map[string]string`) tagged with
+`min=1,dive,keys,eq=1|eq=2,endkeys,required`:
+
+```json
+{
+  "foo": {
+    "1": "bar",
+    "2": "baz"
+  }
+}
+```
+
+- `min=1` will be applied to the map itself
+- `eq=1|eq=2` will be applied to the map keys
+- `required` will be applied to map values
diff --git a/docs/load-testing.md b/docs/load-testing.md
deleted file mode 100644
index ab6f5c8c33e..00000000000
--- a/docs/load-testing.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Load testing the OCSP signing components.
-
-Here are instructions on how to realistically load test the OCSP signing
-components of Boulder, exercising the pkcs11key, boulder-ca, and
-ocsp-updater components.
-
-Set up a SoftHSM instance running pkcs11-daemon on some remote host with more
-CPUs than your local machine. Easiest way to do this is to clone the Boulder
-repo, and on the remote machine run:
-
-    remote-machine$ docker-compose run -p 5657:5657 bhsm
-
-Check that the port is open:
-
-    local-machine$ nc -zv remote-machine 5657
-    Connection to remote-machine 5657 port [tcp/*] succeeded!
- -Edit docker-compose.yml to change these in the "boulder" section's "env": - - PKCS11_PROXY_SOCKET: tcp://remote-machine:5657 - FAKE_DNS: 172.17.0.1 - -Run the pkcs11key benchmark to check raw signing speed at various settings for SESSIONS: - - local-machine$ docker-compose run -e SESSIONS=4 -e MODULE=/usr/local/lib/softhsm/libsofthsm2.so --entrypoint /go/src/github.com/letsencrypt/pkcs11key/test.sh boulder - -Initialize the tokens for use by Boulder: - - local-machine$ docker-compose run --entrypoint "softhsm --module /usr/local/lib/softhsm/libsofthsm2.so --init-token --pin 5678 --so-pin 1234 --slot 0 --label intermediate" boulder - local-machine$ docker-compose run --entrypoint "softhsm --module /usr/local/lib/softhsm/libsofthsm2.so --init-token --pin 5678 --so-pin 1234 --slot 1 --label root" boulder - -Configure Boulder to always consider all OCSP responses instantly stale, so it -will sign new ones as fast as it can. Edit "ocspMinTimeToExpiry" in -test/config/ocsp-updater.json (or test/config-next/ocsp-updater.json): - - "ocspMinTimeToExpiry": "0h", - -Run a local Boulder instance: - - local-machine$ docker-compose up - -Issue a bunch of certificates with chisel.py, ideally a few thousand -(corresponding to the default batch size of 5000 in ocsp-updater.json, to make -sure each batch is maxed out): - - local-machine$ while true; do python test/chisel.py $(openssl rand -hex 4).com ; done - -Use the local Prometheus instance to graph the number of complete gRPC calls: - -http://localhost:9090/graph?g0.range_input=5m&g0.expr=irate(grpc_client_handled_total%7Bgrpc_method%3D%22GenerateOCSP%22%7D%5B1m%5D)&g0.tab=0 - -If you vary the NumSessions config value in test/config/ca.json, you should see -the signing speed vary linearly, up to the number of cores in the remote -machine. Note that hyperthreaded cores look like 2 cores but may only perform -as 1 (needs testing). - -Keep in mind that round-trip time between your local machine and your HSM -machine greatly impact signing speed. diff --git a/docs/logging.md b/docs/logging.md index d62b0253de1..9fc6405d0de 100644 --- a/docs/logging.md +++ b/docs/logging.md @@ -1,9 +1,9 @@ # Logging -Boulder can log to stdout, syslog, or both. Boulder components generally have a -`syslog` portion of their JSON config that indicates the maximum level of -log that should be sent to a given destination. For instance, in -`test/config/wfe2.json`: +Boulder can log to stdout/stderr, syslog, or both. Boulder components +generally have a `syslog` portion of their JSON config that indicates the +maximum level of log that should be sent to a given destination. For instance, +in `test/config/wfe2.json`: ``` "syslog": { @@ -13,16 +13,22 @@ log that should be sent to a given destination. For instance, in ``` This indicates that logs of level 4 or below (error and warning) should be -emitted to stdout, and logs of level 6 or below (error, warning, notice, and +emitted to stdout/stderr, and logs of level 6 or below (error, warning, notice, and info) should be emitted to syslog, using the local Unix socket method. The -highest meaningful value is 7, which enabled debug logging. The default value -for these fields is 6 (INFO) for syslogLevel and 0 (no logs) for stdoutLevel. +highest meaningful value is 7, which enables debug logging. + +The stdout/stderr logger uses ANSI escape codes to color warnings as yellow +and errors as red, if stdout is detected to be a terminal. + +The default value for these fields is 6 (INFO) for syslogLevel and 0 (no logs) +for stdoutLevel. 
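+
+For example, a hypothetical config snippet that enables debug logging to
+stdout/stderr while leaving syslog at its default level:
+
+```
+"syslog": {
+  "stdoutLevel": 7,
+  "syslogLevel": 6
+}
+```
+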
 To turn off syslog logging entirely, set syslogLevel to -1.
 
 In Boulder's development environment, we enable stdout logging because that
 makes it easier to see what's going on quickly. In production, we disable stdout
-logging because it would duplicate the syslog logging. We prefer the syslog
+logging because it would duplicate the syslog logging. We preferred the syslog
 logging because it provides things like severity level in a consistent way with
-other components.
+other components. But we may move to stdout/stderr logging to make it easier to
+containerize Boulder.
 
 Boulder has a number of adapters to take other packages' log APIs and send them
 to syslog as expected. For instance, we provide a custom logger for mysql, grpc,
diff --git a/docs/multi-va.md b/docs/multi-va.md
index f31c52aab78..d1d0b044f7d 100644
--- a/docs/multi-va.md
+++ b/docs/multi-va.md
@@ -15,43 +15,34 @@ primary VA will ask both remote VAs to perform matching validations for each
 primary validation). Of course this is a development environment so both the
 primary and remote VAs are all running on one host.
 
-The primary and remote VAs are both the same piece of software, the `boulder-va`
-service ([cmd here](https://github.com/letsencrypt/boulder/tree/main/cmd/boulder-va),
-[package here](https://github.com/letsencrypt/boulder/tree/main/va)).
+The `boulder-va` service ([here](https://github.com/letsencrypt/boulder/tree/main/cmd/boulder-va)) and `remoteva` service ([here](https://github.com/letsencrypt/boulder/tree/main/cmd/remoteva)) are distinct pieces of software that utilize the same package ([here](https://github.com/letsencrypt/boulder/tree/main/va)).
 
 The boulder-ra uses [the same RPC
 interface](https://github.com/letsencrypt/boulder/blob/ea231adc36746cce97f860e818c2cdf92f060543/va/proto/va.proto#L8-L10)
 to ask for a primary validation as the primary VA uses to ask a remote VA for a
 confirmation validation.
 
-Primary VA instances know they are a primary based on the presence of the
-`"remoteVAs"` configuration element. If present it specifies gRPC service
-addresses for other VA instances to use as remotes. There's also a handful of
-feature flags that control how the primary VAs handle the remote VAs.
+Primary VA instances contain a `"remoteVAs"` configuration element. If present
+it specifies gRPC service addresses for `remoteva` instances to use as remote
+VAs. There's also a handful of feature flags that control how the primary VAs
+handle the remote VAs.
 
-In the development environment with `config-next` the two primary VAs are `va1.boulder:9092` and
-`va2.boulder:9092` and use
+In the development environment with `config-next` the two primary VAs are `va1.service.consul:9092` and
+`va2.service.consul:9092` and use
 [`test/config-next/va.json`](https://github.com/letsencrypt/boulder/blob/ea231adc36746cce97f860e818c2cdf92f060543/test/config-next/va.json)
 as their configuration. This config file specifies two `"remoteVA"s`,
-`va1.boulder:9097` and `va2.boulder:9098` and enforces
+`rva1.service.consul:9097` and `rva2.service.consul:9098` and enforces
 [that a maximum of 1 of the 2 remote VAs disagree](https://github.com/letsencrypt/boulder/blob/ea231adc36746cce97f860e818c2cdf92f060543/test/config-next/va.json#L44)
+with the primary VA for all validations.
The remote VA instances use -[`test/config-next/va-remote-a.json`](https://github.com/letsencrypt/boulder/blob/ea231adc36746cce97f860e818c2cdf92f060543/test/config-next/va-remote-a.json) +[`test/config-next/remoteva-a.json`](https://github.com/letsencrypt/boulder/blob/5c27eadb1db0605f380e41c8bd444a7f4ffe3c08/test/config-next/remoteva-a.json) and -[`test/config-next/va-remote-b.json`](https://github.com/letsencrypt/boulder/blob/ea231adc36746cce97f860e818c2cdf92f060543/test/config-next/va-remote-b.json) +[`test/config-next/remoteva-b.json`](https://github.com/letsencrypt/boulder/blob/5c27eadb1db0605f380e41c8bd444a7f4ffe3c08/test/config-next/remoteva-b.json) as their config files. -There are two feature flags that control whether multi-VA takes effect: -MultiVAFullResults and EnforceMultiVA. If MultiVAFullResults is enabled -then each primary validation will also send out remote validation requests, and -wait for all the results to come in, so we can log the results for analysis. If -EnforceMultiVA is enabled, we require that almost all remote validation requests -succeed. The primary VA's "maxRemoteValidationFailures" config field specifies -how many remote VAs can fail before the primary VA considers overall validation -a failure. It should be strictly less than the number of remote VAs. - -Validation is also controlled by the "multiVAPolicyFile" config field on the -primary VA. This specifies a file that can contain temporary overrides for -domains or accounts that fail under multi-va. Over time those temporary -overrides will be removed. +We require that almost all remote validation requests succeed; the exact number +is controlled by the VA based on the thresholds required by MPIC. If the number of +failing remote VAs exceeds that threshold, validation is terminated. If the +number of successful remote VAs is high enough that it would be impossible for +the outstanding remote VAs to exceed that threshold, validation immediately +succeeds. There are some integration tests that test this end to end. The most relevant is probably diff --git a/docs/redis.md b/docs/redis.md index bf8eef87ed3..dae6b23e717 100644 --- a/docs/redis.md +++ b/docs/redis.md @@ -1,32 +1,31 @@ # Redis -We use Redis Cluster for OCSP. The Boulder dev environment stands up a cluster -of 6 nodes, with 3 primaries and 3 replicas. Check docker-compose.yml for -details of those. - -The initial setup is done by test/redis-create.sh, which assigns all the -individual Redis nodes to their roles as primaries or replicas. +We use Redis for storing rate limit data. The Boulder dev environment stands up +two nodes. We use the Ring client in the github.com/redis/go-redis package to +consistently hash our reads and writes across these two nodes. ## Debugging -Our main tool for interacting with our OCSP storage in Redis is cmd/rocsp-tool. -However, sometimes if things aren't working right you might want to drop down a -level. - -The first tool you might turn to is `redis-cli`. You probably don't +Our main tool for interacting with Redis is `redis-cli`. You probably don't have redis-cli on your host, so we'll run it in a Docker container. We also need to pass some specific arguments for TLS and authentication. There's a script that handles all that for you: `test/redis-cli.sh`. 
First, make sure your -redis cluster is running: +redis is running: -``` -docker-compose up bredis_clusterer +```shell +docker compose up boulder ``` -Then, in a different window, run: +Then, in a different window, run the following to connect to `bredis_1`: +```shell +./test/redis-cli.sh -h 10.77.77.4 ``` -./test/redis-cli.sh -h 10.33.33.2 + +Similarly, to connect to `bredis_2`: + +```shell +./test/redis-cli.sh -h 10.77.77.5 ``` You can pass any IP address for the -h (host) parameter. The full list of IP @@ -36,12 +35,12 @@ redis-cli commandline parameters. They'll get passed through. You may want to go a level deeper and communicate with a Redis node using the Redis protocol. Here's the command to do that (run from the Boulder root): -``` -openssl s_client -connect 10.33.33.2:4218 \ - -CAfile test/redis-tls/minica.pem \ - -cert test/redis-tls/boulder/cert.pem \ - -key test/redis-tls/boulder/key.pem +```shell +openssl s_client -connect 10.77.77.4:4218 \ + -CAfile test/certs/ipki/minica.pem \ + -cert test/certs/ipki/localhost/cert.pem \ + -key test/certs/ipki/localhost/key.pem ``` Then, first thing when you connect, run `AUTH `. You can get a -list of usernames and passwords from test/redis.config. +list of usernames and passwords from test/redis-ratelimits.config. diff --git a/docs/release.md b/docs/release.md new file mode 100644 index 00000000000..fae2243cfdd --- /dev/null +++ b/docs/release.md @@ -0,0 +1,131 @@ +# Boulder Release Process + +A description and demonstration of the full process for tagging a normal weekly +release and a hotfix release. + +Once a release is tagged, it will be generally deployed to +[staging](https://letsencrypt.org/docs/staging-environment/) and then to +[production](https://acme-v02.api.letsencrypt.org/) over the next few days. + +## Goals + +1. All development, including reverts and hotfixes needed to patch a broken + release, happens on the `main` branch of this repository. Code is never + deployed without being reviewed and merged here first, and code is never + landed on a release branch that isn't landed on `main` first. + +2. Doing a normal release requires approximately zero thought. It Just Works. + +3. Doing a hotfix release differs as little as possible from the normal release + process. + +## Release Schedule + +Boulder developers make a new release at the beginning of each week, typically +around 10am PST **Monday**. Operations deploys the new release to the [staging +environment](https://letsencrypt.org/docs/staging-environment/) on **Tuesday**, +typically by 2pm PST. If there have been no issues discovered with the release +from its time in staging, then on **Thursday** the operations team deploys the +release to the production environment. + +Holidays, unexpected bugs, and other resource constraints may affect the above +schedule and result in staging or production updates being skipped. It should be +considered a guideline for normal releases but not a strict contract. + +## Release Structure + +As of 2025-06-30, releases are tagged with a tag of the form `v0.YYYYMMDD.N`, where +the `YYYYMMDD` is the date that the initial release is cut (usually the Monday +of the current week), and `N` is an integer indicating the hotfix number, +starting at `0`. For example, a regular release might be `v0.20250707.0`, and +the first hotfix for that release would be `v0.20250707.1`. + +Historically, releases were tagged with the form `release-YYYY-MM-DD[x]`, where +`[x]` was an optional lowercase letter suffix for hotfixes. 
For example, the +second hotfix release (i.e. third release overall) in the third week of +January 2022 was [`release-2022-01-18b`](https://github.com/letsencrypt/boulder/releases/tag/release-2022-01-18b). + +All release tags are signed with a key associated with a Boulder developer. Tag +signatures are automatically verified by GitHub using the public keys that +developer has uploaded, and are additionally checked before being built and +deployed to our staging and production environments. Note that, due to how Git +works, in order for a tag to be signed it must also have a message; we set the +tag message to just be a slightly more readable version of the tag name. + +## Making a Release + +### Prerequisites + +* You must have a GPG key with signing capability: + * [Checking for existing GPG keys](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/checking-for-existing-gpg-keys) + +* If you don't have a GPG key with signing capability, create one: + * [Generating a new local GPG key](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/generating-a-new-gpg-key) + * [Generating a new Yubikey GPG key](https://support.yubico.com/hc/en-us/articles/360013790259-Using-Your-YubiKey-with-OpenPGP) + +* The signing GPG key must be added to your GitHub account: + * [Adding a new GPG key to your GitHub + account](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/adding-a-new-gpg-key-to-your-github-account) + +* `git` *may* need to be configured to call the correct GPG binary: + * The default: `git config --global gpg.program gpg` is correct for most Linux platforms + * On macOS and some Linux platforms: `git config --global gpg.program gpg2` is correct + +* `git` must be configured to use the correct GPG key: + * [Telling Git about your GPG key](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/telling-git-about-your-signing-key) + +* Understand the [process for signing tags](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-tags) + +### Regular Releases + +Simply create a signed tag. The `tools/release/tag` tool will automatically +determine the correct tag name based on the current date. + +```sh +go run github.com/letsencrypt/boulder/tools/release/tag@main +``` + +This will print the newly-created tag and instructions on how to push it after +you are satisfied that it is correct. Alternately you can run the command with +the `-push` flag to push the resulting tag automatically. + +### Hotfix Releases + +Sometimes it is necessary to create a new release which looks like a prior +release but with one or more additional commits added. This is usually the case +when we discover a critical bug in the currently-deployed version that needs to +be fixed, but we don't want to include other changes that have already been +merged to `main` since the currently-deployed release was tagged. + +In this situation, we create a new hotfix release branch starting at the point +of the previous release tag. We then use the normal GitHub PR and code-review +process to copy the necessary fix(es) from `main` (where they must already be +merged) to the release branch. Finally we create a new release tag at the tip +of the release branch instead of the tip of main. 
+
+To create the new release branch, substitute the name of the release tag which you want to use as the starting point into this command:
+
+```sh
+go run github.com/letsencrypt/boulder/tools/release/branch@main v0.YYYYMMDD.0
+```
+
+This will create a release branch named `release-branch-v0.YYYYMMDD`. When all necessary PRs have been merged into that branch, create the new tag by substituting the branch name into this command:
+
+```sh
+go run github.com/letsencrypt/boulder/tools/release/tag@main release-branch-v0.YYYYMMDD
+```
+
+## Deploying Releases
+
+When doing a release, SRE's tooling will check that:
+
+1. GitHub shows that tests have passed for the commit at the planned release
+   tag.
+
+2. The planned release tag is an ancestor of the current `main` on GitHub, or
+   the planned release tag is equal to the head of a branch named
+   `release-branch-XXX`, and all commits between `main` and the head of that
+   branch are cherry-picks of commits which landed on `main` following the
+   normal review process.
+
+These checks ensure that all deployed code has been properly reviewed and tested before reaching production environments.
diff --git a/errors/errors.go b/errors/errors.go
index 3ca9988a6be..14d5b7af873 100644
--- a/errors/errors.go
+++ b/errors/errors.go
@@ -1,7 +1,31 @@
+// Package errors provides a special error type for use in Boulder. This error
+// type carries additional type information with it, and has two special powers:
+//
+// 1. It is recognized by our gRPC code, and the type metadata and detail string
+// will cross gRPC boundaries intact.
+//
+// 2. It is recognized by our frontend API "rendering" code, and will be
+// automatically converted to the corresponding urn:ietf:params:acme:error:...
+// ACME Problem Document.
+//
+// This means that a deeply-nested service (such as the SA) that wants to ensure
+// that the ACME client sees a particular problem document (such as NotFound)
+// can return a BoulderError and be sure that it will be propagated all the way
+// to the client.
+//
+// Note, however, that any additional context wrapped *around* the BoulderError
+// (such as by fmt.Errorf("oops: %w", err)) will be lost when the error is
+// converted into a problem document. Similarly, any type information wrapped
+// *by* a BoulderError (such as a sql.ErrNoRows) is lost at the gRPC
+// serialization boundary.
 package errors
 
 import (
 	"fmt"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 
 	"github.com/letsencrypt/boulder/identifier"
 )
@@ -12,9 +36,12 @@ import (
 // BoulderError wrapping one of these types.
 type ErrorType int
 
+// These numeric constants are used when sending berrors through gRPC.
 const (
+	// InternalServer is deprecated. Instead, pass a plain Go error. That will get
+	// turned into a probs.InternalServerError by the WFE.
 	InternalServer ErrorType = iota
-	_
+	_ // Reserved, previously NotSupported
 	Malformed
 	Unauthorized
 	NotFound
@@ -32,6 +59,18 @@ const (
 	BadCSR
 	AlreadyRevoked
 	BadRevocationReason
+	UnsupportedContact
+	// The requested serial number does not exist in the `serials` table.
+	UnknownSerial
+	Conflict
+	// Defined in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/00/
+	InvalidProfile
+	// The certificate being indicated for replacement already has a replacement
+	// order.
+	AlreadyReplaced
+	BadSignatureAlgorithm
+	AccountDoesNotExist
+	BadNonce
 )
 
 func (ErrorType) Error() string {
@@ -43,6 +82,10 @@ type BoulderError struct {
 	Type      ErrorType
 	Detail    string
 	SubErrors []SubBoulderError
+
+	// RetryAfter is the duration a client should wait before retrying the
+	// request which resulted in this error.
+	RetryAfter time.Duration
 }
 
 // SubBoulderError represents sub-errors specific to an identifier that are
@@ -52,99 +95,249 @@ type SubBoulderError struct {
 	Identifier identifier.ACMEIdentifier
 }
 
+// Error implements the error interface, returning a string representation of
+// this error.
 func (be *BoulderError) Error() string {
 	return be.Detail
}
 
+// Unwrap implements the optional error-unwrapping interface. It returns the
+// underlying ErrorType, all of whose values themselves implement the error
+// interface, so that `errors.Is(someError, berrors.Malformed)` works.
 func (be *BoulderError) Unwrap() error {
 	return be.Type
 }
 
+// GRPCStatus implements the interface implicitly defined by gRPC's
+// status.FromError, which uses this function to detect if the error produced
+// by the gRPC server implementation code is a gRPC status.Status. Implementing
+// this means that BoulderErrors serialized in gRPC response metadata can be
+// accompanied by a gRPC status other than "UNKNOWN".
+func (be *BoulderError) GRPCStatus() *status.Status {
+	var c codes.Code
+	switch be.Type {
+	case InternalServer:
+		c = codes.Internal
+	case Malformed:
+		c = codes.InvalidArgument
+	case Unauthorized:
+		c = codes.PermissionDenied
+	case NotFound:
+		c = codes.NotFound
+	case RateLimit:
+		c = codes.Unknown
+	case RejectedIdentifier:
+		c = codes.InvalidArgument
+	case InvalidEmail:
+		c = codes.InvalidArgument
+	case ConnectionFailure:
+		c = codes.Unavailable
+	case CAA:
+		c = codes.FailedPrecondition
+	case MissingSCTs:
+		c = codes.Internal
+	case Duplicate:
+		c = codes.AlreadyExists
+	case OrderNotReady:
+		c = codes.FailedPrecondition
+	case DNS:
+		c = codes.Unknown
+	case BadPublicKey:
+		c = codes.InvalidArgument
+	case BadCSR:
+		c = codes.InvalidArgument
+	case AlreadyRevoked:
+		c = codes.AlreadyExists
+	case BadRevocationReason:
+		c = codes.InvalidArgument
+	case UnsupportedContact:
+		c = codes.InvalidArgument
+	default:
+		c = codes.Unknown
+	}
+	return status.New(c, be.Error())
+}
+
 // WithSubErrors returns a new BoulderError instance created by adding the
 // provided subErrs to the existing BoulderError.
 func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
 	return &BoulderError{
-		Type:      be.Type,
-		Detail:    be.Detail,
-		SubErrors: append(be.SubErrors, subErrs...),
+		Type:       be.Type,
+		Detail:     be.Detail,
+		SubErrors:  append(be.SubErrors, subErrs...),
+		RetryAfter: be.RetryAfter,
 	}
 }
 
-// New is a convenience function for creating a new BoulderError
-func New(errType ErrorType, msg string, args ...interface{}) error {
+// New is a convenience function for creating a new BoulderError.
+func New(errType ErrorType, msg string) error {
+	return &BoulderError{
+		Type:   errType,
+		Detail: msg,
+	}
+}
+
+// newf is a convenience function for creating a new BoulderError with a
+// formatted message.
+func newf(errType ErrorType, msg string, args ...any) error {
 	return &BoulderError{
 		Type:   errType,
 		Detail: fmt.Sprintf(msg, args...),
 	}
 }
 
-func InternalServerError(msg string, args ...interface{}) error {
-	return New(InternalServer, msg, args...)
+func InternalServerError(msg string, args ...any) error {
+	return newf(InternalServer, msg, args...)
+} + +func MalformedError(msg string, args ...any) error { + return newf(Malformed, msg, args...) +} + +func UnauthorizedError(msg string, args ...any) error { + return newf(Unauthorized, msg, args...) +} + +func NotFoundError(msg string, args ...any) error { + return newf(NotFound, msg, args...) +} + +func RateLimitError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...), + RetryAfter: retryAfter, + } +} + +func RegistrationsPerIPAddressError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ip-address", args...), + RetryAfter: retryAfter, + } +} + +func RegistrationsPerIPv6RangeError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ipv6-range", args...), + RetryAfter: retryAfter, + } } -func MalformedError(msg string, args ...interface{}) error { - return New(Malformed, msg, args...) +func NewOrdersPerAccountError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-orders-per-account", args...), + RetryAfter: retryAfter, + } } -func UnauthorizedError(msg string, args ...interface{}) error { - return New(Unauthorized, msg, args...) +func CertificatesPerDomainError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain", args...), + RetryAfter: retryAfter, + } } -func NotFoundError(msg string, args ...interface{}) error { - return New(NotFound, msg, args...) +func CertificatesPerFQDNSetError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-exact-set-of-identifiers", args...), + RetryAfter: retryAfter, + } +} + +func FailedAuthorizationsPerDomainPerAccountError(retryAfter time.Duration, msg string, args ...any) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#authorization-failures-per-identifier-per-account", args...), + RetryAfter: retryAfter, + } } -func RateLimitError(msg string, args ...interface{}) error { +func LimitOverrideRequestsPerIPAddressError(retryAfter time.Duration, msg string, args ...any) error { return &BoulderError{ - Type: RateLimit, - Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...), + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ip-address", args...), + RetryAfter: retryAfter, } } -func RejectedIdentifierError(msg string, args ...interface{}) error { - return New(RejectedIdentifier, msg, args...) +func RejectedIdentifierError(msg string, args ...any) error { + return newf(RejectedIdentifier, msg, args...) +} + +func InvalidEmailError(msg string, args ...any) error { + return newf(InvalidEmail, msg, args...) +} + +func UnsupportedContactError(msg string, args ...any) error { + return newf(UnsupportedContact, msg, args...) 
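}

For reference, a minimal usage sketch (an editorial illustration, not part of
this patch) of the API above; the `berrors` import alias follows Boulder
convention, and the messages and values are made up:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"google.golang.org/grpc/status"

	berrors "github.com/letsencrypt/boulder/errors"
)

func main() {
	// A deeply-nested service returns a typed error...
	err := berrors.NotFoundError("no order found for ID %d", 1234)

	// ...and callers match on the ErrorType rather than the message. This
	// works because BoulderError.Unwrap returns its ErrorType.
	if errors.Is(err, berrors.NotFound) {
		fmt.Println("will render as an ACME not-found problem:", err)
	}

	// gRPC's status.FromError recognizes the GRPCStatus method, so the
	// error crosses service boundaries with a code other than UNKNOWN.
	st, ok := status.FromError(err)
	fmt.Println(ok, st.Code()) // true NotFound

	// Rate limit errors additionally carry a RetryAfter hint that a
	// frontend could surface to clients.
	var be *berrors.BoulderError
	if errors.As(berrors.RateLimitError(5*time.Minute, "too many certificates"), &be) {
		fmt.Println("retry after:", be.RetryAfter)
	}
}
```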

-func InvalidEmailError(msg string, args ...interface{}) error {
-	return New(InvalidEmail, msg, args...)
+func ConnectionFailureError(msg string, args ...any) error {
+	return newf(ConnectionFailure, msg, args...)
}

-func ConnectionFailureError(msg string, args ...interface{}) error {
-	return New(ConnectionFailure, msg, args...)
+func CAAError(msg string, args ...any) error {
+	return newf(CAA, msg, args...)
}

-func CAAError(msg string, args ...interface{}) error {
-	return New(CAA, msg, args...)
+func MissingSCTsError(msg string, args ...any) error {
+	return newf(MissingSCTs, msg, args...)
}

-func MissingSCTsError(msg string, args ...interface{}) error {
-	return New(MissingSCTs, msg, args...)
+func DuplicateError(msg string, args ...any) error {
+	return newf(Duplicate, msg, args...)
}

-func DuplicateError(msg string, args ...interface{}) error {
-	return New(Duplicate, msg, args...)
+func OrderNotReadyError(msg string, args ...any) error {
+	return newf(OrderNotReady, msg, args...)
}

-func OrderNotReadyError(msg string, args ...interface{}) error {
-	return New(OrderNotReady, msg, args...)
+func DNSError(msg string, args ...any) error {
+	return newf(DNS, msg, args...)
}

-func DNSError(msg string, args ...interface{}) error {
-	return New(DNS, msg, args...)
+func BadPublicKeyError(msg string, args ...any) error {
+	return newf(BadPublicKey, msg, args...)
}

-func BadPublicKeyError(msg string, args ...interface{}) error {
-	return New(BadPublicKey, msg, args...)
+func BadCSRError(msg string, args ...any) error {
+	return newf(BadCSR, msg, args...)
}

-func BadCSRError(msg string, args ...interface{}) error {
-	return New(BadCSR, msg, args...)
+func AlreadyReplacedError(msg string, args ...any) error {
+	return newf(AlreadyReplaced, msg, args...)
}

-func AlreadyRevokedError(msg string, args ...interface{}) error {
-	return New(AlreadyRevoked, msg, args...)
+func AlreadyRevokedError(msg string, args ...any) error {
+	return newf(AlreadyRevoked, msg, args...)
}

func BadRevocationReasonError(reason int64) error {
-	return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
+	return newf(BadRevocationReason, "disallowed revocation reason: %d", reason)
+}
+
+func UnknownSerialError() error {
+	return newf(UnknownSerial, "unknown serial")
+}
+
+func InvalidProfileError(msg string, args ...any) error {
+	return newf(InvalidProfile, msg, args...)
+}
+
+func BadSignatureAlgorithmError(msg string, args ...any) error {
+	return newf(BadSignatureAlgorithm, msg, args...)
+}
+
+func AccountDoesNotExistError(msg string, args ...any) error {
+	return newf(AccountDoesNotExist, msg, args...)
+}
+
+func BadNonceError(msg string, args ...any) error {
+	return newf(BadNonce, msg, args...)
} diff --git a/errors/errors_test.go b/errors/errors_test.go index 675b2359749..f69abbf4674 100644 --- a/errors/errors_test.go +++ b/errors/errors_test.go @@ -17,14 +17,14 @@ func TestWithSubErrors(t *testing.T) { subErrs := []SubBoulderError{ { - Identifier: identifier.DNSIdentifier("example.com"), + Identifier: identifier.NewDNS("example.com"), BoulderError: &BoulderError{ Type: RateLimit, Detail: "everyone uses this example domain", }, }, { - Identifier: identifier.DNSIdentifier("what about example.com"), + Identifier: identifier.NewDNS("what about example.com"), BoulderError: &BoulderError{ Type: RateLimit, Detail: "try a real identifier value next time", @@ -39,7 +39,7 @@ func TestWithSubErrors(t *testing.T) { test.AssertDeepEquals(t, outResult.SubErrors, subErrs) // Adding another suberr shouldn't squash the original sub errors anotherSubErr := SubBoulderError{ - Identifier: identifier.DNSIdentifier("another ident"), + Identifier: identifier.NewDNS("another ident"), BoulderError: &BoulderError{ Type: RateLimit, Detail: "another rate limit err", diff --git a/features/featureflag_string.go b/features/featureflag_string.go deleted file mode 100644 index b3b68b70590..00000000000 --- a/features/featureflag_string.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT. - -package features - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[unused-0] - _ = x[PrecertificateRevocation-1] - _ = x[StripDefaultSchemePort-2] - _ = x[NonCFSSLSigner-3] - _ = x[StoreIssuerInfo-4] - _ = x[StreamlineOrderAndAuthzs-5] - _ = x[V1DisableNewValidations-6] - _ = x[CAAValidationMethods-7] - _ = x[CAAAccountURI-8] - _ = x[EnforceMultiVA-9] - _ = x[MultiVAFullResults-10] - _ = x[MandatoryPOSTAsGET-11] - _ = x[AllowV1Registration-12] - _ = x[StoreRevokerInfo-13] - _ = x[RestrictRSAKeySizes-14] - _ = x[FasterNewOrdersRateLimit-15] - _ = x[ECDSAForAll-16] - _ = x[ServeRenewalInfo-17] - _ = x[GetAuthzReadOnly-18] - _ = x[GetAuthzUseIndex-19] - _ = x[CheckFailedAuthorizationsFirst-20] - _ = x[AllowReRevocation-21] - _ = x[MozRevocationReasons-22] -} - -const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasons" - -var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 148, 161, 175, 193, 211, 230, 246, 265, 289, 300, 316, 332, 348, 378, 395, 415} - -func (i FeatureFlag) String() string { - if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) { - return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]] -} diff --git a/features/features.go b/features/features.go index 4608d1d63ff..fb3fe9920c7 100644 --- a/features/features.go +++ b/features/features.go @@ -1,158 +1,119 @@ -//go:generate stringer -type=FeatureFlag - +// features provides the Config struct, which is used to define feature flags +// that can affect behavior across Boulder components. 
It also maintains a +// global singleton Config which can be referenced by arbitrary Boulder code +// without having to pass a collection of feature flags through the function +// call graph. package features import ( - "fmt" "sync" ) -type FeatureFlag int +// Config contains one boolean field for every Boulder feature flag. It can be +// included directly in an executable's Config struct to have feature flags be +// automatically parsed by the json config loader; executables that do so must +// then call features.Set(parsedConfig) to load the parsed struct into this +// package's global Config. +type Config struct { + // Deprecated flags. + IncrementRateLimits bool + UseKvLimitsForNewOrder bool + DisableLegacyLimitWrites bool + MultipleCertificateProfiles bool + InsertAuthzsIndividually bool + EnforceMultiCAA bool + EnforceMPIC bool + MPICFullResults bool + UnsplitIssuance bool + ExpirationMailerUsesJoin bool + DOH bool + IgnoreAccountContacts bool + NoPendingAuthzReuse bool + ServeRenewalInfo bool + StoreAuthzsInOrders bool + StoreARIReplacesInOrders bool -const ( - unused FeatureFlag = iota // unused is used for testing - // Deprecated features, these can be removed once stripped from production configs - PrecertificateRevocation - StripDefaultSchemePort - NonCFSSLSigner - StoreIssuerInfo - StreamlineOrderAndAuthzs - V1DisableNewValidations + // CertCheckerChecksValidations enables an extra query for each certificate + // checked, to find the relevant authzs. Since this query might be + // expensive, we gate it behind a feature flag. + CertCheckerChecksValidations bool - // Currently in-use features - // Check CAA and respect validationmethods parameter. - CAAValidationMethods - // Check CAA and respect accounturi parameter. - CAAAccountURI - // EnforceMultiVA causes the VA to block on remote VA PerformValidation - // requests in order to make a valid/invalid decision with the results. - EnforceMultiVA - // MultiVAFullResults will cause the main VA to wait for all of the remote VA - // results, not just the threshold required to make a decision. - MultiVAFullResults - // MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME - // resources. - MandatoryPOSTAsGET - // Allow creation of new registrations in ACMEv1. - AllowV1Registration - // StoreRevokerInfo enables storage of the revoker and a bool indicating if the row - // was checked for extant unrevoked certificates in the blockedKeys table. - StoreRevokerInfo - // RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to - // the common sizes (2048, 3072, and 4096 bits). - RestrictRSAKeySizes - // FasterNewOrdersRateLimit enables use of a separate table for counting the - // new orders rate limit. - FasterNewOrdersRateLimit - // ECDSAForAll enables all accounts, regardless of their presence in the CA's - // ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers. - ECDSAForAll - // ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for - // GET requests. WARNING: This feature is a draft and highly unstable. - ServeRenewalInfo - // GetAuthzReadOnly causes the SA to use its read-only database connection - // (which is generally pointed at a replica rather than the primary db) when - // querying the authz2 table. - GetAuthzReadOnly - // GetAuthzUseIndex causes the SA to use to add a USE INDEX hint when it - // queries the authz2 table. - GetAuthzUseIndex - // Check the failed authorization limit before doing authz reuse. 
- CheckFailedAuthorizationsFirst - // AllowReRevocation causes the RA to allow the revocation reason of an - // already-revoked certificate to be updated to `keyCompromise` from any - // other reason if that compromise is demonstrated by making the second - // revocation request signed by the certificate keypair. - AllowReRevocation - // MozRevocationReasons causes the RA to enforce the following upcoming - // Mozilla policies regarding revocation: - // - A subscriber can request that their certificate be revoked with reason - // keyCompromise, even without demonstrating that compromise at the time. - // However, the cert's pubkey will not be added to the blocked keys list. - // - When an applicant other than the original subscriber requests that a - // certificate be revoked (by demonstrating control over all names in it), - // the cert will be revoked with reason cessationOfOperation, regardless of - // what revocation reason they request. - // - When anyone requests that a certificate be revoked by signing the request - // with the certificate's keypair, the cert will be revoked with reason - // keyCompromise, regardless of what revocation reason they request. - MozRevocationReasons -) + // CertCheckerRequiresValidations causes cert-checker to fail if the + // query enabled by CertCheckerChecksValidations didn't find corresponding + // authorizations. + CertCheckerRequiresValidations bool -// List of features and their default value, protected by fMu -var features = map[FeatureFlag]bool{ - unused: false, - CAAValidationMethods: false, - CAAAccountURI: false, - EnforceMultiVA: false, - MultiVAFullResults: false, - MandatoryPOSTAsGET: false, - AllowV1Registration: true, - V1DisableNewValidations: false, - PrecertificateRevocation: false, - StripDefaultSchemePort: false, - StoreIssuerInfo: false, - StoreRevokerInfo: false, - RestrictRSAKeySizes: false, - FasterNewOrdersRateLimit: false, - NonCFSSLSigner: false, - ECDSAForAll: false, - StreamlineOrderAndAuthzs: false, - ServeRenewalInfo: false, - GetAuthzReadOnly: false, - GetAuthzUseIndex: false, - CheckFailedAuthorizationsFirst: false, - AllowReRevocation: false, - MozRevocationReasons: false, -} + // AsyncFinalize enables the RA to return approximately immediately from + // requests to finalize orders. This allows us to take longer getting SCTs, + // issuing certs, and updating the database; it indirectly reduces the number + // of issuances that fail due to timeouts during storage. However, it also + // requires clients to properly implement polling the Order object to wait + // for the cert URL to appear. + AsyncFinalize bool -var fMu = new(sync.RWMutex) + // CAARechecksFailOrder causes the RA to set an order to "invalid" if its CAA + // rechecks fail. + CAARechecksFailOrder bool -var initial = map[FeatureFlag]bool{} + // CheckIdentifiersPaused checks if any of the identifiers in the order are + // currently paused at NewOrder time. If any are paused, an error is + // returned to the Subscriber indicating that the order cannot be processed + // until the paused identifiers are unpaused and the order is resubmitted. + CheckIdentifiersPaused bool -var nameToFeature = make(map[string]FeatureFlag, len(features)) + // PropagateCancels controls whether the WFE allows + // cancellation of an inbound request to cancel downstream gRPC and other + // queries. In practice, cancellation of an inbound request is achieved by + // Nginx closing the connection on which the request was happening. This may + // help shed load in overcapacity situations. 
+	// However, note that in-progress
+	// database queries (for instance, in the SA) are not cancelled. Database
+	// queries waiting for an available connection may be cancelled.
+	PropagateCancels bool

-func init() {
-	for f, v := range features {
-		nameToFeature[f.String()] = f
-		initial[f] = v
-	}
+	// AutomaticallyPauseZombieClients configures the RA to automatically track
+	// and pause issuance for each (account, hostname) pair that repeatedly
+	// fails validation.
+	AutomaticallyPauseZombieClients bool
+
+	// DNSAccount01Enabled controls support for the dns-account-01 challenge
+	// type. When enabled, the server can offer and validate this challenge
+	// during certificate issuance. This flag must be set to true in the
+	// RA, VA, and WFE2 services for full functionality.
+	DNSAccount01Enabled bool
}

-// Set accepts a list of features and whether they should
-// be enabled or disabled, it will return a error if passed
-// a feature name that it doesn't know
-func Set(featureSet map[string]bool) error {
+var fMu = new(sync.RWMutex)
+var global = Config{}
+
+// Set changes the global Config to match the input Config. This
+// overrides any previous changes made to the global Config.
+//
+// When used in tests, the caller must defer features.Reset() to avoid leaving
+// dirty global state.
+func Set(fs Config) {
	fMu.Lock()
	defer fMu.Unlock()
-	for n, v := range featureSet {
-		f, present := nameToFeature[n]
-		if !present {
-			return fmt.Errorf("feature '%s' doesn't exist", n)
-		}
-		features[f] = v
-	}
-	return nil
+	// If the Config type ever changes, this must be updated to still copy
+	// the input argument, never hold a reference to it.
+	global = fs
}

-// Enabled returns true if the feature is enabled or false
-// if it isn't, it will panic if passed a feature that it
-// doesn't know.
-func Enabled(n FeatureFlag) bool {
-	fMu.RLock()
-	defer fMu.RUnlock()
-	v, present := features[n]
-	if !present {
-		panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
-	}
-	return v
-}
-
-// Reset resets the features to their initial state
+// Reset resets all features to their initial state (false).
func Reset() {
	fMu.Lock()
	defer fMu.Unlock()
-	for k, v := range initial {
-		features[k] = v
-	}
+	global = Config{}
+}
+
+// Get returns a copy of the current global Config, indicating which
+// features are currently enabled (set to true). Expected caller behavior looks
+// like:
+//
+//	if features.Get().FeatureName { ...
+func Get() Config {
+	fMu.RLock()
+	defer fMu.RUnlock()
+	// If the Config type ever changes, this must be updated to still return
+	// only a copy of the current state, never a reference directly to it.
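+	//
+	// For example (an illustrative sketch, not part of this patch): an
+	// executable whose JSON config was parsed into a struct with a
+	// `Features Config` field would call features.Set(parsedConfig.Features)
+	// once at startup, after which any code path may branch on the copy
+	// returned here:
+	//
+	//	if features.Get().AsyncFinalize {
+	//		// take the asynchronous finalization path
+	//	}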
+ return global } diff --git a/features/features_test.go b/features/features_test.go deleted file mode 100644 index dff198d68e4..00000000000 --- a/features/features_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package features - -import ( - "testing" - - "github.com/letsencrypt/boulder/test" -) - -func TestFeatures(t *testing.T) { - features = map[FeatureFlag]bool{ - unused: false, - } - test.Assert(t, !Enabled(unused), "'unused' shouldn't be enabled") - - err := Set(map[string]bool{"unused": true}) - test.AssertNotError(t, err, "Set shouldn't have failed setting existing features") - test.Assert(t, Enabled(unused), "'unused' should be enabled") - - Reset() - test.Assert(t, !Enabled(unused), "'unused' shouldn't be enabled") - - err = Set(map[string]bool{"non-existent": true}) - test.AssertError(t, err, "Set should've failed trying to enable a non-existent feature") - - defer func() { - if r := recover(); r == nil { - t.Errorf("Enabled did not panic on an unknown feature") - } - }() - features = map[FeatureFlag]bool{} - Enabled(unused) -} diff --git a/go.mod b/go.mod index 1e01789b3d3..65d0c7d077e 100644 --- a/go.mod +++ b/go.mod @@ -1,71 +1,95 @@ module github.com/letsencrypt/boulder -go 1.17 +go 1.25.0 require ( - github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af - github.com/eggsampler/acme/v3 v3.2.1 - github.com/go-gorp/gorp/v3 v3.0.2 - github.com/go-redis/redis/v8 v8.11.4 - github.com/go-sql-driver/mysql v1.5.0 + github.com/aws/aws-sdk-go-v2 v1.40.1 + github.com/aws/aws-sdk-go-v2/config v1.32.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.93.0 + github.com/aws/smithy-go v1.24.0 + github.com/eggsampler/acme/v3 v3.6.2 + github.com/go-jose/go-jose/v4 v4.1.2 + github.com/go-logr/stdr v1.2.2 + github.com/go-sql-driver/mysql v1.9.1 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da - github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/honeycombio/beeline-go v1.1.1 - github.com/hpcloud/tail v1.0.0 - github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 - github.com/letsencrypt/challtestsrv v1.2.1 + github.com/google/certificate-transparency-go v1.3.2-0.20250507091337-0eddb39e94f8 + github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 + github.com/jmhodges/clock v1.2.0 + github.com/letsencrypt/borp v0.0.0-20251118150929-89c6927051ae + github.com/letsencrypt/challtestsrv v1.3.3 github.com/letsencrypt/pkcs11key/v4 v4.0.0 - github.com/miekg/dns v1.1.45 + github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158 + github.com/miekg/dns v1.1.61 github.com/miekg/pkcs11 v1.1.1 - github.com/prometheus/client_golang v1.12.1 - github.com/prometheus/client_model v0.2.0 + github.com/nxadm/tail v1.4.11 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.1 + github.com/redis/go-redis/extra/redisotel/v9 v9.5.3 + github.com/redis/go-redis/v9 v9.10.0 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 - github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236 - github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f - github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628 - golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 - golang.org/x/net v0.0.0-20211029224645-99673261e6eb - golang.org/x/text v0.3.6 - google.golang.org/grpc v1.36.1 - google.golang.org/protobuf v1.27.1 - gopkg.in/square/go-jose.v2 v2.4.1 - gopkg.in/yaml.v2 v2.4.0 + github.com/weppos/publicsuffix-go 
v0.50.1 + github.com/zmap/zcrypto v0.0.0-20250129210703-03c45d0bae98 + github.com/zmap/zlint/v3 v3.6.6 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + golang.org/x/crypto v0.44.0 + golang.org/x/net v0.47.0 + golang.org/x/term v0.37.0 + golang.org/x/text v0.31.0 + golang.org/x/time v0.11.0 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 + gopkg.in/yaml.v3 v3.0.1 ) require ( + filippo.io/edwards25519 v1.1.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.15 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.11 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.3 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect - github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect - github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect - github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect - github.com/honeycombio/libhoney-go v1.15.2 // indirect - github.com/klauspost/compress v1.11.4 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/syndtr/goleveldb v1.0.0 // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.1 // indirect - go.opentelemetry.io/contrib/propagators v0.19.0 // indirect - go.opentelemetry.io/otel v0.19.0 // indirect - go.opentelemetry.io/otel/trace v0.19.0 // indirect - golang.org/x/mod v0.4.2 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.6 // indirect - google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect - gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect - gopkg.in/fsnotify.v1 v1.4.7 // indirect + 
github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/poy/onpar v1.1.2 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/tools v0.38.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect ) - -// This version is required by parts of the honeycombio/beeline-go package -// that we do not rely upon. It appears to introduce performance regressions -// for us. -exclude github.com/go-sql-driver/mysql v1.6.0 diff --git a/go.sum b/go.sum index 59bafb67c72..4b2a4d209db 100644 --- a/go.sum +++ b/go.sum @@ -1,77 +1,70 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af h1:XbgLdZvVbWsK9HAhAYOp6rksTAdOVYDBQtGSVOLlJrw= -github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg= +github.com/aws/aws-sdk-go-v2 v1.40.1 h1:difXb4maDZkRH0x//Qkwcfpdg1XQVXEAEs2DdXldFFc= +github.com/aws/aws-sdk-go-v2 v1.40.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= +github.com/aws/aws-sdk-go-v2/config v1.32.3 h1:cpz7H2uMNTDa0h/5CYL5dLUEzPSLo2g0NkbxTRJtSSU= +github.com/aws/aws-sdk-go-v2/config v1.32.3/go.mod 
h1:srtPKaJJe3McW6T/+GMBZyIPc+SeqJsNPJsd4mOYZ6s= +github.com/aws/aws-sdk-go-v2/credentials v1.19.3 h1:01Ym72hK43hjwDeJUfi1l2oYLXBAOR8gNSZNmXmvuas= +github.com/aws/aws-sdk-go-v2/credentials v1.19.3/go.mod h1:55nWF/Sr9Zvls0bGnWkRxUdhzKqj9uRNlPvgV1vgxKc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.15 h1:utxLraaifrSBkeyII9mIbVwXXWrZdlPO7FIKmyLCEcY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.15/go.mod h1:hW6zjYUDQwfz3icf4g2O41PHi77u10oAzJ84iSzR/lo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.15 h1:Y5YXgygXwDI5P4RkteB5yF7v35neH7LfJKBG+hzIons= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.15/go.mod h1:K+/1EpG42dFSY7CBj+Fruzm8PsCGWTXJ3jdeJ659oGQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.15 h1:AvltKnW9ewxX2hFmQS0FyJH93aSvJVUEFvXfU+HWtSE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.15/go.mod h1:3I4oCdZdmgrREhU74qS1dK9yZ62yumob+58AbFR4cQA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.15 h1:NLYTEyZmVZo0Qh183sC8nC+ydJXOOeIL/qI/sS3PdLY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.15/go.mod h1:Z803iB3B0bc8oJV8zH2PERLRfQUJ2n2BXISpsA4+O1M= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.6 h1:P1MU/SuhadGvg2jtviDXPEejU3jBNhoeeAlRadHzvHI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.6/go.mod h1:5KYaMG6wmVKMFBSfWoyG/zH8pWwzQFnKgpoSRlXHKdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.15 h1:3/u/4yZOffg5jdNk1sDpOQ4Y+R6Xbh+GzpDrSZjuy3U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.15/go.mod h1:4Zkjq0FKjE78NKjabuM4tRXKFzUJWXgP0ItEZK8l7JU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.15 h1:wsSQ4SVz5YE1crz0Ap7VBZrV4nNqZt4CIBBT8mnwoNc= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.15/go.mod h1:I7sditnFGtYMIqPRU1QoHZAUrXkGp4SczmlLwrNPlD0= +github.com/aws/aws-sdk-go-v2/service/s3 v1.93.0 h1:IrbE3B8O9pm3lsg96AXIN5MXX4pECEuExh/A0Du3AuI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.93.0/go.mod h1:/sJLzHtiiZvs6C1RbxS/anSAFwZD6oC6M/kotQzOiLw= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.3 h1:d/6xOGIllc/XW1lzG9a4AUBMmpLA9PXcQnVPTuHHcik= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.3/go.mod h1:fQ7E7Qj9GiW8y0ClD7cUJk3Bz5Iw8wZkWDHsTe8vDKs= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.6 h1:8sTTiw+9yuNXcfWeqKF2x01GqCF49CpP4Z9nKrrk/ts= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.6/go.mod h1:8WYg+Y40Sn3X2hioaaWAAIngndR8n1XFdRPPX+7QBaM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.11 h1:E+KqWoVsSrj1tJ6I/fjDIu5xoS2Zacuu1zT+H7KtiIk= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.11/go.mod h1:qyWHz+4lvkXcr3+PoGlGHEI+3DLLiU6/GdrFfMaAhB0= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.3 h1:tzMkjh0yTChUqJDgGkcDdxvZDSrJ/WB6R6ymI5ehqJI= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.3/go.mod h1:T270C0R5sZNLbWUe8ueiAF42XSZxxPocTaGSgs5c/60= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -79,353 +72,164 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm 
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/eggsampler/acme/v3 v3.2.1 h1:Lfsrg3M2zt00QRnizOFzdpSfsS9oDvPsGrodXS/w1KI= -github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= -github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg= -github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01/go.mod h1:ypD5nozFk9vcGw1ATYefw6jHe/jZP++Z15/+VTMcWhc= -github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8= -github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW87NGRw1FU5p5lEkpnt/QxoH5uPAOUlOVkAUuMg= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/eggsampler/acme/v3 v3.6.2 h1:gvyZbQ92wNQLDASVftGpHEdFwPSfg0+17P0lLt09Tp8= +github.com/eggsampler/acme/v3 v3.6.2/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 
h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= -github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator 
v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= +github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw= -github.com/gobuffalo/envy v1.8.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/envy v1.9.0/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/fizz v1.10.0/go.mod h1:J2XGPO0AfJ1zKw7+2BA+6FEGAkyEsdCOLvN93WCT2WI= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/genny/v2 v2.0.5/go.mod h1:kRkJuAw9mdI37AiEYjV4Dl+TgkBDYf8HZVjLkqe5eBg= -github.com/gobuffalo/github_flavored_markdown v1.1.0/go.mod h1:TSpTKWcRTI0+v7W3x8dkSKMLJSUpuVitlptCkpeY8ic= -github.com/gobuffalo/helpers v0.6.0/go.mod h1:pncVrer7x/KRvnL5aJABLAuT/RhKRR9klL6dkUOhyv8= -github.com/gobuffalo/helpers v0.6.1/go.mod h1:wInbDi0vTJKZBviURTLRMFLE4+nF2uRuuL2fnlYo7w4= -github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM= -github.com/gobuffalo/nulls v0.2.0/go.mod h1:w4q8RoSCEt87Q0K0sRIZWYeIxkxog5mh3eN3C/n+dUc= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI= -github.com/gobuffalo/packr/v2 v2.8.0/go.mod h1:PDk2k3vGevNE3SwVyVRgQCCXETC9SaONCNSXT1Q8M1g= -github.com/gobuffalo/plush/v4 v4.0.0/go.mod h1:ErFS3UxKqEb8fpFJT7lYErfN/Nw6vHGiDMTjxpk5bQ0= -github.com/gobuffalo/pop/v5 v5.3.1/go.mod h1:vcEDhh6cJ3WVENqJDFt/6z7zNb7lLnlN8vj3n5G9rYA= -github.com/gobuffalo/tags/v3 v3.0.2/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA= -github.com/gobuffalo/tags/v3 v3.1.0/go.mod h1:ZQeN6TCTiwAFnS0dNcbDtSgZDwNKSpqajvVtt6mlYpA= -github.com/gobuffalo/validate/v3 v3.0.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0= -github.com/gobuffalo/validate/v3 v3.1.0/go.mod h1:HFpjq+AIiA2RHoQnQVTFKF/ZpUPXwyw82LgyDPxQ9r0= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75 h1:+QTUt+tQMFBQPGB/gfpLj6JKfKISHo0c4U6jLkLYLoY= -github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82c75/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.3.2-0.20250507091337-0eddb39e94f8 h1:1RSWsOSxq2gk4pD/63bhsPwoOXgz2yXVadxXPbwZ0ec= +github.com/google/certificate-transparency-go v1.3.2-0.20250507091337-0eddb39e94f8/go.mod h1:6Rm5w0Mlv87LyBNOCgfKYjdIBBpF42XpXGsbQvQGomQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= 
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/honeycombio/beeline-go v1.1.1 h1:sU8r4ae34uEL3/CguSl8Mr+Asz9DL1nfH9Wwk85Pc7U= -github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk= -github.com/honeycombio/libhoney-go v1.15.2 h1:5NGcjOxZZma13dmzNcl3OtGbF1hECA0XHJNHEb2t2ck= -github.com/honeycombio/libhoney-go v1.15.2/go.mod h1:JzhRPYgoBCd0rZvudrqmej4Ntx0w7AT3wAJpf5+t1WA= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= 
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= -github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= -github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.4 
h1:kz40R/YWls3iqT9zX9AHN3WoVsrAWVyui5sxuLqiXqU= -github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.3.0/go.mod h1:PvmtTvhVqKDzDQy4d3bWzPjZLzom4iQbAZy2sgZ/qI8= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/letsencrypt/challtestsrv v1.2.1 h1:Lzv4jM+wSgVMCeO5a/F/IzSanhClstFMnX6SfrAJXjI= -github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/borp v0.0.0-20251118150929-89c6927051ae h1:yFuF5yRIwaandcuNMi1A4he4FMWJsGRv38rsizIaxJA= +github.com/letsencrypt/borp v0.0.0-20251118150929-89c6927051ae/go.mod h1:gMSMCNKhxox/ccR923EJsIvHeVVYfCABGbirqa0EwuM= +github.com/letsencrypt/challtestsrv v1.3.3 h1:ki02PH84fo6IOe/A+zt1/kfRBp2JrtauEaa5xwjg4/Q= +github.com/letsencrypt/challtestsrv v1.3.3/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= github.com/letsencrypt/pkcs11key/v4 v4.0.0 h1:qLc/OznH7xMr5ARJgkZCCWk+EomQkiNTOoOF5LAgagc= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs= +github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158 h1:HGFsIltYMUiB5eoFSowFzSoXkocM2k9ctmJ57QMGjys= +github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158/go.mod h1:ZFNBS3H6OEsprCRjscty6GCBe5ZiX44x6qY4s7+bDX0= +github.com/lib/pq v1.10.9 
h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= -github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-sqlite3 v1.14.26 h1:h72fc7d3zXGhHpwjWw+fPOBxYUupuKlbhUAQi5n6t58= +github.com/mattn/go-sqlite3 v1.14.26/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.45 h1:g5fRIhm9nx7g8osrAvgb16QJfmyMsyOCb+J7LSv+Qzk= -github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/mreiferson/go-httpclient v0.0.0-20201222173833-5e475fde3a4d/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= 
-github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3 h1:1/BDligzCa40GTllkDnY3Y5DTHuKCONbB2JcRyIfl20= +github.com/redis/go-redis/extra/rediscmd/v9 v9.5.3/go.mod h1:3dZmcLn3Qw6FLlWASn1g4y+YO9ycEFUOM+bhBmzLVKQ= +github.com/redis/go-redis/extra/redisotel/v9 v9.5.3 h1:kuvuJL/+MZIEdvtb/kTBRiRgYaOmx1l+lYJyVdrRUOs= +github.com/redis/go-redis/extra/redisotel/v9 v9.5.3/go.mod h1:7f/FMrf5RRRVHXgfk7CzSVzXHiWeuOQUu2bsVqWoa+g= +github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs= +github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod 
h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -435,418 +239,224 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 
h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= -github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236 h1:vMJBP3PQViZsF6cOINtvyMC8ptpLsyJ4EwyFnzuWNxc= -github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/weppos/publicsuffix-go v0.40.3-0.20250127173806-e489a31678ca/go.mod h1:43Dfyxu2dpmLg56at26Q4k9gwf3yWSUiwk8kGnwzULk= +github.com/weppos/publicsuffix-go v0.50.1 h1:elrBHeSkS/eIb169+DnLrknqmdP4AjT0Q0tEdytz1Og= +github.com/weppos/publicsuffix-go v0.50.1/go.mod h1:znn0JVXjcR5hpUl9pbEogwH6I710rA1AX0QQPT0bf+k= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= -github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f h1:MzfKzJlHUwLuo5aFqBY5PHBoJRdTCiUpyieQ2NC82kM= -github.com/zmap/zcrypto v0.0.0-20210811211718-6f9bc4aff20f/go.mod h1:y/9hjFEub4DtQxTHp/pqticBgdYeCwL97vojV3lsvHY= -github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628 h1:KdHxmTxFmsKE8BGa5/JP7RuNgyNya4VoB15cnHDYWpw= -github.com/zmap/zlint/v3 v3.3.1-0.20211019173530-cb17369b4628/go.mod h1:O+4OXRfNLKqOyDl4eKZ1SBlYudKGUBGRFcv+m1KLr28= +github.com/zmap/zcertificate v0.0.1/go.mod h1:q0dlN54Jm4NVSSuzisusQY0hqDWvu92C+TWveAxiVWk= +github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ= +github.com/zmap/zcrypto v0.0.0-20201211161100-e54a5822fb7e/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ= +github.com/zmap/zcrypto v0.0.0-20250129210703-03c45d0bae98 h1:Qp98bmMm9JHPPOaLi2Nb6oWoZ+1OyOMWI7PPeJrirI0= +github.com/zmap/zcrypto v0.0.0-20250129210703-03c45d0bae98/go.mod h1:YTUyN/U1oJ7RzCEY5hUweYxbVUu7X+11wB7OXZT15oE= +github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8= +github.com/zmap/zlint/v3 v3.6.6 h1:tH7RJM9bDmh7IonlLEkFIkIn8XDYDYjehhUPgpLVqYA= +github.com/zmap/zlint/v3 v3.6.6/go.mod 
h1:6yXG+CBOQBRpMCOnpIVPUUL296m5HYksZC9bj5LZkwE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY= -go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE= -go.opentelemetry.io/otel v0.19.0 h1:Lenfy7QHRXPZVsw/12CWpxX6d/JkrX8wrx2vO8G80Ng= -go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg= -go.opentelemetry.io/otel/metric v0.19.0 h1:dtZ1Ju44gkJkYvo+3qGqVXmf88tc+a42edOywypengg= -go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc= -go.opentelemetry.io/otel/oteltest v0.19.0 h1:YVfA0ByROYqTwOxqHVZYZExzEpfZor+MU1rU+ip2v9Q= -go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA= -go.opentelemetry.io/otel/trace v0.19.0 h1:1ucYlenXIDA1OlHVLDZKX0ObXV5RLaq06DtUKz5e5zc= -go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= 
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod 
v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb h1:pirldcYWx7rx7kE5r+9WsOXPXK0+WH5+uZ7uPmJ44uM= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 h1:BonxutuHCTL0rBDnZlKjpGIQFTjyUVTexFOdWkB6Fg0= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools 
v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= -gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y= -gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/goodkey/blocked.go b/goodkey/blocked.go deleted file mode 100644 index 3457f5b12b5..00000000000 --- a/goodkey/blocked.go +++ /dev/null @@ -1,98 +0,0 @@ -package goodkey - -import ( - "crypto" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "errors" - 
"io/ioutil" - - "github.com/letsencrypt/boulder/core" - - yaml "gopkg.in/yaml.v2" -) - -// blockedKeys is a type for maintaining a map of SHA256 hashes -// of SubjectPublicKeyInfo's that should be considered blocked. -// blockedKeys are created by using loadBlockedKeysList. -type blockedKeys map[core.Sha256Digest]bool - -var ErrWrongDecodedSize = errors.New("not enough bytes decoded for sha256 hash") - -// blocked checks if the given public key is considered administratively -// blocked based on a SHA256 hash of the SubjectPublicKeyInfo. -// Important: blocked should not be called except on a blockedKeys instance -// returned from loadBlockedKeysList. -// function should not be used until after `loadBlockedKeysList` has returned. -func (b blockedKeys) blocked(key crypto.PublicKey) (bool, error) { - hash, err := core.KeyDigest(key) - if err != nil { - // the bool result should be ignored when err is != nil but to be on the - // paranoid side return true anyway so that a key we can't compute the - // digest for will always be blocked even if a caller foolishly discards the - // err result. - return true, err - } - return b[hash], nil -} - -// loadBlockedKeysList creates a blockedKeys object that can be used to check if -// a key is blocked. It creates a lookup map from a list of -// SHA256 hashes of SubjectPublicKeyInfo's in the input YAML file -// with the expected format: -// -// ``` -// blocked: -// - cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M= -// -// - Qebc1V3SkX3izkYRGNJilm9Bcuvf0oox4U2Rn+b4JOE= -// ``` -// -// If no hashes are found in the input YAML an error is returned. -func loadBlockedKeysList(filename string) (*blockedKeys, error) { - yamlBytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - var list struct { - BlockedHashes []string `yaml:"blocked"` - BlockedHashesHex []string `yaml:"blockedHashesHex"` - } - err = yaml.Unmarshal(yamlBytes, &list) - if err != nil { - return nil, err - } - - if len(list.BlockedHashes) == 0 && len(list.BlockedHashesHex) == 0 { - return nil, errors.New("no blocked hashes in YAML") - } - - blockedKeys := make(blockedKeys, len(list.BlockedHashes)+len(list.BlockedHashesHex)) - for _, b64Hash := range list.BlockedHashes { - decoded, err := base64.StdEncoding.DecodeString(b64Hash) - if err != nil { - return nil, err - } - if len(decoded) != sha256.Size { - return nil, ErrWrongDecodedSize - } - var sha256Digest core.Sha256Digest - copy(sha256Digest[:], decoded[0:sha256.Size]) - blockedKeys[sha256Digest] = true - } - for _, hexHash := range list.BlockedHashesHex { - decoded, err := hex.DecodeString(hexHash) - if err != nil { - return nil, err - } - if len(decoded) != sha256.Size { - return nil, ErrWrongDecodedSize - } - var sha256Digest core.Sha256Digest - copy(sha256Digest[:], decoded[0:sha256.Size]) - blockedKeys[sha256Digest] = true - } - return &blockedKeys, nil -} diff --git a/goodkey/blocked_test.go b/goodkey/blocked_test.go deleted file mode 100644 index f8f8ad2ef80..00000000000 --- a/goodkey/blocked_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package goodkey - -import ( - "context" - "crypto" - "io/ioutil" - "os" - "testing" - - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/test" - "github.com/letsencrypt/boulder/web" - yaml "gopkg.in/yaml.v2" -) - -func TestBlockedKeys(t *testing.T) { - // Start with an empty list - var inList struct { - BlockedHashes []string `yaml:"blocked"` - BlockedHashesHex []string `yaml:"blockedHashesHex"` - } - - yamlList, err := yaml.Marshal(&inList) - 
test.AssertNotError(t, err, "error marshaling test blockedKeys list") - - yamlListFile, err := ioutil.TempFile("", "test-blocked-keys-list.*.yaml") - test.AssertNotError(t, err, "error creating test blockedKeys yaml file") - defer os.Remove(yamlListFile.Name()) - - err = ioutil.WriteFile(yamlListFile.Name(), yamlList, 0640) - test.AssertNotError(t, err, "error writing test blockedKeys yaml file") - - // Trying to load it should error - _, err = loadBlockedKeysList(yamlListFile.Name()) - test.AssertError(t, err, "expected error loading empty blockedKeys yaml file") - - // Load some test certs/keys - see ../test/block-a-key/test/README.txt - // for more information. - testCertA, err := core.LoadCert("../test/block-a-key/test/test.rsa.cert.pem") - test.AssertNotError(t, err, "error loading test.rsa.cert.pem") - testCertB, err := core.LoadCert("../test/block-a-key/test/test.ecdsa.cert.pem") - test.AssertNotError(t, err, "error loading test.ecdsa.cert.pem") - testJWKA, err := web.LoadJWK("../test/block-a-key/test/test.rsa.jwk.json") - test.AssertNotError(t, err, "error loading test.rsa.jwk.pem") - testJWKB, err := web.LoadJWK("../test/block-a-key/test/test.ecdsa.jwk.json") - test.AssertNotError(t, err, "error loading test.ecdsa.jwk.pem") - - // All of the above should be blocked - blockedKeys := []crypto.PublicKey{ - testCertA.PublicKey, - testCertB.PublicKey, - testJWKA.Key, - testJWKB.Key, - } - - // Now use a populated list - these values match the base64 digest of the - // public keys in the test certs/JWKs - inList.BlockedHashes = []string{ - "cuwGhNNI6nfob5aqY90e7BleU6l7rfxku4X3UTJ3Z7M=", - } - inList.BlockedHashesHex = []string{ - "41e6dcd55dd2917de2ce461118d262966f4172ebdfd28a31e14d919fe6f824e1", - } - - yamlList, err = yaml.Marshal(&inList) - test.AssertNotError(t, err, "error marshaling test blockedKeys list") - - yamlListFile, err = ioutil.TempFile("", "test-blocked-keys-list.*.yaml") - test.AssertNotError(t, err, "error creating test blockedKeys yaml file") - defer os.Remove(yamlListFile.Name()) - - err = ioutil.WriteFile(yamlListFile.Name(), yamlList, 0640) - test.AssertNotError(t, err, "error writing test blockedKeys yaml file") - - // Trying to load it should not error - outList, err := loadBlockedKeysList(yamlListFile.Name()) - test.AssertNotError(t, err, "unexpected error loading empty blockedKeys yaml file") - - // Create a test policy that doesn't reference the blocked list - testingPolicy := &KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, - } - - // All of the test keys should not be considered blocked - for _, k := range blockedKeys { - err := testingPolicy.GoodKey(context.Background(), k) - test.AssertNotError(t, err, "test key was blocked by key policy without block list") - } - - // Now update the key policy with the blocked list - testingPolicy.blockedList = outList - - // Now all of the test keys should be considered blocked, and with the correct - // type of error. 
-	for _, k := range blockedKeys {
-		err := testingPolicy.GoodKey(context.Background(), k)
-		test.AssertError(t, err, "test key was not blocked by key policy with block list")
-		test.AssertErrorIs(t, err, ErrBadKey)
-	}
-}
diff --git a/goodkey/good_key.go b/goodkey/good_key.go
index b751c376cd1..d8efd703ddc 100644
--- a/goodkey/good_key.go
+++ b/goodkey/good_key.go
@@ -12,10 +12,6 @@ import (
 	"sync"
 
 	"github.com/letsencrypt/boulder/core"
-	berrors "github.com/letsencrypt/boulder/errors"
-	"github.com/letsencrypt/boulder/features"
-	sapb "github.com/letsencrypt/boulder/sa/proto"
-	"google.golang.org/grpc"
 
 	"github.com/titanous/rocacheck"
 )
@@ -43,80 +39,99 @@ var (
 )
 
 type Config struct {
-	// WeakKeyFile is the path to a JSON file containing truncated modulus hashes
-	// of known weak RSA keys. If this config value is empty, then RSA modulus
-	// hash checking will be disabled.
-	WeakKeyFile string
-	// BlockedKeyFile is the path to a YAML file containing base64-encoded SHA256
-	// hashes of PKIX Subject Public Keys that should be blocked. If this config
-	// value is empty, then blocked key checking will be disabled.
-	BlockedKeyFile string
+	// AllowedKeys enables or disables specific key algorithms and sizes. If
+	// nil, defaults to just those keys allowed by the Let's Encrypt CPS.
+	AllowedKeys *AllowedKeys
 	// FermatRounds is an integer number of rounds of Fermat's factorization
 	// method that should be performed to attempt to detect keys whose modulus can
 	// be trivially factored because the two factors are very close to each other.
-	// If this config value is empty (0), no factorization will be attempted.
+	// If this config value is empty or 0, it will default to 110 rounds.
 	FermatRounds int
 }
 
+// AllowedKeys is a set of booleans covering six specific key algorithm and
+// size combinations, each indicating whether keys of that type are considered
+// good.
+type AllowedKeys struct {
+	// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
+	// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
+	// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
+	// have a known method to easily compute their private key, such as Debian Weak
+	// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
+	// common key sizes, so we restrict all issuance to those common key sizes.
+	RSA2048 bool
+	RSA3072 bool
+	RSA4096 bool
+	// Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
+	// points on the NIST P-256, P-384, or P-521 elliptic curves.
+	ECDSAP256 bool
+	ECDSAP384 bool
+	ECDSAP521 bool
+}
+
+// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's
+// Encrypt CPS DV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3072, RSA
+// 4096, ECDSA P-256, and ECDSA P-384.
+// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
+// If this is ever changed, the CP/CPS MUST be changed first.
+func LetsEncryptCPS() AllowedKeys {
+	return AllowedKeys{
+		RSA2048:   true,
+		RSA3072:   true,
+		RSA4096:   true,
+		ECDSAP256: true,
+		ECDSAP384: true,
+	}
+}
+
 // ErrBadKey represents an error with a key. It is distinct from the various
 // ways in which an ACME request can have an erroneous key (BadPublicKeyError,
 // BadCSRError) because this library is used to check both JWS signing keys and
 // keys in CSRs.
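+// Callers can distinguish key-policy rejections from other failures with
+// errors.Is(err, ErrBadKey), as the tests below do.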
 var ErrBadKey = errors.New("")
 
-func badKey(msg string, args ...interface{}) error {
+func badKey(msg string, args ...any) error {
 	return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
 }
 
-// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
-// rather than storing a full sa.SQLStorageAuthority. This makes testing
+// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
+// rather than storing a full sa.SQLStorageAuthority. This lets external
+// users avoid importing all of boulder/sa, and makes testing
 // significantly simpler.
-type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
+// On success, the function returns a boolean which is true if the key is blocked.
+type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
 
 // KeyPolicy determines which types of key may be used with various boulder
 // operations.
 type KeyPolicy struct {
-	AllowRSA           bool // Whether RSA keys should be allowed.
-	AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
-	AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
-	weakRSAList        *WeakRSAKeys
-	blockedList        *blockedKeys
-	fermatRounds       int
-	dbCheck            BlockedKeyCheckFunc
+	allowedKeys  AllowedKeys
+	fermatRounds int
+	blockedCheck BlockedKeyCheckFunc
 }
 
-// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
-// weakKeyFile contains the path to a JSON file containing truncated modulus
-// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
-// checking will be disabled. blockedKeyFile contains the path to a YAML file
-// containing Base64 encoded SHA256 hashes of pkix subject public keys that
-// should be blocked. If this argument is empty then no blocked key checking is
-// performed.
-func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
-	kp := KeyPolicy{
-		AllowRSA:           true,
-		AllowECDSANISTP256: true,
-		AllowECDSANISTP384: true,
-		dbCheck:            bkc,
+// NewPolicy returns a key policy based on the given configuration, with sane
+// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys
+// is used. If the configured FermatRounds is 0, Fermat factorization defaults to
+// attempting 110 rounds.
+func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
+	if config == nil {
+		config = &Config{}
 	}
-	if config.WeakKeyFile != "" {
-		keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
-		if err != nil {
-			return KeyPolicy{}, err
-		}
-		kp.weakRSAList = keyList
+	kp := KeyPolicy{
+		blockedCheck: bkc,
 	}
-	if config.BlockedKeyFile != "" {
-		blocked, err := loadBlockedKeysList(config.BlockedKeyFile)
-		if err != nil {
-			return KeyPolicy{}, err
-		}
-		kp.blockedList = blocked
+	if config.AllowedKeys == nil {
+		kp.allowedKeys = LetsEncryptCPS()
+	} else {
+		kp.allowedKeys = *config.AllowedKeys
 	}
-	if config.FermatRounds < 0 {
-		return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds cannot be negative: %d", config.FermatRounds)
+	if config.FermatRounds == 0 {
+		// The BRs require 100 rounds, so give ourselves a margin above that.
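+		// Each round of Fermat's method below is only a handful of big.Int
+		// operations, so the extra ten rounds are cheap insurance.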
+ kp.fermatRounds = 110 + } else if config.FermatRounds < 100 { + return KeyPolicy{}, fmt.Errorf("Fermat factorization rounds must be at least 100: %d", config.FermatRounds) + } else { + kp.fermatRounds = config.FermatRounds } - kp.fermatRounds = config.FermatRounds return kp, nil } @@ -133,24 +148,15 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro default: return badKey("unsupported key type %T", t) } - // If there is a blocked list configured then check if the public key is one - // that has been administratively blocked. - if policy.blockedList != nil { - if blocked, err := policy.blockedList.blocked(key); err != nil { - return berrors.InternalServerError("error checking blocklist for key: %v", key) - } else if blocked { - return badKey("public key is forbidden") - } - } - if policy.dbCheck != nil { + if policy.blockedCheck != nil { digest, err := core.KeyDigest(key) if err != nil { return badKey("%w", err) } - exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]}) + exists, err := policy.blockedCheck(ctx, digest[:]) if err != nil { return err - } else if exists.Exists { + } else if exists { return badKey("public key is forbidden") } } @@ -266,51 +272,24 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) { // Simply use a whitelist for now. params := c.Params() switch { - case policy.AllowECDSANISTP256 && params == elliptic.P256().Params(): + case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params(): return nil - case policy.AllowECDSANISTP384 && params == elliptic.P384().Params(): + case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params(): + return nil + case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params(): return nil default: return badKey("ECDSA curve %v not allowed", params.Name) } } -var acceptableRSAKeySizes = map[int]bool{ - 2048: true, - 3072: true, - 4096: true, -} - // GoodKeyRSA determines if a RSA pubkey meets our requirements -func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { - if !policy.AllowRSA { - return badKey("RSA keys are not allowed") - } - if policy.weakRSAList != nil && policy.weakRSAList.Known(key) { - return badKey("key is on a known weak RSA key list") - } - - // Baseline Requirements Appendix A - // Modulus must be >= 2048 bits and <= 4096 bits +func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error { modulus := key.N - modulusBitLen := modulus.BitLen() - if features.Enabled(features.RestrictRSAKeySizes) { - if !acceptableRSAKeySizes[modulusBitLen] { - return badKey("key size not supported: %d", modulusBitLen) - } - } else { - const maxKeySize = 4096 - if modulusBitLen < 2048 { - return badKey("key too small: %d", modulusBitLen) - } - if modulusBitLen > maxKeySize { - return badKey("key too large: %d > %d", modulusBitLen, maxKeySize) - } - // Bit lengths that are not a multiple of 8 may cause problems on some - // client implementations. - if modulusBitLen%8 != 0 { - return badKey("key length wasn't a multiple of 8: %d", modulusBitLen) - } + + err := policy.goodRSABitLen(key) + if err != nil { + return err } // Rather than support arbitrary exponents, which significantly increases @@ -341,17 +320,31 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) { if rocacheck.IsWeak(key) { return badKey("key generated by vulnerable Infineon-based hardware") } + // Check if the key can be easily factored via Fermat's factorization method. 
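+	// As a worked example (mirroring the unit tests): for n = 5959 = 59*101,
+	// the method starts at a = ceil(sqrt(5959)) = 78 and succeeds on the third
+	// round, when a = 80 and a*a - n = 441 = 21*21, so p = 80+21 = 101 and
+	// q = 80-21 = 59.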
- if policy.fermatRounds > 0 { - err := checkPrimeFactorsTooClose(modulus, policy.fermatRounds) - if err != nil { - return badKey("key generated with factors too close together: %w", err) - } + err = checkPrimeFactorsTooClose(modulus, policy.fermatRounds) + if err != nil { + return badKey("key generated with factors too close together: %w", err) } return nil } +func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error { + // See comment on AllowedKeys above. + modulusBitLen := key.N.BitLen() + switch { + case modulusBitLen == 2048 && policy.allowedKeys.RSA2048: + return nil + case modulusBitLen == 3072 && policy.allowedKeys.RSA3072: + return nil + case modulusBitLen == 4096 && policy.allowedKeys.RSA4096: + return nil + default: + return badKey("key size not supported: %d", modulusBitLen) + } +} + // Returns true iff integer i is divisible by any of the primes in smallPrimes. // // Short circuits; execution time is dependent on i. Do not use this on secret @@ -411,7 +404,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { b2 := new(big.Int) b2.Mul(a, a).Sub(b2, n) - for i := 0; i < rounds; i++ { + for round := range rounds { // To see if b2 is a perfect square, we take its square root, square that, // and check to see if we got the same result back. bb.Sqrt(b2).Mul(bb, bb) @@ -421,7 +414,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error { bb.Sqrt(bb) p := new(big.Int).Add(a, bb) q := new(big.Int).Sub(a, bb) - return fmt.Errorf("public modulus n = pq factored into p: %s; q: %s", p, q) + return fmt.Errorf("public modulus n = pq factored in %d rounds into p: %s and q: %s", round+1, p, q) } // Set up the next iteration by incrementing a by one and recalculating b2. diff --git a/goodkey/good_key_test.go b/goodkey/good_key_test.go index 2f0a9116d82..133b6ac11ef 100644 --- a/goodkey/good_key_test.go +++ b/goodkey/good_key_test.go @@ -10,30 +10,21 @@ import ( "math/big" "testing" - "github.com/letsencrypt/boulder/features" - sapb "github.com/letsencrypt/boulder/sa/proto" "github.com/letsencrypt/boulder/test" - "google.golang.org/grpc" ) -var testingPolicy = &KeyPolicy{ - AllowRSA: true, - AllowECDSANISTP256: true, - AllowECDSANISTP384: true, -} +// testingPolicy is a simple policy which allows all of the key types, so that +// the unit tests can exercise checks against all key types. +var testingPolicy = &KeyPolicy{allowedKeys: AllowedKeys{ + RSA2048: true, RSA3072: true, RSA4096: true, + ECDSAP256: true, ECDSAP384: true, ECDSAP521: true, +}} func TestUnknownKeyType(t *testing.T) { notAKey := struct{}{} err := testingPolicy.GoodKey(context.Background(), notAKey) test.AssertError(t, err, "Should have rejected a key of unknown type") test.AssertEquals(t, err.Error(), "unsupported key type struct {}") - - // Check for early rejection and that no error is seen from blockedKeys.blocked. 
- testingPolicyWithBlockedKeys := *testingPolicy - testingPolicyWithBlockedKeys.blockedList = &blockedKeys{} - err = testingPolicyWithBlockedKeys.GoodKey(context.Background(), notAKey) - test.AssertError(t, err, "Should have rejected a key of unknown type") - test.AssertEquals(t, err.Error(), "unsupported key type struct {}") } func TestNilKey(t *testing.T) { @@ -54,7 +45,7 @@ func TestSmallModulus(t *testing.T) { } err := testingPolicy.GoodKey(context.Background(), &pubKey) test.AssertError(t, err, "Should have rejected too-short key") - test.AssertEquals(t, err.Error(), "key too small: 2040") + test.AssertEquals(t, err.Error(), "key size not supported: 2040") } func TestLargeModulus(t *testing.T) { @@ -69,7 +60,7 @@ func TestLargeModulus(t *testing.T) { } err := testingPolicy.GoodKey(context.Background(), &pubKey) test.AssertError(t, err, "Should have rejected too-long key") - test.AssertEquals(t, err.Error(), "key too large: 4097 > 4096") + test.AssertEquals(t, err.Error(), "key size not supported: 4097") } func TestModulusModulo8(t *testing.T) { @@ -80,7 +71,7 @@ func TestModulusModulo8(t *testing.T) { } err := testingPolicy.GoodKey(context.Background(), &key) test.AssertError(t, err, "Should have rejected modulus with length not divisible by 8") - test.AssertEquals(t, err.Error(), "key length wasn't a multiple of 8: 2049") + test.AssertEquals(t, err.Error(), "key size not supported: 2049") } var mod2048 = big.NewInt(0).Sub(big.NewInt(0).Lsh(big.NewInt(1), 2048), big.NewInt(1)) @@ -149,12 +140,12 @@ func TestECDSABadCurve(t *testing.T) { var invalidCurves = []elliptic.Curve{ elliptic.P224(), - elliptic.P521(), } var validCurves = []elliptic.Curve{ elliptic.P256(), elliptic.P384(), + elliptic.P521(), } func TestECDSAGoodKey(t *testing.T) { @@ -260,25 +251,28 @@ func TestNonRefKey(t *testing.T) { } func TestDBBlocklistAccept(t *testing.T) { - testCheck := func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{Exists: false}, nil + for _, testCheck := range []BlockedKeyCheckFunc{ + nil, + func(context.Context, []byte) (bool, error) { + return false, nil + }, + } { + policy, err := NewPolicy(nil, testCheck) + test.AssertNotError(t, err, "NewKeyPolicy failed") + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "ecdsa.GenerateKey failed") + err = policy.GoodKey(context.Background(), k.Public()) + test.AssertNotError(t, err, "GoodKey failed with a non-blocked key") } - - policy, err := NewKeyPolicy(&Config{}, testCheck) - test.AssertNotError(t, err, "NewKeyPolicy failed") - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "ecdsa.GenerateKey failed") - err = policy.GoodKey(context.Background(), k.Public()) - test.AssertNotError(t, err, "GoodKey failed with a non-blocked key") } func TestDBBlocklistReject(t *testing.T) { - testCheck := func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{Exists: true}, nil + testCheck := func(context.Context, []byte) (bool, error) { + return true, nil } - policy, err := NewKeyPolicy(&Config{}, testCheck) + policy, err := NewPolicy(nil, testCheck) test.AssertNotError(t, err, "NewKeyPolicy failed") k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -289,61 +283,130 @@ func TestDBBlocklistReject(t *testing.T) { test.AssertEquals(t, err.Error(), "public key is forbidden") } -func TestRSAStrangeSize(t *testing.T) { - err := 
features.Set(map[string]bool{"RestrictRSAKeySizes": true}) - test.AssertNotError(t, err, "failed to set features") - defer features.Reset() +func TestDefaultAllowedKeys(t *testing.T) { + policy, err := NewPolicy(nil, nil) + test.AssertNotError(t, err, "NewPolicy with nil config failed") + test.Assert(t, policy.allowedKeys.RSA2048, "RSA 2048 should be allowed") + test.Assert(t, policy.allowedKeys.RSA3072, "RSA 3072 should be allowed") + test.Assert(t, policy.allowedKeys.RSA4096, "RSA 4096 should be allowed") + test.Assert(t, policy.allowedKeys.ECDSAP256, "NIST P256 should be allowed") + test.Assert(t, policy.allowedKeys.ECDSAP384, "NIST P384 should be allowed") + test.Assert(t, !policy.allowedKeys.ECDSAP521, "NIST P521 should not be allowed") + + policy, err = NewPolicy(&Config{}, nil) + test.AssertNotError(t, err, "NewPolicy with nil config.AllowedKeys failed") + test.Assert(t, policy.allowedKeys.RSA2048, "RSA 2048 should be allowed") + test.Assert(t, policy.allowedKeys.RSA3072, "RSA 3072 should be allowed") + test.Assert(t, policy.allowedKeys.RSA4096, "RSA 4096 should be allowed") + test.Assert(t, policy.allowedKeys.ECDSAP256, "NIST P256 should be allowed") + test.Assert(t, policy.allowedKeys.ECDSAP384, "NIST P384 should be allowed") + test.Assert(t, !policy.allowedKeys.ECDSAP521, "NIST P521 should not be allowed") +} +func TestRSAStrangeSize(t *testing.T) { k := &rsa.PublicKey{N: big.NewInt(10)} - err = testingPolicy.GoodKey(context.Background(), k) + err := testingPolicy.GoodKey(context.Background(), k) test.AssertError(t, err, "expected GoodKey to fail") test.AssertEquals(t, err.Error(), "key size not supported: 4") } func TestCheckPrimeFactorsTooClose(t *testing.T) { - // The prime factors of 5959 are 59 and 101. The values a and b calculated - // by Fermat's method will be 80 and 21. The ceil of the square root of 5959 - // is 78. Therefore it takes 3 rounds of Fermat's method to find the factors. - n := big.NewInt(5959) - err := checkPrimeFactorsTooClose(n, 2) - test.AssertNotError(t, err, "factored n in too few iterations") - err = checkPrimeFactorsTooClose(n, 3) - test.AssertError(t, err, "failed to factor n") - test.AssertContains(t, err.Error(), "p: 101") - test.AssertContains(t, err.Error(), "q: 59") - - // These factors differ only in their second-to-last digit. They're so close - // that a single iteration of Fermat's method is sufficient to find them. - p, ok := new(big.Int).SetString("12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788367", 10) - test.Assert(t, ok, "failed to create large prime") - q, ok := new(big.Int).SetString("12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788337", 10) - test.Assert(t, ok, "failed to create large prime") - n = n.Mul(p, q) - err = checkPrimeFactorsTooClose(n, 0) - test.AssertNotError(t, err, "factored n in too few iterations") - err = checkPrimeFactorsTooClose(n, 1) - test.AssertError(t, err, "failed to factor n") - test.AssertContains(t, err.Error(), fmt.Sprintf("p: %s", p)) - test.AssertContains(t, err.Error(), fmt.Sprintf("q: %s", q)) - - // These factors differ by slightly more than 2^256. 
- p, ok = p.SetString("11779932606551869095289494662458707049283241949932278009554252037480401854504909149712949171865707598142483830639739537075502512627849249573564209082969463", 10) - test.Assert(t, ok, "failed to create large prime") - q, ok = q.SetString("11779932606551869095289494662458707049283241949932278009554252037480401854503793357623711855670284027157475142731886267090836872063809791989556295953329083", 10) - test.Assert(t, ok, "failed to create large prime") - n = n.Mul(p, q) - err = checkPrimeFactorsTooClose(n, 13) - test.AssertNotError(t, err, "factored n in too few iterations") - err = checkPrimeFactorsTooClose(n, 14) - test.AssertError(t, err, "failed to factor n") - test.AssertContains(t, err.Error(), fmt.Sprintf("p: %s", p)) - test.AssertContains(t, err.Error(), fmt.Sprintf("q: %s", q)) + type testCase struct { + name string + p string + q string + expectRounds int + } + + testCases := []testCase{ + { + // The factors 59 and 101 multiply to 5959. The values a and b calculated + // by Fermat's method will be 80 and 21. The ceil of the square root of + // 5959 is 78. Therefore it takes 3 rounds of Fermat's method to find the + // factors. + name: "tiny", + p: "101", + q: "59", + expectRounds: 3, + }, + { + // These factors differ only in their second-to-last digit. They're so close + // that a single iteration of Fermat's method is sufficient to find them. + name: "very close", + p: "12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788367", + q: "12451309173743450529024753538187635497858772172998414407116324997634262083672423797183640278969532658774374576700091736519352600717664126766443002156788337", + expectRounds: 1, + }, + { + // These factors differ by slightly more than 2^256, which takes fourteen + // rounds to factor. + name: "still too close", + p: "11779932606551869095289494662458707049283241949932278009554252037480401854504909149712949171865707598142483830639739537075502512627849249573564209082969463", + q: "11779932606551869095289494662458707049283241949932278009554252037480401854503793357623711855670284027157475142731886267090836872063809791989556295953329083", + expectRounds: 14, + }, + { + // These factors come from a real canon printer in the wild with a broken + // key generation mechanism. + name: "canon printer (2048 bit, 1 round)", + p: "155536235030272749691472293262418471207550926406427515178205576891522284497518443889075039382254334975506248481615035474816604875321501901699955105345417152355947783063521554077194367454070647740704883461064399268622437721385112646454393005862535727615809073410746393326688230040267160616554768771412289114449", + q: "155536235030272749691472293262418471207550926406427515178205576891522284497518443889075039382254334975506248481615035474816604875321501901699955105345417152355947783063521554077194367454070647740704883461064399268622437721385112646454393005862535727615809073410746393326688230040267160616554768771412289114113", + expectRounds: 1, + }, + { + // These factors come from a real innsbruck printer in the wild with a + // broken key generation mechanism. 
+ name: "innsbruck printer (4096 bit, 1 round)", + p: "25868808535211632564072019392873831934145242707953960515208595626279836366691068618582894100813803673421320899654654938470888358089618966238341690624345530870988951109006149164192566967552401505863871260691612081236189439839963332690997129144163260418447718577834226720411404568398865166471102885763673744513186211985402019037772108416694793355840983833695882936201196462579254234744648546792097397517107797153785052856301942321429858537224127598198913168345965493941246097657533085617002572245972336841716321849601971924830462771411171570422802773095537171762650402420866468579928479284978914972383512240254605625661", + q: "25868808535211632564072019392873831934145242707953960515208595626279836366691068618582894100813803673421320899654654938470888358089618966238341690624345530870988951109006149164192566967552401505863871260691612081236189439839963332690997129144163260418447718577834226720411404568398865166471102885763673744513186211985402019037772108416694793355840983833695882936201196462579254234744648546792097397517107797153785052856301942321429858537224127598198913168345965493941246097657533085617002572245972336841716321849601971924830462771411171570422802773095537171762650402420866468579928479284978914972383512240254605624819", + expectRounds: 1, + }, + { + // FIPS requires that |p-q| > 2^(nlen/2 - 100). For example, a 2048-bit + // RSA key must have prime factors with a difference of at least 2^924. + // These two factors have a difference of exactly 2^924 + 4, just *barely* + // FIPS-compliant. Their first different digit is in column 52 of this + // file, which makes them vastly further apart than the cases above. Their + // product cannot be factored even with 100,000,000 rounds of Fermat's + // Algorithm. 
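+			// (Its expectRounds of -1 tells the harness below to assert only
+			// that 100 rounds of Fermat's method fail to factor n.)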
+ name: "barely FIPS compliant (2048 bit)", + p: "151546560166767007654995655231369126386504564489055366370313539237722892921762327477057109592614214965864835328962951695621854530739049166771701397343693962526456985866167580660948398404000483264137738772983130282095332559392185543017295488346592188097443414824871619976114874896240350402349774470198190454623", + q: "151546560166767007654995655231510939369872272987323309037144546294925352276321214430320942815891873491060949332482502812040326472743233767963240491605860423063942576391584034077877871768428333113881339606298282107984376151546711223157061364850161576363709081794948857957944390170575452970542651659150041855843", + expectRounds: -1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + p, ok := new(big.Int).SetString(tc.p, 10) + if !ok { + t.Fatalf("failed to load prime factor p (%s)", tc.p) + } + + q, ok := new(big.Int).SetString(tc.q, 10) + if !ok { + t.Fatalf("failed to load prime factor q (%s)", tc.q) + } + + n := new(big.Int).Mul(p, q) + err := checkPrimeFactorsTooClose(n, 100) + + if tc.expectRounds > 0 { + test.AssertError(t, err, "failed to factor n") + test.AssertContains(t, err.Error(), fmt.Sprintf("p: %s", tc.p)) + test.AssertContains(t, err.Error(), fmt.Sprintf("q: %s", tc.q)) + test.AssertContains(t, err.Error(), fmt.Sprintf("in %d rounds", tc.expectRounds)) + } else { + test.AssertNil(t, err, "factored the unfactorable") + } + }) + } } func benchFermat(rounds int, b *testing.B) { n := big.NewInt(0) n.SetString("801622717394169050106926578578301725055526605503706912100006286161529273473377413824975745384114446662904851914935980611269769546695796451504160869649117000521094368058953989236438103975426680952076533198797388295193391779933559668812684470909409457778161223896975426492372231040386646816154793996920467596916193680611886097694746368434138296683172992347929528214464827172059378866098534956467670429228681248968588692628197119606249988365750115578731538804653322115223303388019261933988266126675740797091559541980722545880793708750882230374320698192373040882555154628949384420712168289605526223733016176898368282023301917856921049583659644200174763940543991507836551835324807116188739389620816364505209568211448815747330488813651206715564392791134964121857454359816296832013457790067067190116393364546525054134704119475840526673114964766611499226043189928040037210929720682839683846078550615582181112536768195193557758454282232948765374797970874053642822355832904812487562117265271449547063765654262549173209805579494164339236981348054782533307762260970390747872669357067489756517340817289701322583209366268084923373164395703994945233187987667632964509271169622904359262117908604555420100186491963838567445541249128944592555657626247", 10) - for i := 0; i < b.N; i++ { + for b.Loop() { if checkPrimeFactorsTooClose(n, rounds) != nil { b.Fatal("factored the unfactorable!") } diff --git a/goodkey/sagoodkey/good_key.go b/goodkey/sagoodkey/good_key.go new file mode 100644 index 00000000000..a339b65f73e --- /dev/null +++ b/goodkey/sagoodkey/good_key.go @@ -0,0 +1,32 @@ +package sagoodkey + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/letsencrypt/boulder/goodkey" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy, +// rather than storing a full sa.SQLStorageAuthority. This makes testing +// significantly simpler. 
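+//
+// As a rough usage sketch (saClient is hypothetical; it stands in for any SA
+// gRPC client whose blocked-key lookup method matches this signature):
+//
+//	policy, err := NewPolicy(&goodkey.Config{}, saClient.KeyBlocked)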
+type BlockedKeyCheckFunc func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error) + +// NewPolicy returns a KeyPolicy that uses a sa.BlockedKey method. +// See goodkey.NewPolicy for more details about the policy itself. +func NewPolicy(config *goodkey.Config, bkc BlockedKeyCheckFunc) (goodkey.KeyPolicy, error) { + var genericCheck goodkey.BlockedKeyCheckFunc + if bkc != nil { + genericCheck = func(ctx context.Context, keyHash []byte) (bool, error) { + exists, err := bkc(ctx, &sapb.SPKIHash{KeyHash: keyHash}) + if err != nil { + return false, err + } + return exists.Exists, nil + } + } + + return goodkey.NewPolicy(config, genericCheck) +} diff --git a/goodkey/sagoodkey/good_key_test.go b/goodkey/sagoodkey/good_key_test.go new file mode 100644 index 00000000000..814804d3d16 --- /dev/null +++ b/goodkey/sagoodkey/good_key_test.go @@ -0,0 +1,48 @@ +package sagoodkey + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "testing" + + "google.golang.org/grpc" + + "github.com/letsencrypt/boulder/goodkey" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" +) + +func TestDBBlocklistAccept(t *testing.T) { + for _, testCheck := range []BlockedKeyCheckFunc{ + nil, + func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error) { + return &sapb.Exists{Exists: false}, nil + }, + } { + policy, err := NewPolicy(&goodkey.Config{}, testCheck) + test.AssertNotError(t, err, "NewKeyPolicy failed") + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "ecdsa.GenerateKey failed") + err = policy.GoodKey(context.Background(), k.Public()) + test.AssertNotError(t, err, "GoodKey failed with a non-blocked key") + } +} + +func TestDBBlocklistReject(t *testing.T) { + testCheck := func(context.Context, *sapb.SPKIHash, ...grpc.CallOption) (*sapb.Exists, error) { + return &sapb.Exists{Exists: true}, nil + } + + policy, err := NewPolicy(&goodkey.Config{}, testCheck) + test.AssertNotError(t, err, "NewKeyPolicy failed") + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "ecdsa.GenerateKey failed") + err = policy.GoodKey(context.Background(), k.Public()) + test.AssertError(t, err, "GoodKey didn't fail with a blocked key") + test.AssertErrorIs(t, err, goodkey.ErrBadKey) + test.AssertEquals(t, err.Error(), "public key is forbidden") +} diff --git a/goodkey/weak.go b/goodkey/weak.go deleted file mode 100644 index 4a63af09a0a..00000000000 --- a/goodkey/weak.go +++ /dev/null @@ -1,66 +0,0 @@ -package goodkey - -// This file defines a basic method for testing if a given RSA public key is on one of -// the Debian weak key lists and is therefore considered compromised. Instead of -// directly loading the hash suffixes from the individual lists we flatten them all -// into a single JSON list using cmd/weak-key-flatten for ease of use. 
- -import ( - "crypto/rsa" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" -) - -type truncatedHash [10]byte - -type WeakRSAKeys struct { - suffixes map[truncatedHash]struct{} -} - -func LoadWeakRSASuffixes(path string) (*WeakRSAKeys, error) { - f, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - var suffixList []string - err = json.Unmarshal(f, &suffixList) - if err != nil { - return nil, err - } - - wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})} - for _, suffix := range suffixList { - err := wk.addSuffix(suffix) - if err != nil { - return nil, err - } - } - return wk, nil -} - -func (wk *WeakRSAKeys) addSuffix(str string) error { - var suffix truncatedHash - decoded, err := hex.DecodeString(str) - if err != nil { - return err - } - if len(decoded) != 10 { - return fmt.Errorf("unexpected suffix length of %d", len(decoded)) - } - copy(suffix[:], decoded) - wk.suffixes[suffix] = struct{}{} - return nil -} - -func (wk *WeakRSAKeys) Known(key *rsa.PublicKey) bool { - // Hash input is in the format "Modulus={upper-case hex of modulus}\n" - hash := sha1.Sum([]byte(fmt.Sprintf("Modulus=%X\n", key.N.Bytes()))) - var suffix truncatedHash - copy(suffix[:], hash[10:]) - _, present := wk.suffixes[suffix] - return present -} diff --git a/goodkey/weak_test.go b/goodkey/weak_test.go deleted file mode 100644 index 2f63a748afa..00000000000 --- a/goodkey/weak_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package goodkey - -import ( - "crypto/rsa" - "encoding/hex" - "io/ioutil" - "math/big" - "os" - "path/filepath" - "testing" - - "github.com/letsencrypt/boulder/test" -) - -func TestKnown(t *testing.T) { - modBytes, err := hex.DecodeString("D673252AF6723C3F72529403EAB7C30DEF3C52F97E799825F4A70191C616ADCF1ECE1113F1625971074C492C592025FDEADBDB146A081826BDF0D77C3C913DCF1B6F0B3B78F5108D2E493AD0EEE8CA5C021711ADC13D358E61133870FCD19C8E5C22403959782AA82E72AEE53A3D491E3912CE27B27E1A85EA69C19A527D28F7934C9823B7E56FDD657DAC83FDC65BB22A98D843DF73238919781B714C81A5E2AFEC71F5C54AA2A27C590AD94C03C1062D50EFCFFAC743E3C8A3AE056846A1D756EB862BF4224169D467C35215ADE0AFCC11E85FE629AFB802C4786FF2E9C929BCCF502B3D3B8876C6A11785CC398B389F1D86BDD9CB0BD4EC13956EC3FA270D") - test.AssertNotError(t, err, "Failed to decode modulus bytes") - mod := &big.Int{} - mod.SetBytes(modBytes) - testKey := rsa.PublicKey{N: mod} - otherKey := rsa.PublicKey{N: big.NewInt(2020)} - - wk := &WeakRSAKeys{suffixes: make(map[truncatedHash]struct{})} - err = wk.addSuffix("8df20e6961a16398b85a") - // a3853d0c563765e504c18df20e6961a16398b85a - test.AssertNotError(t, err, "WeakRSAKeys.addSuffix failed") - test.Assert(t, wk.Known(&testKey), "WeakRSAKeys.Known failed to find suffix that has been added") - test.Assert(t, !wk.Known(&otherKey), "WeakRSAKeys.Known found a suffix that has not been added") -} - -func TestLoadKeys(t *testing.T) { - modBytes, err := hex.DecodeString("D673252AF6723C3F72529403EAB7C30DEF3C52F97E799825F4A70191C616ADCF1ECE1113F1625971074C492C592025FDEADBDB146A081826BDF0D77C3C913DCF1B6F0B3B78F5108D2E493AD0EEE8CA5C021711ADC13D358E61133870FCD19C8E5C22403959782AA82E72AEE53A3D491E3912CE27B27E1A85EA69C19A527D28F7934C9823B7E56FDD657DAC83FDC65BB22A98D843DF73238919781B714C81A5E2AFEC71F5C54AA2A27C590AD94C03C1062D50EFCFFAC743E3C8A3AE056846A1D756EB862BF4224169D467C35215ADE0AFCC11E85FE629AFB802C4786FF2E9C929BCCF502B3D3B8876C6A11785CC398B389F1D86BDD9CB0BD4EC13956EC3FA270D") - test.AssertNotError(t, err, "Failed to decode modulus bytes") - mod := &big.Int{} - mod.SetBytes(modBytes) - testKey := 
rsa.PublicKey{N: mod} - tempDir := t.TempDir() - tempPath := filepath.Join(tempDir, "a.json") - err = ioutil.WriteFile(tempPath, []byte("[\"8df20e6961a16398b85a\"]"), os.ModePerm) - test.AssertNotError(t, err, "Failed to create temporary file") - - wk, err := LoadWeakRSASuffixes(tempPath) - test.AssertNotError(t, err, "Failed to load suffixes from directory") - test.Assert(t, wk.Known(&testKey), "WeakRSAKeys.Known failed to find suffix that has been added") -} diff --git a/grpc/client.go b/grpc/client.go index 1716728569c..87ff82f7995 100644 --- a/grpc/client.go +++ b/grpc/client.go @@ -3,60 +3,88 @@ package grpc import ( "crypto/tls" "errors" - "net" + "fmt" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/honeycombio/beeline-go/wrappers/hnygrpc" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus" "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - bcreds "github.com/letsencrypt/boulder/grpc/creds" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" - // Import for its init function, which causes clients to rely on the - // Health Service for load-balancing. + "github.com/letsencrypt/boulder/cmd" + bcreds "github.com/letsencrypt/boulder/grpc/creds" + + // 'grpc/internal/resolver/dns' is imported for its init function, which + // registers the SRV resolver. + "google.golang.org/grpc/balancer/roundrobin" + + // 'grpc/health' is imported for its init function, which causes clients to + // rely on the Health Service for load-balancing as long as a + // "healthCheckConfig" is specified in the gRPC service config. _ "google.golang.org/grpc/health" + + _ "github.com/letsencrypt/boulder/grpc/internal/resolver/dns" ) // ClientSetup creates a gRPC TransportCredentials that presents -// a client certificate and validates the the server certificate based +// a client certificate and validates the server certificate based // on the provided *tls.Config. // It dials the remote service and returns a grpc.ClientConn if successful. -func ClientSetup(c *cmd.GRPCClientConfig, tlsConfig *tls.Config, metrics clientMetrics, clk clock.Clock, interceptors ...grpc.UnaryClientInterceptor) (*grpc.ClientConn, error) { +func ClientSetup(c *cmd.GRPCClientConfig, tlsConfig *tls.Config, statsRegistry prometheus.Registerer, clk clock.Clock) (*grpc.ClientConn, error) { if c == nil { - return nil, errors.New("nil gRPC client config provided. JSON config is probably missing a fooService section.") - } - if c.ServerAddress == "" { - return nil, errors.New("ServerAddress must not be empty") + return nil, errors.New("nil gRPC client config provided: JSON config is probably missing a fooService section") } if tlsConfig == nil { return nil, errNilTLS } - ci := clientInterceptor{c.Timeout.Duration, metrics, clk} - allInterceptors := []grpc.UnaryClientInterceptor{ - ci.intercept, - ci.metrics.grpcMetrics.UnaryClientInterceptor(), - hnygrpc.UnaryClientInterceptor(), + metrics, err := newClientMetrics(statsRegistry) + if err != nil { + return nil, err + } + + cmi := clientMetadataInterceptor{c.Timeout.Duration, metrics, clk, !c.NoWaitForReady} + + unaryInterceptors := []grpc.UnaryClientInterceptor{ + cmi.Unary, + cmi.metrics.grpcMetrics.UnaryClientInterceptor(), } - allInterceptors = append(interceptors, allInterceptors...) 
- host, _, err := net.SplitHostPort(c.ServerAddress) + + streamInterceptors := []grpc.StreamClientInterceptor{ + cmi.Stream, + cmi.metrics.grpcMetrics.StreamClientInterceptor(), + } + + target, hostOverride, err := c.MakeTargetAndHostOverride() if err != nil { return nil, err } - creds := bcreds.NewClientCredentials(tlsConfig.RootCAs, tlsConfig.Certificates, host) - return grpc.Dial( - "dns:///"+c.ServerAddress, - grpc.WithBalancerName("round_robin"), + + creds := bcreds.NewClientCredentials(tlsConfig.RootCAs, tlsConfig.Certificates, hostOverride) + return grpc.NewClient( + target, + grpc.WithDefaultServiceConfig( + fmt.Sprintf( + // By setting the service name to an empty string in + // healthCheckConfig, we're instructing the gRPC client to query + // the overall health status of each server. The grpc-go health + // server, as constructed by health.NewServer(), unconditionally + // sets the overall service (e.g. "") status to SERVING. If a + // specific service name were set, the server would need to + // explicitly transition that service to SERVING; otherwise, + // clients would receive a NOT_FOUND status and the connection + // would be marked as unhealthy (TRANSIENT_FAILURE). + `{"healthCheckConfig": {"serviceName": ""},"loadBalancingConfig": [{"%s":{}}]}`, + roundrobin.Name, + ), + ), grpc.WithTransportCredentials(creds), - grpc.WithChainUnaryInterceptor(allInterceptors...), + grpc.WithChainUnaryInterceptor(unaryInterceptors...), + grpc.WithChainStreamInterceptor(streamInterceptors...), + grpc.WithStatsHandler(otelgrpc.NewClientHandler()), ) } -type registry interface { - MustRegister(...prometheus.Collector) -} - // clientMetrics is a struct type used to return registered metrics from // `NewClientMetrics` type clientMetrics struct { @@ -66,24 +94,43 @@ type clientMetrics struct { inFlightRPCs *prometheus.GaugeVec } -// NewClientMetrics constructs a *grpc_prometheus.ClientMetrics, registered with +// newClientMetrics constructs a *grpc_prometheus.ClientMetrics, registered with // the given registry, with timing histogram enabled. It must be called a // maximum of once per registry, or there will be conflicting names. -func NewClientMetrics(stats registry) clientMetrics { +func newClientMetrics(stats prometheus.Registerer) (clientMetrics, error) { // Create the grpc prometheus client metrics instance and register it - grpcMetrics := grpc_prometheus.NewClientMetrics() - grpcMetrics.EnableClientHandlingTimeHistogram() - stats.MustRegister(grpcMetrics) + grpcMetrics := grpc_prometheus.NewClientMetrics( + grpc_prometheus.WithClientHandlingTimeHistogram( + grpc_prometheus.WithHistogramBuckets([]float64{.01, .025, .05, .1, .5, 1, 2.5, 5, 10, 45, 90}), + ), + ) + err := stats.Register(grpcMetrics) + if err != nil { + are := prometheus.AlreadyRegisteredError{} + if errors.As(err, &are) { + grpcMetrics = are.ExistingCollector.(*grpc_prometheus.ClientMetrics) + } else { + return clientMetrics{}, err + } + } // Create a gauge to track in-flight RPCs and register it. 
inFlightGauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "grpc_in_flight", Help: "Number of in-flight (sent, not yet completed) RPCs", }, []string{"method", "service"}) - stats.MustRegister(inFlightGauge) + err = stats.Register(inFlightGauge) + if err != nil { + are := prometheus.AlreadyRegisteredError{} + if errors.As(err, &are) { + inFlightGauge = are.ExistingCollector.(*prometheus.GaugeVec) + } else { + return clientMetrics{}, err + } + } return clientMetrics{ grpcMetrics: grpcMetrics, inFlightRPCs: inFlightGauge, - } + }, nil } diff --git a/grpc/client_test.go b/grpc/client_test.go new file mode 100644 index 00000000000..65dc285d24f --- /dev/null +++ b/grpc/client_test.go @@ -0,0 +1,39 @@ +package grpc + +import ( + "crypto/tls" + "testing" + + "github.com/jmhodges/clock" + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/test" + _ "google.golang.org/grpc/health" +) + +func TestClientSetup(t *testing.T) { + tests := []struct { + name string + cfg *cmd.GRPCClientConfig + expectTarget string + wantErr bool + }{ + {"valid, address provided", &cmd.GRPCClientConfig{ServerAddress: "localhost:8080"}, "dns:///localhost:8080", false}, + {"valid, implicit localhost with port provided", &cmd.GRPCClientConfig{ServerAddress: ":8080"}, "dns:///:8080", false}, + {"valid, IPv6 address provided", &cmd.GRPCClientConfig{ServerAddress: "[::1]:8080"}, "dns:///[::1]:8080", false}, + {"invalid, no address or addresses provided", &cmd.GRPCClientConfig{}, "", true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client, err := ClientSetup(tt.cfg, &tls.Config{}, metrics.NoopRegisterer, clock.NewFake()) + if tt.wantErr { + test.AssertError(t, err, "expected error, got nil") + } else { + test.AssertNotError(t, err, "unexpected error") + } + if tt.expectTarget != "" { + test.AssertEquals(t, client.Target(), tt.expectTarget) + } + }) + } +} diff --git a/grpc/creds/creds.go b/grpc/creds/creds.go index b1d775f0c38..31da6e234ba 100644 --- a/grpc/creds/creds.go +++ b/grpc/creds/creds.go @@ -86,10 +86,7 @@ func (tc *clientTransportCredentials) ServerHandshake(rawConn net.Conn) (net.Con // Info returns information about the transport protocol used func (tc *clientTransportCredentials) Info() credentials.ProtocolInfo { - return credentials.ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", // We *only* support TLS 1.2 - } + return credentials.ProtocolInfo{SecurityProtocol: "tls"} } // GetRequestMetadata returns nil, nil since TLS credentials do not have metadata. @@ -217,10 +214,7 @@ func (tc *serverTransportCredentials) ClientHandshake(ctx context.Context, addr // Info provides the ProtocolInfo of this TransportCredentials. func (tc *serverTransportCredentials) Info() credentials.ProtocolInfo { - return credentials.ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", // We *only* support TLS 1.2 - } + return credentials.ProtocolInfo{SecurityProtocol: "tls"} } // GetRequestMetadata returns nil, nil since TLS credentials do not have metadata. 
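The default service config that ClientSetup (grpc/client.go, above) passes to grpc.NewClient does two jobs at once: it enables health-check-driven load balancing and selects the round_robin policy. A minimal, self-contained sketch of assembling that same JSON (illustrative only, not part of this change; roundrobin.Name is the real constant from grpc-go):

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	// The empty serviceName in healthCheckConfig asks each backend's grpc-go
	// health server for its overall ("") status, which health.NewServer()
	// sets to SERVING unconditionally; round_robin then spreads RPCs across
	// the backends that report healthy.
	cfg := fmt.Sprintf(
		`{"healthCheckConfig": {"serviceName": ""},"loadBalancingConfig": [{"%s":{}}]}`,
		roundrobin.Name, // "round_robin"
	)

	// Sanity-check that the assembled config is well-formed JSON before it
	// would be handed to grpc.WithDefaultServiceConfig.
	var parsed map[string]any
	if err := json.Unmarshal([]byte(cfg), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(cfg)
}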
diff --git a/grpc/creds/creds_test.go b/grpc/creds/creds_test.go
index 486f56e8ed1..0cbf92b6152 100644
--- a/grpc/creds/creds_test.go
+++ b/grpc/creds/creds_test.go
@@ -2,8 +2,9 @@ package creds

 import (
 	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
 	"crypto/rand"
-	"crypto/rsa"
 	"crypto/tls"
 	"crypto/x509"
 	"math/big"
@@ -12,59 +13,58 @@ import (
 	"testing"
 	"time"

-	"github.com/letsencrypt/boulder/core"
+	"github.com/jmhodges/clock"
+
 	"github.com/letsencrypt/boulder/test"
 )

 func TestServerTransportCredentials(t *testing.T) {
+	_, badCert := test.ThrowAwayCert(t, clock.New())
+	goodCert := &x509.Certificate{
+		DNSNames:    []string{"creds-test"},
+		IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
+	}
 	acceptedSANs := map[string]struct{}{
-		"boulder-client": {},
+		"creds-test": {},
 	}
-	certFile := "testdata/boulder-client/cert.pem"
-	badCertFile := "testdata/example.com/cert.pem"
-	goodCert, err := core.LoadCert(certFile)
-	test.AssertNotError(t, err, "core.LoadCert failed on "+certFile)
-	badCert, err := core.LoadCert(badCertFile)
-	test.AssertNotError(t, err, "core.LoadCert failed on "+badCertFile)
 	servTLSConfig := &tls.Config{}

 	// NewServerCredentials with a nil serverTLSConfig should return an error
-	_, err = NewServerCredentials(nil, acceptedSANs)
+	_, err := NewServerCredentials(nil, acceptedSANs)
 	test.AssertEquals(t, err, ErrNilServerConfig)

-	// A creds with a empty acceptedSANs list should consider any peer valid
+	// A creds with a nil acceptedSANs list should consider any peer valid
 	wrappedCreds, err := NewServerCredentials(servTLSConfig, nil)
 	test.AssertNotError(t, err, "NewServerCredentials failed with nil acceptedSANs")
 	bcreds := wrappedCreds.(*serverTransportCredentials)
-	emptyState := tls.ConnectionState{}
-	err = bcreds.validateClient(emptyState)
+	err = bcreds.validateClient(tls.ConnectionState{})
 	test.AssertNotError(t, err, "validateClient() errored for emptyState")
+
+	// A creds with an empty acceptedSANs list should consider any peer valid
 	wrappedCreds, err = NewServerCredentials(servTLSConfig, map[string]struct{}{})
 	test.AssertNotError(t, err, "NewServerCredentials failed with empty acceptedSANs")
 	bcreds = wrappedCreds.(*serverTransportCredentials)
-	err = bcreds.validateClient(emptyState)
+	err = bcreds.validateClient(tls.ConnectionState{})
 	test.AssertNotError(t, err, "validateClient() errored for emptyState")

-	// A creds given an empty TLS ConnectionState to verify should return an error
+	// A properly-initialized creds should fail to verify an empty ConnectionState
 	bcreds = &serverTransportCredentials{servTLSConfig, acceptedSANs}
-	err = bcreds.validateClient(emptyState)
+	err = bcreds.validateClient(tls.ConnectionState{})
 	test.AssertEquals(t, err, ErrEmptyPeerCerts)

 	// A creds should reject peers that don't have a leaf certificate with
 	// a SAN on the accepted list.
- wrongState := tls.ConnectionState{ + err = bcreds.validateClient(tls.ConnectionState{ PeerCertificates: []*x509.Certificate{badCert}, - } - err = bcreds.validateClient(wrongState) + }) var errSANNotAccepted ErrSANNotAccepted test.AssertErrorWraps(t, err, &errSANNotAccepted) // A creds should accept peers that have a leaf certificate with a SAN // that is on the accepted list - rightState := tls.ConnectionState{ + err = bcreds.validateClient(tls.ConnectionState{ PeerCertificates: []*x509.Certificate{goodCert}, - } - err = bcreds.validateClient(rightState) + }) test.AssertNotError(t, err, "validateClient(rightState) failed") // A creds configured with an IP SAN in the accepted list should accept a peer @@ -74,13 +74,15 @@ func TestServerTransportCredentials(t *testing.T) { "127.0.0.1": {}, } bcreds = &serverTransportCredentials{servTLSConfig, acceptedIPSans} - err = bcreds.validateClient(rightState) + err = bcreds.validateClient(tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{goodCert}, + }) test.AssertNotError(t, err, "validateClient(rightState) failed with an IP accepted SAN list") } func TestClientTransportCredentials(t *testing.T) { - priv, err := rsa.GenerateKey(rand.Reader, 1024) - test.AssertNotError(t, err, "rsa.GenerateKey failed") + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") temp := &x509.Certificate{ SerialNumber: big.NewInt(1), diff --git a/grpc/creds/testdata/boulder-client/cert.pem b/grpc/creds/testdata/boulder-client/cert.pem deleted file mode 100644 index 12785d04747..00000000000 --- a/grpc/creds/testdata/boulder-client/cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDHTCCAgWgAwIBAgIIFYDku7cu18wwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgNDk2YzRkMCAXDTE2MTIyNjE5MTE1N1oYDzIxMDYx -MjI2MTkxMTU3WjAZMRcwFQYDVQQDEw5ib3VsZGVyLWNsaWVudDCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAN7YTImYkKMOVHk/pc/nSbvL6rUiwr4hvKSN -nTvr1ZP4+k5EJ/rhWHYyItj3Mp9mxpI3MQvb5MUdgXmybeYFZyxSdD0xgD2EpVL2 -yZ/MYX3VKaYxSPqvJuBTGj6Hb1HLJN/4j6m7FUYcq2RuM3STaBVBlAaOY/p9/UKD -N+WHaeXItFHZsn/MLf9tbAWp8lGpBZgdOqZhHimQ9nj16xJC9fTarxgKziuJAhxJ -q90RHEJVXUu8lgKXftQg6c7he4yyR1wRFxYXjOsc//GaHeKRxINWwYQJdMgwB0+C -KG4hWQY+5NpFCAKZS5dBqCOumQETv1+EZE3X7ELL1KxF01QUPx8CAwEAAaNgMF4w -DgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAM -BgNVHRMBAf8EAjAAMB8GA1UdEQQYMBaCDmJvdWxkZXItY2xpZW50hwR/AAABMA0G -CSqGSIb3DQEBCwUAA4IBAQA0kIJ4jK4mSSh0zfsPYJ4IMJPnEUSmVH1F8ftX6B8C -BHY99REegwZT2lZqw6HU95jnycYrLplRAPLErMMnBadEdoI3Ir1we0HxWnZ/OLYl -O7M8nPSVucgyYvvwY+bENeoF0fBSCt9VJnPYaqTnrjbaAU3nX4e+FgFEkWIiFVtk -sQJCqRcFsE0u+hPMAuhhzPC2PcXf7UO3t7pDBuMfPfcHUe4QqU6VBg5VlVQBguwL -fvAiVtDI55RxwAWP9q4cKY/S//L09ukHdipcBLeh+SHkY5B86UIBL55MG1iycZcE -KnCWVZ6w0hYMX7uSxaXAjoYj2eZZJ5s98dBWwPpnBK5K ------END CERTIFICATE----- diff --git a/grpc/creds/testdata/boulder-client/key.pem b/grpc/creds/testdata/boulder-client/key.pem deleted file mode 100644 index bd0f49aa160..00000000000 --- a/grpc/creds/testdata/boulder-client/key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA3thMiZiQow5UeT+lz+dJu8vqtSLCviG8pI2dO+vVk/j6TkQn -+uFYdjIi2Pcyn2bGkjcxC9vkxR2BebJt5gVnLFJ0PTGAPYSlUvbJn8xhfdUppjFI -+q8m4FMaPodvUcsk3/iPqbsVRhyrZG4zdJNoFUGUBo5j+n39QoM35Ydp5ci0Udmy -f8wt/21sBanyUakFmB06pmEeKZD2ePXrEkL19NqvGArOK4kCHEmr3REcQlVdS7yW -Apd+1CDpzuF7jLJHXBEXFheM6xz/8Zod4pHEg1bBhAl0yDAHT4IobiFZBj7k2kUI -AplLl0GoI66ZARO/X4RkTdfsQsvUrEXTVBQ/HwIDAQABAoIBAD7kTLWImUlrgeu2 
-StWiMO0PX+4/ITcHznWigf2nUV+xpPLxyVbEwnKZM7yIB4F1QBWD/hOau7Xl3eS6 -tsl0POoFW0jp74dtKDuB6uVuiRU6U8bTogUd6Zrl2UJMdt5etsl/HQyKADcJYFW3 -xE3iTD2zhFNVXGIVrKRY5C2jSf/PXhn/VKxIY6fmWtTSBfLLJozL5DD77nu0r7cw -+95c27QDVw4NT3BFvUmq+KU4nTf5BSFmKzx/QGKSQr8SKQxxP18dbr3loMSUcXX3 -DV6rfAy1S4HAAQf0rlVOaCdeHyV7cbNu6RreWEA1ool6J1rmXn3DoF2BZQ9KyqYR -fsQzwAECgYEA+s9ZUeWV68eFQbHogKCWKyI9UP8EXtpsUsWjf8IIsHj6leuRmZO/ -xcn1dzzgLyQnc4sJfo5nM250anjVfG4sVxJ9xVM3n+h3uvP+32EYEBiOHqDJjJEJ -dgRRQVjaiqzZ9sSLqeFiUDiz9vBzCfRLnmO1b+YQmzuKg4U+OkttvZ8CgYEA43TO -kZqyDUl8Du8JtdUnwxkNTAWzHxe8ZfjZPeysn5py/XkJL0+9+awf6RuF7jePZIPd -b44sy183DbZPph1F/x6oYxlnB+Ln1+Q+MAg3yOYdVtTq1J4MIKg/HF+ZGLUmmitr -BWm/k05qqiA7xMsKqANO+wiHgKGcMlvJ94P3DoECgYA8JSUiQpAlLn+CNYuv7NKW -HWve1IjTxkVbxBFlPb6fsyNEeG0+CvReFgRLR8nrBByB1MG+G6Ab8Uzxi3CGmlT/ -vGt0unQIYNL1fjWMk9gVZA4QwjYTA2TlbDugFBK0VqceShvD7UyKOE7P1PGBXZgU -r4x+/QjWcPzSdU1q5VO3uQKBgF9mvr2ThNrN15rr58a8gNDz0P/x5A9GhV6/4V53 -xm+q+FAYVCHj8GqoJzo+sQ4Zf6tJI7eGioNCjHTT4tRpstIuqbHnFa/ZHCURNoQu -7jb1TZj99QD4yV1pNA9SiYQzdt1zGJBMqm2+lj6FBoykHPQbbyUbjroFDt6QYUXP -s/ABAoGBAMT+vFAkt4GNNxFeQN3+fpZHuiuFc6mE0dKMPsLn7BK+pCv0GvyakpkJ -/f6uBAXntzTRY4ER+tCxHf7AE0gbMaSwkhgW4fOv6Co3ObQio3bZWMIQK2zS74np -52bVHfXi3bX26CIT3b5o1rw6pebDYwbXex0j1uexyhx30xi15v2I ------END RSA PRIVATE KEY----- diff --git a/grpc/creds/testdata/example.com/cert.pem b/grpc/creds/testdata/example.com/cert.pem deleted file mode 100644 index e991ca1b4af..00000000000 --- a/grpc/creds/testdata/example.com/cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDETCCAfmgAwIBAgIITp8UbMgujuEwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgNDk2YzRkMCAXDTE2MTIyNjE5MTEyOFoYDzIxMDYx -MjI2MTkxMTI4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAL18+TWZsdGOxfObbuHQ8mOSXvc6+gtVHN9lSFOt -x7JiM2OZhQFOlYPDox/KqQX0tlyfYZ808NZcwWConQL+Atme8AKy0pahqI99WChh -li9ehbbbTGoWa8NxWbkqGDgD3waQ8YFZbWXosiK+dt4cAbNpAdX1yByQts/GUKW0 -PYyqwoOvjE5tBXBzrIL6PVxmGz5ALjq8GMl3HTyZXO5AfBuomNRYYkEV6zx/TOTq -PhO7flLnMVauv0aJbsaD+ZpPF2Zi/fw/4q2nolag+oA1f55mHxjN39ocLHa++CJA -ft4LRK/75QVaYKICn4r13DiCvGI44ltv+lmwSPZ311lvIF8CAwEAAaNXMFUwDgYD -VR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV -HRMBAf8EAjAAMBYGA1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUA -A4IBAQAp/W32B/Pnm1oZXSVWTSN6ztSWjgiB3du1ryPe5VSPBmYZU1hHvORBfjuH -5JI9mHioW+0aoiDuABgpIXf5hMfXljyJXN+vO70C5PStUnFmHTtGADw62vRxhVVU -PLKtSAph8QpMTEUe+skV5RZ525aqHH54GSrSm7EdkIrgrkuGQhOViZ6QEqew29I3 -UK6cNe3w4d0XTzwPej4TNDGwumwWf/TEopp/kdOsFn93aZh/C/uTuI8gyqI9HiO0 -uQCwsePBr0G0w+vns38oC9jgyu6S3bOnq8XBzLjWgJ2lL//0g7bqvc5Wi1ClJnNS -OW48oQi9pw/ceqkYaMjCc0M5M0ix ------END CERTIFICATE----- diff --git a/grpc/creds/testdata/example.com/key.pem b/grpc/creds/testdata/example.com/key.pem deleted file mode 100644 index 295a72f3b37..00000000000 --- a/grpc/creds/testdata/example.com/key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAvXz5NZmx0Y7F85tu4dDyY5Je9zr6C1Uc32VIU63HsmIzY5mF -AU6Vg8OjH8qpBfS2XJ9hnzTw1lzBYKidAv4C2Z7wArLSlqGoj31YKGGWL16FtttM -ahZrw3FZuSoYOAPfBpDxgVltZeiyIr523hwBs2kB1fXIHJC2z8ZQpbQ9jKrCg6+M -Tm0FcHOsgvo9XGYbPkAuOrwYyXcdPJlc7kB8G6iY1FhiQRXrPH9M5Oo+E7t+Uucx -Vq6/RoluxoP5mk8XZmL9/D/iraeiVqD6gDV/nmYfGM3f2hwsdr74IkB+3gtEr/vl -BVpgogKfivXcOIK8YjjiW2/6WbBI9nfXWW8gXwIDAQABAoIBAQCrLixgXMGEQ8vW -YBOSktV2WHPMOw5KkJBtzCzD05k1MHumPbknThvKFkHWZZm+VK0uDZn+XrA3p0HX -FVwKqPhgKrI+bdfK1q3VOvIaQNaRYn2/jGuC51BhFpRsr3eDmxOu9eAG74fh6Y6L -zq7JxllO/8z1wn0OOTm9iDWxDJwR51+tq/BSJhj681QPTOYmMxeHVxlXbZWs3JH3 
-2md/s3M2ZKuyS/i6B4d2wijxMbZsbmX2gYC/N+i/DfLyfwh1+/6BvTZIsW5e1LRQ -kcIltZxlCT/PQw/rQjgDZROujlpiuYc2jaedn5JRDYNu+tnITi5oPswXezMH7QQs -PpQCcQfpAoGBAN53rCeLOyenihR35L5J/pqgMTwvGywEiNzVLqv9KUxyhZZvexIj -n5nQhRBIWD+2LpM1wmkMwb0xJT9PKbZgtaxYoledkFbWC+n7F6VqG/jb8ZUlkYdD -6QVUqAOIiuQLKJTzKStDQlAJXhGF5eItI+yAnL7utUsliLPbh1zUrLXTAoGBANoM -u5F/bqXOf2kQqXx7PfIuFRmQau97l0e7M1R7agvsgSnFvoa47Lkkx+KztZO+n8YD -wpEe3otuEYQAhG4WnLcZsBkAtKlGNv9JXwYOKFttKHSEtQ2LA10AsgILknJpZggE -/rMVyam+bjwusTfb610S8gYSjl7IKMIU+S+aAdfFAoGBAMgm3VF6l882kimWMMvv -YM0XQRTHwOeacNRWTLZaf9SS2JOfWxfXyxklHQKoRBWWQFMbs/y1iH1CASPzgjDe -07TqzayMSzeFPpTV3tFpJR+CKtkoQsVzGOw93SfIqkU/sNRJ7YlJ6xh9RQ/46vnR -6Rc4I045EA07CMHgyemAQp8XAoGAbIYtzKqp/WgbTcV3NXd5S1HYOpMARhUzJAZt -87xA+ZJKbun2e8MKPtOpkJF07AXSK5Gvgt7kUG0F1rcTMl+avB7S4H7Ta/SAZuqz -mqXtPCPGIMfz/LuVfvJbplzwFHWUzKT/x04uwob/AoESvwR7ziUhxBf0OARTFNWv -eBukkykCgYAuJ9jYMXVXae4phx0SgUNR40y7TA/TWbK2QgVGhWoGLlOOD3eqlxRS -xjV5ZcOy5XcCsL5tyN5IhTRUdCWF0l/v9EfvY0Zib7BWZk/dFcmLba2w2YW4cWD4 -WI5hndU1a8engsQ9C7PQPzU9GiRbcnwU8n1pGAE5Aa8u7b3WCFi2ag== ------END RSA PRIVATE KEY----- diff --git a/grpc/creds/testdata/minica-key.pem b/grpc/creds/testdata/minica-key.pem deleted file mode 100644 index 7b6b3138660..00000000000 --- a/grpc/creds/testdata/minica-key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA2DOzYwShG632UEuo2117zegIEsILQOLlk4mC804862wqpUr4 -ABVaetU43np6Noh5jm5R4uzOIKhLe+tc27uUeArSVWHmxw7guidax5H63es7tILK -J/hW+pCPOtBWJbGgyT/LfSzd4XMnM+QOs24buBtlRgckiRa+ddo2zSKw9UOCmOa2 -VyTYLc9CAGFLJWYEfTWTiS3vdD2xbaONvqkURWmUfqrwUvrTWYx5RwcIc/s7sa1g -jrcj1H+vqgDlcJHgrDfRGvh7y7b+68wdhvwTPctosnIqkJ3VjHKf0jir8ui3iH2P -VFopkIY8KIM7x5wgdlXE8IiZ0HG1a/4fjAdiqQIDAQABAoIBACn+0Od47Lptrhym -taP2oqe6XGcYXgs6h5GC8nbESb0JA7ebulR50Vtus18zty2EkOgDsdA8Y0Wgqxjk -/OKJqysuHvEK1ULxNuGx50ZAqtQUb0J+7TmaWXrOPaQU98bWm/67b5Fe97JlJcZE -lD5yMtwuJhvWI1ilnFVUD7UE+g6JtO5JJ+xb1iuWi6F6Rv+4Ud1nVTXALSvmZs6T -ufVZ+VfzSfAFm1zwIfC+rhvEG2TmTUDbwKrKccBDBuE9EeUseyLuMrgOAVPTpZkx -xdfHJf5pca5w1ATzuLkY1ucxFTZKEImQRdCB11tWEK8xinmuvxIY70PFGK1p502E -2PH9TAECgYEA3NrbvAxtyoOhluMn9Gj3sD54Bk08u356JmQfCUskPqxI5Mhxrb5H -1s15+0gLHnAydDEKS8afBKJTfDO1A+spFtAd2EIpMYjxBmdMTm1yiMlgxyTbyD/S -2YWt669u27GDsL2X0UeM6ZWr66SR7qv8XyrxG3vFwAT2RhHQl85dVsECgYEA+ptK -hWHYqqehUQVgGextVPl/cJNkiQYPvua3dx3kV8BeoJpcQ4kBW4dvOOOhUMGrNu27 -ySg83ztoWNx/DuJj6mjYLeRWmjiy4LsM1ulVdelETLD4gvA64gcjn2wHdmqswyZr -+AgQDtj/zJGSlwHwa89lHf/Vn94sOju6c50zrekCgYACXnqi7sMyu+y80Jz6GzIk -+taQwr1XCYlH5iULFXpDDdgOVDQb+AGPRU78qg4semmgI3KH046bHx6kI23ap+gd -7YeUbUlB1cU4G12PLc9ftkFKgZO19zFM0glGkPAwfRv7hl0dipXxX4Cjm3VRSRkA -8FX7xH0fjS4THFfPeRA5QQKBgQDLIBPHCQgVSsn2YeUDHh+AQLqLBOz4I3GfymHq -qs/qUIJiqrseAm6XQa9k7RxT2KZuX0NWSMcOKZjqbc6OwfE6jsCqcenAFzTl8rb1 -eytJ50j04HyNpSoVCrRVA7sIwG5Hv1zELcTWRA0SQuwOegPlXrUXG8aLTgmLKd52 -13SMAQKBgQCSlG65tZpBC8l34keR5fphQWwbVg3x6pPcsWiVe+UjKNgk7ZCGS0vX -tk9ybTaUanps9YVtQfqpp2DtuH/nldnWn0tVe8WWe4Z+MdIuINem/xmWg4zrQGIr -MOvH4660E0D0cNR6aPASL2SJjJrBjKEgOuf4z+wgaVXbmhe1u9Jz5A== ------END RSA PRIVATE KEY----- diff --git a/grpc/creds/testdata/minica.pem b/grpc/creds/testdata/minica.pem deleted file mode 100644 index c1c32eccc6c..00000000000 --- a/grpc/creds/testdata/minica.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCTCCAfGgAwIBAgIISWxN2VYSiYswDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE -AxMVbWluaWNhIHJvb3QgY2EgNDk2YzRkMCAXDTE2MTIyNjE5MTEyNFoYDzIxMTYx -MjI2MTkxMTI0WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSA0OTZjNGQwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDYM7NjBKEbrfZQS6jbXXvN6AgS 
-wgtA4uWTiYLzTjzrbCqlSvgAFVp61Tjeeno2iHmOblHi7M4gqEt761zbu5R4CtJV -YebHDuC6J1rHkfrd6zu0gson+Fb6kI860FYlsaDJP8t9LN3hcycz5A6zbhu4G2VG -BySJFr512jbNIrD1Q4KY5rZXJNgtz0IAYUslZgR9NZOJLe90PbFto42+qRRFaZR+ -qvBS+tNZjHlHBwhz+zuxrWCOtyPUf6+qAOVwkeCsN9Ea+HvLtv7rzB2G/BM9y2iy -ciqQndWMcp/SOKvy6LeIfY9UWimQhjwogzvHnCB2VcTwiJnQcbVr/h+MB2KpAgMB -AAGjRTBDMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB -BQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEAbO8H -2Lt9jbQ8F4bEwSfC/D9FbF5VF5OnnDDFox0GoGSNvRqGs6vBFcSTapVsYeSWcUOZ -IK/RbLGpWmj8D3euoERbx32WCPY/ZOB22Jw/g7S9Fg5A/xO0RxbREpRnFMwb5EYH -G/WvE92EPovKHahaYunWVrMn0wOsffJUOm9zRdrAwA84Igipx5x5SqzrHWcAReYa -T337r655ziz5Sr9Pq2Maomy/Yu4rduNieuryi55gZ/hg9kYCr6B4K1hCQFhikueJ -OrNtzWrOjV5vmH2iwidFctRcFKriuZS60FIOY8mzgIe9wuCGWB4tdK73dbAwBJbM -zo7+nn4osye9ua3gCg== ------END CERTIFICATE----- diff --git a/grpc/errors.go b/grpc/errors.go index 3d3e83836a7..7f9aabbb6cf 100644 --- a/grpc/errors.go +++ b/grpc/errors.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "strconv" + "time" "google.golang.org/grpc" - "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" @@ -20,12 +20,13 @@ import ( // context. errors.BoulderError error types are encoded using the grpc/metadata // in the context.Context for the RPC which is considered to be the 'proper' // method of encoding custom error types (grpc/grpc#4543 and grpc/grpc-go#478) -func wrapError(ctx context.Context, err error) error { - if err == nil { +func wrapError(ctx context.Context, appErr error) error { + if appErr == nil { return nil } + var berr *berrors.BoulderError - if errors.As(err, &berr) { + if errors.As(appErr, &berr) { pairs := []string{ "errortype", strconv.Itoa(int(berr.Type)), } @@ -38,20 +39,26 @@ func wrapError(ctx context.Context, err error) error { jsonSubErrs, err := json.Marshal(berr.SubErrors) if err != nil { return berrors.InternalServerError( - "error marshaling json SubErrors, orig error %q", - err) + "error marshaling json SubErrors, orig error %q", err) } - pairs = append(pairs, "suberrors") - pairs = append(pairs, string(jsonSubErrs)) + headerSafeSubErrs := strconv.QuoteToASCII(string(jsonSubErrs)) + pairs = append(pairs, "suberrors", headerSafeSubErrs) + } + + // If there is a RetryAfter value then extend the metadata pairs to + // include the value. + if berr.RetryAfter != 0 { + pairs = append(pairs, "retryafter", berr.RetryAfter.String()) } - // Ignoring the error return here is safe because if setting the metadata - // fails, we'll still return an error, but it will be interpreted on the - // other side as an InternalServerError instead of a more specific one. 
-	_ = grpc.SetTrailer(ctx, metadata.Pairs(pairs...))
-	return status.Errorf(codes.Unknown, err.Error())
+	err := grpc.SetTrailer(ctx, metadata.Pairs(pairs...))
+	if err != nil {
+		return berrors.InternalServerError(
+			"error setting gRPC error metadata, orig error %q", appErr)
+	}
 	}
-	return status.Errorf(codes.Unknown, err.Error())
+
+	return appErr
 }

 // unwrapError unwraps errors returned from gRPC client calls which were wrapped
@@ -63,59 +70,85 @@ func unwrapError(err error, md metadata.MD) error {
 		return nil
 	}

-	unwrappedErr := status.Convert(err).Message()
-
 	errTypeStrs, ok := md["errortype"]
 	if !ok {
 		return err
 	}
+
+	inErrMsg := status.Convert(err).Message()
 	if len(errTypeStrs) != 1 {
 		return berrors.InternalServerError(
-			"multiple errorType metadata, wrapped error %q",
-			unwrappedErr,
+			"multiple 'errortype' metadata, wrapped error %q",
+			inErrMsg,
 		)
 	}

-	errType, decErr := strconv.Atoi(errTypeStrs[0])
+	inErrType, decErr := strconv.Atoi(errTypeStrs[0])
 	if decErr != nil {
 		return berrors.InternalServerError(
 			"failed to decode error type, decoding error %q, wrapped error %q",
 			decErr,
-			unwrappedErr,
+			inErrMsg,
 		)
 	}
-	outErr := berrors.New(berrors.ErrorType(errType), unwrappedErr)
-
-	subErrsJSON, ok := md["suberrors"]
-	if !ok {
-		return outErr
-	}
-	if len(subErrsJSON) != 1 {
-		return berrors.InternalServerError(
-			"multiple suberrors metadata, wrapped error %q",
-			unwrappedErr,
+	inErr := berrors.New(berrors.ErrorType(inErrType), inErrMsg)
+	var outErr *berrors.BoulderError
+	if !errors.As(inErr, &outErr) {
+		return fmt.Errorf(
+			"expected type of inErr to be %T got %T: %q",
+			outErr,
+			inErr,
+			inErr.Error(),
 		)
 	}

-	var suberrs []berrors.SubBoulderError
-	err2 := json.Unmarshal([]byte(subErrsJSON[0]), &suberrs)
-	if err2 != nil {
-		return berrors.InternalServerError(
-			"error unmarshaling suberrs JSON %q, wrapped error %q",
-			subErrsJSON[0],
-			unwrappedErr,
-		)
+	subErrorsVal, ok := md["suberrors"]
+	if ok {
+		if len(subErrorsVal) != 1 {
+			return berrors.InternalServerError(
+				"multiple 'suberrors' in metadata, wrapped error %q",
+				inErrMsg,
+			)
+		}
+
+		unquotedSubErrors, unquoteErr := strconv.Unquote(subErrorsVal[0])
+		if unquoteErr != nil {
+			return fmt.Errorf(
+				"unquoting 'suberrors' %q, wrapped error %q: %w",
+				subErrorsVal[0],
+				inErrMsg,
+				unquoteErr,
+			)
+		}
+
+		unmarshalErr := json.Unmarshal([]byte(unquotedSubErrors), &outErr.SubErrors)
+		if unmarshalErr != nil {
+			return berrors.InternalServerError(
+				"JSON unmarshaling 'suberrors' %q, wrapped error %q: %s",
+				subErrorsVal[0],
+				inErrMsg,
+				unmarshalErr,
+			)
+		}
 	}

-	var berr *berrors.BoulderError
-	if errors.As(outErr, &berr) {
-		outErr = berr.WithSubErrors(suberrs)
-	} else {
-		return fmt.Errorf(
-			"expected type of outErr to be %T got %T: %q",
-			berr, outErr,
-			outErr.Error(),
-		)
+	retryAfterVal, ok := md["retryafter"]
+	if ok {
+		if len(retryAfterVal) != 1 {
+			return berrors.InternalServerError(
+				"multiple 'retryafter' in metadata, wrapped error %q",
+				inErrMsg,
+			)
+		}
+		var parseErr error
+		outErr.RetryAfter, parseErr = time.ParseDuration(retryAfterVal[0])
+		if parseErr != nil {
+			return berrors.InternalServerError(
+				"parsing 'retryafter' as a duration, wrapped error %q, parsing error: %s",
+				inErrMsg,
+				parseErr,
+			)
+		}
 	}
 	return outErr
 }
diff --git a/grpc/errors_test.go b/grpc/errors_test.go
index 41ee7fa364e..98fd1eb3248 100644
--- a/grpc/errors_test.go
+++ b/grpc/errors_test.go
@@ -2,14 +2,17 @@ package grpc

 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"testing"
 	"time"

 	"google.golang.org/grpc"
+
"google.golang.org/grpc/credentials/insecure" "github.com/jmhodges/clock" + berrors "github.com/letsencrypt/boulder/errors" "github.com/letsencrypt/boulder/grpc/test_proto" "github.com/letsencrypt/boulder/identifier" @@ -27,10 +30,13 @@ func (s *errorServer) Chill(_ context.Context, _ *test_proto.Time) (*test_proto. } func TestErrorWrapping(t *testing.T) { - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clock.NewFake()) - ci := clientInterceptor{time.Second, NewClientMetrics(metrics.NoopRegisterer), clock.NewFake()} - srv := grpc.NewServer(grpc.UnaryInterceptor(si.intercept)) + serverMetrics, err := newServerMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating server metrics") + smi := newServerMetadataInterceptor(serverMetrics, clock.NewFake()) + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + cmi := clientMetadataInterceptor{time.Second, clientMetrics, clock.NewFake(), true} + srv := grpc.NewServer(grpc.UnaryInterceptor(smi.Unary)) es := &errorServer{} test_proto.RegisterChillerServer(srv, es) lis, err := net.Listen("tcp", "127.0.0.1:") @@ -40,28 +46,40 @@ func TestErrorWrapping(t *testing.T) { conn, err := grpc.Dial( lis.Addr().String(), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(ci.intercept), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUnaryInterceptor(cmi.Unary), ) test.AssertNotError(t, err, "Failed to dial grpc test server") client := test_proto.NewChillerClient(conn) - es.err = berrors.MalformedError("yup") + // RateLimitError with a RetryAfter of 500ms. + expectRetryAfter := time.Millisecond * 500 + es.err = berrors.RateLimitError(expectRetryAfter, "yup") _, err = client.Chill(context.Background(), &test_proto.Time{}) test.Assert(t, err != nil, fmt.Sprintf("nil error returned, expected: %s", err)) test.AssertDeepEquals(t, err, es.err) + var bErr *berrors.BoulderError + ok := errors.As(err, &bErr) + test.Assert(t, ok, "asserting error as boulder error") + // Ensure we got a RateLimitError + test.AssertErrorIs(t, bErr, berrors.RateLimit) + // Ensure our RetryAfter is still 500ms. + test.AssertEquals(t, bErr.RetryAfter, expectRetryAfter) - test.AssertEquals(t, wrapError(context.Background(), nil), nil) - test.AssertEquals(t, unwrapError(nil, nil), nil) + test.AssertNil(t, wrapError(context.Background(), nil), "Wrapping nil should still be nil") + test.AssertNil(t, unwrapError(nil, nil), "Unwrapping nil should still be nil") } // TestSubErrorWrapping tests that a boulder error with suberrors can be // correctly wrapped and unwrapped across the RPC layer. 
func TestSubErrorWrapping(t *testing.T) { - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clock.NewFake()) - ci := clientInterceptor{time.Second, NewClientMetrics(metrics.NoopRegisterer), clock.NewFake()} - srv := grpc.NewServer(grpc.UnaryInterceptor(si.intercept)) + serverMetrics, err := newServerMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating server metrics") + smi := newServerMetadataInterceptor(serverMetrics, clock.NewFake()) + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + cmi := clientMetadataInterceptor{time.Second, clientMetrics, clock.NewFake(), true} + srv := grpc.NewServer(grpc.UnaryInterceptor(smi.Unary)) es := &errorServer{} test_proto.RegisterChillerServer(srv, es) lis, err := net.Listen("tcp", "127.0.0.1:") @@ -71,15 +89,15 @@ func TestSubErrorWrapping(t *testing.T) { conn, err := grpc.Dial( lis.Addr().String(), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(ci.intercept), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUnaryInterceptor(cmi.Unary), ) test.AssertNotError(t, err, "Failed to dial grpc test server") client := test_proto.NewChillerClient(conn) subErrors := []berrors.SubBoulderError{ { - Identifier: identifier.DNSIdentifier("chillserver.com"), + Identifier: identifier.NewDNS("chillserver.com"), BoulderError: &berrors.BoulderError{ Type: berrors.RejectedIdentifier, Detail: "2 ill 2 chill", diff --git a/grpc/interceptors.go b/grpc/interceptors.go index 5c130ebb41d..9880c05ab46 100644 --- a/grpc/interceptors.go +++ b/grpc/interceptors.go @@ -11,65 +11,88 @@ import ( "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "github.com/letsencrypt/boulder/cmd" berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/probs" + "github.com/letsencrypt/boulder/web" ) const ( returnOverhead = 20 * time.Millisecond meaningfulWorkOverhead = 100 * time.Millisecond clientRequestTimeKey = "client-request-time" - serverLatencyKey = "server-latency" + userAgentKey = "acme-client-user-agent" ) -// NoCancelInterceptor is a gRPC interceptor that creates a new context, -// separate from the original context, that has the same deadline but does -// not propagate cancellation. This is used by SA. -// -// Because this interceptor throws away annotations on the context, it -// breaks tracing for events that get the modified context. To minimize that -// impact, this interceptor should always be last. -func NoCancelInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - cancel := func() {} - if deadline, ok := ctx.Deadline(); ok { - ctx, cancel = context.WithDeadline(context.Background(), deadline) - } else { - ctx = context.Background() - } - defer cancel() +type serverInterceptor interface { + Unary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) + Stream(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error +} + +// noopServerInterceptor provides no-op interceptors. It can be substituted for +// an interceptor that has been disabled. +type noopServerInterceptor struct{} + +// Unary is a gRPC unary interceptor. 
+func (n *noopServerInterceptor) Unary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { return handler(ctx, req) } -// serverInterceptor is a gRPC interceptor that adds Prometheus +// Stream is a gRPC stream interceptor. +func (n *noopServerInterceptor) Stream(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return handler(srv, ss) +} + +// Ensure noopServerInterceptor matches the serverInterceptor interface. +var _ serverInterceptor = &noopServerInterceptor{} + +type clientInterceptor interface { + Unary(ctx context.Context, method string, req any, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error + Stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) +} + +// serverMetadataInterceptor is a gRPC interceptor that adds Prometheus // metrics to requests handled by a gRPC server, and wraps Boulder-specific // errors for transmission in a grpc/metadata trailer (see bcodes.go). -type serverInterceptor struct { +type serverMetadataInterceptor struct { metrics serverMetrics clk clock.Clock } -func newServerInterceptor(metrics serverMetrics, clk clock.Clock) serverInterceptor { - return serverInterceptor{ +func newServerMetadataInterceptor(metrics serverMetrics, clk clock.Clock) serverMetadataInterceptor { + return serverMetadataInterceptor{ metrics: metrics, clk: clk, } } -func (si *serverInterceptor) intercept(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { +// Unary implements the grpc.UnaryServerInterceptor interface. +func (smi *serverMetadataInterceptor) Unary( + ctx context.Context, + req any, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler) (any, error) { if info == nil { return nil, berrors.InternalServerError("passed nil *grpc.UnaryServerInfo") } - // Extract the grpc metadata from the context. If the context has - // a `clientRequestTimeKey` field, and it has a value, then observe the RPC - // latency with Prometheus. - if md, ok := metadata.FromIncomingContext(ctx); ok && len(md[clientRequestTimeKey]) > 0 { - err := si.observeLatency(md[clientRequestTimeKey][0]) - if err != nil { - return nil, err + // Extract the grpc metadata from the context, and handle the client request + // timestamp embedded in it. It's okay if the timestamp is missing, since some + // clients (like nomad's health-checker) don't set it. + md, ok := metadata.FromIncomingContext(ctx) + if ok { + if len(md[clientRequestTimeKey]) > 0 { + err := smi.checkLatency(md[clientRequestTimeKey][0]) + if err != nil { + return nil, err + } + } + if len(md[userAgentKey]) > 0 { + ctx = web.WithUserAgent(ctx, md[userAgentKey][0]) } } @@ -81,6 +104,9 @@ func (si *serverInterceptor) intercept(ctx context.Context, req interface{}, inf // opposed to "RA.NewCertificate timed out" (causing a 500). // Once we've shaved the deadline, we ensure we have we have at least another // 100ms left to do work; otherwise we abort early. + // Note that these computations use the global clock (time.Now) instead of + // the local clock (smi.clk.Now) because context.WithTimeout also uses the + // global clock. deadline, ok := ctx.Deadline() // Should never happen: there was no deadline. 
if !ok {
 		deadline = time.Now().Add(100 * time.Second)
 	}
 	deadline = deadline.Add(-returnOverhead)
 	remaining := time.Until(deadline)
 	if remaining < meaningfulWorkOverhead {
 		return nil, status.Errorf(codes.DeadlineExceeded, "not enough time left on clock: %s", remaining)
 	}
-	var cancel func()
-	ctx, cancel = context.WithDeadline(ctx, deadline)
+
+	localCtx, cancel := context.WithDeadline(ctx, deadline)
 	defer cancel()

-	resp, err := handler(ctx, req)
+	resp, err := handler(localCtx, req)
 	if err != nil {
-		err = wrapError(ctx, err)
+		err = wrapError(localCtx, err)
 	}
 	return resp, err
 }

+// interceptedServerStream wraps an existing server stream, but replaces its
+// context with its own.
+type interceptedServerStream struct {
+	grpc.ServerStream
+	ctx context.Context
+}
+
+// Context implements part of the grpc.ServerStream interface.
+func (iss interceptedServerStream) Context() context.Context {
+	return iss.ctx
+}
+
+// Stream implements the grpc.StreamServerInterceptor interface.
+func (smi *serverMetadataInterceptor) Stream(
+	srv any,
+	ss grpc.ServerStream,
+	info *grpc.StreamServerInfo,
+	handler grpc.StreamHandler) error {
+	ctx := ss.Context()
+
+	// Extract the grpc metadata from the context, and handle the client request
+	// timestamp embedded in it. It's okay if the timestamp is missing, since some
+	// clients (like nomad's health-checker) don't set it.
+	md, ok := metadata.FromIncomingContext(ctx)
+	if ok && len(md[clientRequestTimeKey]) > 0 {
+		err := smi.checkLatency(md[clientRequestTimeKey][0])
+		if err != nil {
+			return err
+		}
+	}
+
+	// Shave 20 milliseconds off the deadline to ensure that if the RPC server times
+	// out any sub-calls it makes (like DNS lookups, or onwards RPCs), it has a
+	// chance to report that timeout to the client. This allows for more specific
+	// errors, e.g. "the VA timed out looking up CAA for example.com" (when called
+	// from RA.NewCertificate, which was called from WFE.NewCertificate), as
+	// opposed to "RA.NewCertificate timed out" (causing a 500).
+	// Once we've shaved the deadline, we ensure we have at least another
+	// 100ms left to do work; otherwise we abort early.
+	// Note that these computations use the global clock (time.Now) instead of
+	// the local clock (smi.clk.Now) because context.WithTimeout also uses the
+	// global clock.
+	deadline, ok := ctx.Deadline()
+	// Should never happen: there was no deadline.
+	if !ok {
+		deadline = time.Now().Add(100 * time.Second)
+	}
+	deadline = deadline.Add(-returnOverhead)
+	remaining := time.Until(deadline)
+	if remaining < meaningfulWorkOverhead {
+		return status.Errorf(codes.DeadlineExceeded, "not enough time left on clock: %s", remaining)
+	}
+
+	// Server stream interceptors are synchronous (they return their error, if
+	// any, when the stream is done) so defer cancel() is safe here.
+	localCtx, cancel := context.WithDeadline(ctx, deadline)
+	defer cancel()
+
+	err := handler(srv, interceptedServerStream{ss, localCtx})
+	if err != nil {
+		err = wrapError(localCtx, err)
+	}
+	return err
+}
+
 // splitMethodName is borrowed directly from
 // `grpc-ecosystem/go-grpc-prometheus/util.go` and is used to extract the
 // service and method name from the `method` argument to
@@ -114,12 +205,13 @@ func splitMethodName(fullMethodName string) (string, string) {
 	return "unknown", "unknown"
 }

-// observeLatency is called with the `clientRequestTimeKey` value from
+// checkLatency is called with the `clientRequestTimeKey` value from
 // a request's gRPC metadata.
This string value is converted to a timestamp and // used to calculate the latency between send and receive time. The latency is // published to the server interceptor's rpcLag prometheus histogram. An error -// is returned if the `clientReqTime` string is not a valid timestamp. -func (si *serverInterceptor) observeLatency(clientReqTime string) error { +// is returned if the `clientReqTime` string is not a valid timestamp, or if +// the latency is so large that it indicates dangerous levels of clock skew. +func (smi *serverMetadataInterceptor) checkLatency(clientReqTime string) error { // Convert the metadata request time into an int64 reqTimeUnixNanos, err := strconv.ParseInt(clientReqTime, 10, 64) if err != nil { @@ -128,56 +220,74 @@ func (si *serverInterceptor) observeLatency(clientReqTime string) error { } // Calculate the elapsed time since the client sent the RPC reqTime := time.Unix(0, reqTimeUnixNanos) - elapsed := si.clk.Since(reqTime) + elapsed := smi.clk.Since(reqTime) + + // If the elapsed time is very large, that indicates it is probably due to + // clock skew rather than simple latency. Refuse to handle the request, since + // accurate timekeeping is critical to CA operations and large skew indicates + // something has gone very wrong. + if tooSkewed(elapsed) { + return fmt.Errorf( + "gRPC client reported a very different time: %s (client) vs %s (this server)", + reqTime, smi.clk.Now()) + } + // Publish an RPC latency observation to the histogram - si.metrics.rpcLag.Observe(elapsed.Seconds()) + smi.metrics.rpcLag.Observe(elapsed.Seconds()) return nil } -// clientInterceptor is a gRPC interceptor that adds Prometheus +// Ensure serverMetadataInterceptor matches the serverInterceptor interface. +var _ serverInterceptor = (*serverMetadataInterceptor)(nil) + +// clientMetadataInterceptor is a gRPC interceptor that adds Prometheus // metrics to sent requests, and disables FailFast. We disable FailFast because // non-FailFast mode is most similar to the old AMQP RPC layer: If a client // makes a request while all backends are briefly down (e.g. for a restart), the // request doesn't necessarily fail. A backend can service the request if it // comes back up within the timeout. Under gRPC the same effect is achieved by // retries up to the Context deadline. -type clientInterceptor struct { +type clientMetadataInterceptor struct { timeout time.Duration metrics clientMetrics clk clock.Clock + + waitForReady bool } -// intercept fulfils the grpc.UnaryClientInterceptor interface, it should be noted that while this API -// is currently experimental the metrics it reports should be kept as stable as can be, *within reason*. -func (ci *clientInterceptor) intercept( +// Unary implements the grpc.UnaryClientInterceptor interface. +func (cmi *clientMetadataInterceptor) Unary( ctx context.Context, fullMethod string, req, - reply interface{}, + reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { // This should not occur but fail fast with a clear error if it does (e.g. // because of buggy unit test code) instead of a generic nil panic later! - if ci.metrics.inFlightRPCs == nil { + if cmi.metrics.inFlightRPCs == nil { return berrors.InternalServerError("clientInterceptor has nil inFlightRPCs gauge") } - localCtx, cancel := context.WithTimeout(ctx, ci.timeout) + // Ensure that the context has a deadline set. 
+ localCtx, cancel := context.WithTimeout(ctx, cmi.timeout) defer cancel() - // Disable fail-fast so RPCs will retry until deadline, even if all backends - // are down. - opts = append(opts, grpc.WaitForReady(true)) // Convert the current unix nano timestamp to a string for embedding in the grpc metadata - nowTS := strconv.FormatInt(ci.clk.Now().UnixNano(), 10) - + nowTS := strconv.FormatInt(cmi.clk.Now().UnixNano(), 10) // Create a grpc/metadata.Metadata instance for the request metadata. - // Initialize it with the request time. - reqMD := metadata.New(map[string]string{clientRequestTimeKey: nowTS}) + reqMD := metadata.New(map[string]string{ + clientRequestTimeKey: nowTS, + userAgentKey: web.UserAgent(ctx), + }) // Configure the localCtx with the metadata so it gets sent along in the request localCtx = metadata.NewOutgoingContext(localCtx, reqMD) + // Disable fail-fast so RPCs will retry until deadline, even if all backends + // are down. + opts = append(opts, grpc.WaitForReady(cmi.waitForReady)) + // Create a grpc/metadata.Metadata instance for a grpc.Trailer. respMD := metadata.New(nil) // Configure a grpc Trailer with respMD. This allows us to wrap error @@ -193,13 +303,13 @@ func (ci *clientInterceptor) intercept( "method": method, "service": service, } - // Increment the inFlightRPCs gauge for this method/service - ci.metrics.inFlightRPCs.With(labels).Inc() + cmi.metrics.inFlightRPCs.With(labels).Inc() // And defer decrementing it when we're done - defer ci.metrics.inFlightRPCs.With(labels).Dec() + defer cmi.metrics.inFlightRPCs.With(labels).Dec() + // Handle the RPC - begin := ci.clk.Now() + begin := cmi.clk.Now() err := invoker(localCtx, fullMethod, req, reply, cc, opts...) if err != nil { err = unwrapError(err, respMD) @@ -207,27 +317,134 @@ func (ci *clientInterceptor) intercept( return deadlineDetails{ service: service, method: method, - latency: ci.clk.Since(begin), + latency: cmi.clk.Since(begin), } } } return err } -// CancelTo408Interceptor calls the underlying invoker, checks to see if the -// resulting error was a gRPC Canceled error (because this client cancelled -// the request, likely because the ACME client itself canceled the HTTP -// request), and converts that into a Problem which can be "returned" to the -// (now missing) client, and into our logs. This should be the outermost client -// interceptor, and should only be enabled in the WFEs. -func CancelTo408Interceptor(ctx context.Context, fullMethod string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - err := invoker(ctx, fullMethod, req, reply, cc, opts...) - if err != nil && status.Code(err) == codes.Canceled { - return probs.Canceled(err.Error()) +// interceptedClientStream wraps an existing client stream, and calls finish +// when the stream ends or any operation on it fails. +type interceptedClientStream struct { + grpc.ClientStream + finish func(error) error +} + +// Header implements part of the grpc.ClientStream interface. +func (ics interceptedClientStream) Header() (metadata.MD, error) { + md, err := ics.ClientStream.Header() + if err != nil { + err = ics.finish(err) + } + return md, err +} + +// SendMsg implements part of the grpc.ClientStream interface. +func (ics interceptedClientStream) SendMsg(m any) error { + err := ics.ClientStream.SendMsg(m) + if err != nil { + err = ics.finish(err) + } + return err +} + +// RecvMsg implements part of the grpc.ClientStream interface. 
+func (ics interceptedClientStream) RecvMsg(m any) error {
+	err := ics.ClientStream.RecvMsg(m)
+	if err != nil {
+		err = ics.finish(err)
+	}
+	return err
+}
+
+// CloseSend implements part of the grpc.ClientStream interface.
+func (ics interceptedClientStream) CloseSend() error {
+	err := ics.ClientStream.CloseSend()
+	if err != nil {
+		err = ics.finish(err)
+	}
+	return err
+}
+
+// Stream implements the grpc.StreamClientInterceptor interface.
+func (cmi *clientMetadataInterceptor) Stream(
+	ctx context.Context,
+	desc *grpc.StreamDesc,
+	cc *grpc.ClientConn,
+	fullMethod string,
+	streamer grpc.Streamer,
+	opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	// This should not occur but fail fast with a clear error if it does (e.g.
+	// because of buggy unit test code) instead of a generic nil panic later!
+	if cmi.metrics.inFlightRPCs == nil {
+		return nil, berrors.InternalServerError("clientInterceptor has nil inFlightRPCs gauge")
+	}
+
+	// We don't defer cancel() here, because this function is going to return
+	// immediately. Instead we store it in the interceptedClientStream.
+	localCtx, cancel := context.WithTimeout(ctx, cmi.timeout)
+
+	// Convert the current unix nano timestamp to a string for embedding in the grpc metadata
+	nowTS := strconv.FormatInt(cmi.clk.Now().UnixNano(), 10)
+	// Create a grpc/metadata.Metadata instance for the request metadata.
+	// Initialize it with the request time.
+	reqMD := metadata.New(map[string]string{
+		clientRequestTimeKey: nowTS,
+		userAgentKey:         web.UserAgent(ctx),
+	})
+	// Configure the localCtx with the metadata so it gets sent along in the request
+	localCtx = metadata.NewOutgoingContext(localCtx, reqMD)
+
+	// Disable fail-fast so RPCs will retry until deadline, even if all backends
+	// are down.
+	opts = append(opts, grpc.WaitForReady(cmi.waitForReady))
+
+	// Create a grpc/metadata.Metadata instance for a grpc.Trailer.
+	respMD := metadata.New(nil)
+	// Configure a grpc Trailer with respMD. This allows us to wrap error
+	// types in the server interceptor later on.
+	opts = append(opts, grpc.Trailer(&respMD))
+
+	// Split the method and service name from the fullMethod.
+	// Stream client interceptors receive a `method` arg of the form
+	// "/ServiceName/MethodName"
+	service, method := splitMethodName(fullMethod)
+	// Slice the inFlightRPC inc/dec calls by method and service
+	labels := prometheus.Labels{
+		"method":  method,
+		"service": service,
+	}
+	// Increment the inFlightRPCs gauge for this method/service
+	cmi.metrics.inFlightRPCs.With(labels).Inc()
+	begin := cmi.clk.Now()
+
+	// Cancel the local context and decrement the metric when we're done. Also
+	// transform the error into a more usable form, if necessary.
+	finish := func(err error) error {
+		cancel()
+		cmi.metrics.inFlightRPCs.With(labels).Dec()
+		if err != nil {
+			err = unwrapError(err, respMD)
+			if status.Code(err) == codes.DeadlineExceeded {
+				return deadlineDetails{
+					service: service,
+					method:  method,
+					latency: cmi.clk.Since(begin),
+				}
+			}
+		}
+		return err
+	}
+
+	// Handle the RPC
+	cs, err := streamer(localCtx, desc, cc, fullMethod, opts...)
+	ics := interceptedClientStream{cs, finish}
+	return ics, err
+}
+
+var _ clientInterceptor = (*clientMetadataInterceptor)(nil)
+
 // deadlineDetails is an error type that we use in place of gRPC's
 // DeadlineExceeded errors in order to add more detail for debugging.
 type deadlineDetails struct {
@@ -240,3 +457,94 @@ func (dd deadlineDetails) Error() string {
 	return fmt.Sprintf("%s.%s timed out after %d ms",
 		dd.service, dd.method, int64(dd.latency/time.Millisecond))
 }
+
+// authInterceptor provides two server interceptors (Unary and Stream) which can
+// check that every request for a given gRPC service is being made over an mTLS
+// connection from a client which is allow-listed for that particular service.
+type authInterceptor struct {
+	// serviceClientNames is a map of gRPC service names (e.g. "ca.CertificateAuthority")
+	// to allowed client certificate SANs (e.g. "ra.boulder") which are allowed to
+	// make RPCs to that service. The set of client names is implemented as a map
+	// of names to empty structs for easy lookup.
+	serviceClientNames map[string]map[string]struct{}
+}
+
+// newServiceAuthChecker takes a GRPCServerConfig and uses its Service stanzas
+// to construct an authInterceptor which enforces the service/client mappings
+// contained in the config.
+func newServiceAuthChecker(c *cmd.GRPCServerConfig) *authInterceptor {
+	names := make(map[string]map[string]struct{})
+	for serviceName, service := range c.Services {
+		names[serviceName] = make(map[string]struct{})
+		for _, clientName := range service.ClientNames {
+			names[serviceName][clientName] = struct{}{}
+		}
+	}
+	return &authInterceptor{names}
+}
+
+// Unary is a gRPC unary interceptor.
+func (ac *authInterceptor) Unary(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+	err := ac.checkContextAuth(ctx, info.FullMethod)
+	if err != nil {
+		return nil, err
+	}
+	return handler(ctx, req)
+}
+
+// Stream is a gRPC stream interceptor.
+func (ac *authInterceptor) Stream(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	err := ac.checkContextAuth(ss.Context(), info.FullMethod)
+	if err != nil {
+		return err
+	}
+	return handler(srv, ss)
+}
+
+// checkContextAuth does most of the heavy lifting. It extracts TLS information
+// from the incoming context, gets the set of DNS names contained in the client
+// mTLS cert, and returns nil if at least one of those names appears in the set
+// of allowed client names for the given service. If a service has no allowed
+// client names configured, every request to it is rejected.
+func (ac *authInterceptor) checkContextAuth(ctx context.Context, fullMethod string) error {
+	serviceName, _ := splitMethodName(fullMethod)
+
+	allowedClientNames, ok := ac.serviceClientNames[serviceName]
+	if !ok || len(allowedClientNames) == 0 {
+		return fmt.Errorf("service %q has no allowed client names", serviceName)
+	}
+
+	p, ok := peer.FromContext(ctx)
+	if !ok {
+		return fmt.Errorf("unable to fetch peer info from grpc context")
+	}
+
+	if p.AuthInfo == nil {
+		return fmt.Errorf("grpc connection appears to be plaintext")
+	}
+
+	tlsAuth, ok := p.AuthInfo.(credentials.TLSInfo)
+	if !ok {
+		return fmt.Errorf("connection is not TLS authed")
+	}
+
+	if len(tlsAuth.State.VerifiedChains) == 0 || len(tlsAuth.State.VerifiedChains[0]) == 0 {
+		return fmt.Errorf("connection auth not verified")
+	}
+
+	cert := tlsAuth.State.VerifiedChains[0][0]
+
+	for _, clientName := range cert.DNSNames {
+		_, ok := allowedClientNames[clientName]
+		if ok {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"client names %v are not authorized for service %q (%v)",
+		cert.DNSNames, serviceName, allowedClientNames)
+}
+
+// Ensure authInterceptor matches the serverInterceptor interface.
+var _ serverInterceptor = (*authInterceptor)(nil) diff --git a/grpc/interceptors_test.go b/grpc/interceptors_test.go index c1c4ba66399..fa42c252d15 100644 --- a/grpc/interceptors_test.go +++ b/grpc/interceptors_test.go @@ -2,10 +2,12 @@ package grpc import ( "context" + "crypto/tls" + "crypto/x509" "errors" + "fmt" "log" "net" - "net/http" "strconv" "strings" "sync" @@ -15,19 +17,23 @@ import ( "github.com/jmhodges/clock" "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" - "google.golang.org/grpc/codes" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" "github.com/letsencrypt/boulder/grpc/test_proto" "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/probs" "github.com/letsencrypt/boulder/test" + "github.com/letsencrypt/boulder/web" ) var fc = clock.NewFake() -func testHandler(_ context.Context, i interface{}) (interface{}, error) { +func testHandler(_ context.Context, i any) (any, error) { if i != nil { return nil, errors.New("") } @@ -35,7 +41,7 @@ func testHandler(_ context.Context, i interface{}) (interface{}, error) { return nil, nil } -func testInvoker(_ context.Context, method string, _, _ interface{}, _ *grpc.ClientConn, opts ...grpc.CallOption) error { +func testInvoker(_ context.Context, method string, _, _ any, _ *grpc.ClientConn, opts ...grpc.CallOption) error { switch method { case "-service-brokeTest": return errors.New("") @@ -47,141 +53,138 @@ func testInvoker(_ context.Context, method string, _, _ interface{}, _ *grpc.Cli } func TestServerInterceptor(t *testing.T) { - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clock.NewFake()) + serverMetrics, err := newServerMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating server metrics") + si := newServerMetadataInterceptor(serverMetrics, clock.NewFake()) md := metadata.New(map[string]string{clientRequestTimeKey: "0"}) ctxWithMetadata := metadata.NewIncomingContext(context.Background(), md) - _, err := si.intercept(context.Background(), nil, nil, testHandler) + _, err = si.Unary(context.Background(), nil, nil, testHandler) test.AssertError(t, err, "si.intercept didn't fail with a context missing metadata") - _, err = si.intercept(ctxWithMetadata, nil, nil, testHandler) + _, err = si.Unary(ctxWithMetadata, nil, nil, testHandler) test.AssertError(t, err, "si.intercept didn't fail with a nil grpc.UnaryServerInfo") - _, err = si.intercept(ctxWithMetadata, nil, &grpc.UnaryServerInfo{FullMethod: "-service-test"}, testHandler) + _, err = si.Unary(ctxWithMetadata, nil, &grpc.UnaryServerInfo{FullMethod: "-service-test"}, testHandler) test.AssertNotError(t, err, "si.intercept failed with a non-nil grpc.UnaryServerInfo") - _, err = si.intercept(ctxWithMetadata, 0, &grpc.UnaryServerInfo{FullMethod: "brokeTest"}, testHandler) + _, err = si.Unary(ctxWithMetadata, 0, &grpc.UnaryServerInfo{FullMethod: "brokeTest"}, testHandler) test.AssertError(t, err, "si.intercept didn't fail when handler returned a error") } func TestClientInterceptor(t *testing.T) { - ci := clientInterceptor{ + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + ci := clientMetadataInterceptor{ timeout: time.Second, - metrics: 
NewClientMetrics(metrics.NoopRegisterer), + metrics: clientMetrics, clk: clock.NewFake(), } - err := ci.intercept(context.Background(), "-service-test", nil, nil, nil, testInvoker) + + err = ci.Unary(context.Background(), "-service-test", nil, nil, nil, testInvoker) test.AssertNotError(t, err, "ci.intercept failed with a non-nil grpc.UnaryServerInfo") - err = ci.intercept(context.Background(), "-service-brokeTest", nil, nil, nil, testInvoker) + err = ci.Unary(context.Background(), "-service-brokeTest", nil, nil, nil, testInvoker) test.AssertError(t, err, "ci.intercept didn't fail when handler returned a error") } -func TestCancelTo408Interceptor(t *testing.T) { - err := CancelTo408Interceptor(context.Background(), "-service-test", nil, nil, nil, testInvoker) - test.AssertNotError(t, err, "CancelTo408Interceptor returned an error when it shouldn't") - - err = CancelTo408Interceptor(context.Background(), "-service-requesterCanceledTest", nil, nil, nil, testInvoker) - test.AssertError(t, err, "CancelTo408Interceptor didn't return an error when it should") +// TestWaitForReadyTrue configures a gRPC client with waitForReady: true and +// sends a request to a backend that is unavailable. It ensures that the +// request doesn't error out until the timeout is reached, i.e. that +// FailFast is set to false. +// https://github.com/grpc/grpc/blob/main/doc/wait-for-ready.md +func TestWaitForReadyTrue(t *testing.T) { + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + ci := &clientMetadataInterceptor{ + timeout: 100 * time.Millisecond, + metrics: clientMetrics, + clk: clock.NewFake(), + waitForReady: true, + } + conn, err := grpc.NewClient("localhost:19876", // random, probably unused port + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUnaryInterceptor(ci.Unary)) + if err != nil { + t.Fatalf("did not connect: %v", err) + } + defer conn.Close() + c := test_proto.NewChillerClient(conn) - var probDetails *probs.ProblemDetails - test.AssertErrorWraps(t, err, &probDetails) - test.AssertEquals(t, probDetails.Type, probs.MalformedProblem) - test.AssertEquals(t, probDetails.HTTPStatus, http.StatusRequestTimeout) + start := time.Now() + _, err = c.Chill(context.Background(), &test_proto.Time{Duration: durationpb.New(time.Second)}) + if err == nil { + t.Errorf("Successful Chill when we expected failure.") + } + if time.Since(start) < 90*time.Millisecond { + t.Errorf("Chill failed fast, when WaitForReady should be enabled.") + } } -// TestFailFastFalse sends a gRPC request to a backend that is -// unavailable, and ensures that the request doesn't error out until the -// timeout is reached, i.e. that FailFast is set to false. -// https://github.com/grpc/grpc/blob/main/doc/wait-for-ready.md -func TestFailFastFalse(t *testing.T) { - ci := &clientInterceptor{ - timeout: 100 * time.Millisecond, - metrics: NewClientMetrics(metrics.NoopRegisterer), - clk: clock.NewFake(), +// TestWaitForReadyFalse configures a gRPC client with waitForReady: false and +// sends a request to a backend that is unavailable, and ensures that the request +// errors out promptly. 
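+//
+// With waitForReady set to false (gRPC's default, "fail fast" behavior), the
+// RPC should fail as soon as the channel reports TRANSIENT_FAILURE, well
+// before the one-second interceptor timeout configured below.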
+func TestWaitForReadyFalse(t *testing.T) { + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + ci := &clientMetadataInterceptor{ + timeout: time.Second, + metrics: clientMetrics, + clk: clock.NewFake(), + waitForReady: false, } - conn, err := grpc.Dial("localhost:19876", // random, probably unused port - grpc.WithInsecure(), - grpc.WithBalancerName("round_robin"), - grpc.WithUnaryInterceptor(ci.intercept)) + conn, err := grpc.NewClient("localhost:19876", // random, probably unused port + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, roundrobin.Name)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUnaryInterceptor(ci.Unary)) if err != nil { t.Fatalf("did not connect: %v", err) } + defer conn.Close() c := test_proto.NewChillerClient(conn) start := time.Now() - _, err = c.Chill(context.Background(), &test_proto.Time{Time: time.Second.Nanoseconds()}) + _, err = c.Chill(context.Background(), &test_proto.Time{Duration: durationpb.New(time.Second)}) if err == nil { t.Errorf("Successful Chill when we expected failure.") } - if time.Since(start) < 90*time.Millisecond { - t.Errorf("Chill failed fast, when FailFast should be disabled.") + if time.Since(start) > 200*time.Millisecond { + t.Errorf("Chill failed slow, when WaitForReady should be disabled.") } - _ = conn.Close() } -// testServer is used to implement TestTimeouts, and will attempt to sleep for +// testTimeoutServer is used to implement TestTimeouts, and will attempt to sleep for // the given amount of time (unless it hits a timeout or cancel). -type testServer struct { +type testTimeoutServer struct { test_proto.UnimplementedChillerServer } // Chill implements ChillerServer.Chill -func (s *testServer) Chill(ctx context.Context, in *test_proto.Time) (*test_proto.Time, error) { +func (s *testTimeoutServer) Chill(ctx context.Context, in *test_proto.Time) (*test_proto.Time, error) { start := time.Now() // Sleep for either the requested amount of time, or the context times out or // is canceled. 
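	// Note that in.Duration is a *durationpb.Duration and AsDuration() already
	// yields a time.Duration, so the multiplication by time.Nanosecond in the
	// select below is effectively a no-op, mirroring the arithmetic in the
	// pre-protobuf version of this code.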
select { - case <-time.After(time.Duration(in.Time) * time.Nanosecond): - spent := int64(time.Since(start) / time.Nanosecond) - return &test_proto.Time{Time: spent}, nil + case <-time.After(in.Duration.AsDuration() * time.Nanosecond): + spent := time.Since(start) / time.Nanosecond + return &test_proto.Time{Duration: durationpb.New(spent)}, nil case <-ctx.Done(): - return nil, status.Errorf(codes.DeadlineExceeded, "the chiller overslept") + return nil, errors.New("unique error indicating that the server's shortened context timed itself out") } } func TestTimeouts(t *testing.T) { - // start server - lis, err := net.Listen("tcp", ":0") - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - port := lis.Addr().(*net.TCPAddr).Port - - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clock.NewFake()) - s := grpc.NewServer(grpc.UnaryInterceptor(si.intercept)) - test_proto.RegisterChillerServer(s, &testServer{}) - go func() { - start := time.Now() - err := s.Serve(lis) - if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { - t.Logf("s.Serve: %v after %s", err, time.Since(start)) - } - }() - defer s.Stop() - - // make client - ci := &clientInterceptor{ - timeout: 30 * time.Second, - metrics: NewClientMetrics(metrics.NoopRegisterer), - clk: clock.NewFake(), - } - conn, err := grpc.Dial(net.JoinHostPort("localhost", strconv.Itoa(port)), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(ci.intercept)) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - c := test_proto.NewChillerClient(conn) + server := new(testTimeoutServer) + client, _, stop := setup(t, server, clock.NewFake()) + defer stop() testCases := []struct { timeout time.Duration expectedErrorPrefix string }{ - {250 * time.Millisecond, "rpc error: code = Unknown desc = rpc error: code = DeadlineExceeded desc = the chiller overslept"}, + {250 * time.Millisecond, "rpc error: code = Unknown desc = unique error indicating that the server's shortened context timed itself out"}, {100 * time.Millisecond, "Chiller.Chill timed out after 0 ms"}, {10 * time.Millisecond, "Chiller.Chill timed out after 0 ms"}, } @@ -189,7 +192,7 @@ func TestTimeouts(t *testing.T) { t.Run(tc.timeout.String(), func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), tc.timeout) defer cancel() - _, err := c.Chill(ctx, &test_proto.Time{Time: time.Second.Nanoseconds()}) + _, err := client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(time.Second)}) if err == nil { t.Fatal("Got no error, expected a timeout") } @@ -201,61 +204,75 @@ func TestTimeouts(t *testing.T) { } func TestRequestTimeTagging(t *testing.T) { - clk := clock.NewFake() - // Listen for TCP requests on a random system assigned port number - lis, err := net.Listen("tcp", ":0") - if err != nil { - log.Fatalf("failed to listen: %v", err) + server := new(testTimeoutServer) + serverMetrics, err := newServerMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating server metrics") + client, _, stop := setup(t, server, serverMetrics) + defer stop() + + // Make an RPC request with the ChillerClient with a timeout higher than the + // requested ChillerServer delay so that the RPC completes normally + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if _, err := client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(time.Second * 5)}); err != nil { + t.Fatalf("Unexpected error calling Chill RPC: %s", err) } - // Retrieve the 
concrete port numberthe system assigned our listener - port := lis.Addr().(*net.TCPAddr).Port - // Create a new ChillerServer - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clk) - s := grpc.NewServer(grpc.UnaryInterceptor(si.intercept)) - test_proto.RegisterChillerServer(s, &testServer{}) - // Chill until ill - go func() { - start := time.Now() - err := s.Serve(lis) - if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { - t.Logf("s.Serve: %v after %s", err, time.Since(start)) - } - }() - defer s.Stop() + // There should be one histogram sample in the serverInterceptor rpcLag stat + test.AssertMetricWithLabelsEquals(t, serverMetrics.rpcLag, prometheus.Labels{}, 1) +} - // Dial the ChillerServer - ci := &clientInterceptor{ +func TestClockSkew(t *testing.T) { + // Create two separate clocks for the client and server + serverClk := clock.NewFake() + serverClk.Set(time.Now()) + clientClk := clock.NewFake() + clientClk.Set(time.Now()) + + _, serverPort, stop := setup(t, &testTimeoutServer{}, serverClk) + defer stop() + + clientMetrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + ci := &clientMetadataInterceptor{ timeout: 30 * time.Second, - metrics: NewClientMetrics(metrics.NoopRegisterer), - clk: clk, + metrics: clientMetrics, + clk: clientClk, } - conn, err := grpc.Dial(net.JoinHostPort("localhost", strconv.Itoa(port)), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(ci.intercept)) + conn, err := grpc.NewClient(net.JoinHostPort("localhost", strconv.Itoa(serverPort)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUnaryInterceptor(ci.Unary)) if err != nil { t.Fatalf("did not connect: %v", err) } - // Create a ChillerClient with the connection to the ChillerServer - c := test_proto.NewChillerClient(conn) - // Make an RPC request with the ChillerClient with a timeout higher than the - // requested ChillerServer delay so that the RPC completes normally - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + client := test_proto.NewChillerClient(conn) + + // Create a context with plenty of timeout + ctx, cancel := context.WithDeadline(context.Background(), clientClk.Now().Add(10*time.Second)) defer cancel() - delayTime := (time.Second * 5).Nanoseconds() - if _, err := c.Chill(ctx, &test_proto.Time{Time: delayTime}); err != nil { - t.Fatalf("Unexpected error calling Chill RPC: %s", err) - } - // There should be one histogram sample in the serverInterceptor rpcLag stat - test.AssertMetricWithLabelsEquals(t, si.metrics.rpcLag, prometheus.Labels{}, 1) + // Attempt a gRPC request which should succeed + _, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)}) + test.AssertNotError(t, err, "should succeed with no skew") + + // Skew the client clock forward and the request should fail due to skew + clientClk.Add(time.Hour) + _, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)}) + test.AssertError(t, err, "should fail with positive client skew") + test.AssertContains(t, err.Error(), "very different time") + + // Skew the server clock forward and the request should fail due to skew + serverClk.Add(2 * time.Hour) + _, err = client.Chill(ctx, &test_proto.Time{Duration: durationpb.New(100 * time.Millisecond)}) + test.AssertError(t, err, "should fail with negative client skew") + test.AssertContains(t, err.Error(), "very different 
time") } // blockedServer implements a ChillerServer with a Chill method that: -// a) Calls Done() on the received waitgroup when receiving an RPC -// b) Blocks the RPC on the roadblock waitgroup +// 1. Calls Done() on the received waitgroup when receiving an RPC +// 2. Blocks the RPC on the roadblock waitgroup +// // This is used by TestInFlightRPCStat to test that the gauge for in-flight RPCs // is incremented and decremented as expected. type blockedServer struct { @@ -270,22 +287,19 @@ func (s *blockedServer) Chill(_ context.Context, _ *test_proto.Time) (*test_prot // Wait for the roadblock to be cleared s.roadblock.Wait() // Return a dummy spent value to adhere to the chiller protocol - return &test_proto.Time{Time: int64(1)}, nil + return &test_proto.Time{Duration: durationpb.New(time.Millisecond)}, nil } func TestInFlightRPCStat(t *testing.T) { - clk := clock.NewFake() - // Listen for TCP requests on a random system assigned port number - lis, err := net.Listen("tcp", ":0") - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - // Retrieve the concrete port numberthe system assigned our listener - port := lis.Addr().(*net.TCPAddr).Port - // Create a new blockedServer to act as a ChillerServer server := &blockedServer{} + metrics, err := newClientMetrics(metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating client metrics") + + client, _, stop := setup(t, server, metrics) + defer stop() + // Increment the roadblock waitgroup - this will cause all chill RPCs to // the server to block until we call Done()! server.roadblock.Add(1) @@ -296,40 +310,11 @@ func TestInFlightRPCStat(t *testing.T) { numRPCs := 5 server.received.Add(numRPCs) - serverMetrics := NewServerMetrics(metrics.NoopRegisterer) - si := newServerInterceptor(serverMetrics, clk) - s := grpc.NewServer(grpc.UnaryInterceptor(si.intercept)) - test_proto.RegisterChillerServer(s, server) - // Chill until ill - go func() { - start := time.Now() - err := s.Serve(lis) - if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") { - t.Logf("s.Serve: %v after %s", err, time.Since(start)) - } - }() - defer s.Stop() - - // Dial the ChillerServer - ci := &clientInterceptor{ - timeout: 30 * time.Second, - metrics: NewClientMetrics(metrics.NoopRegisterer), - clk: clk, - } - conn, err := grpc.Dial(net.JoinHostPort("localhost", strconv.Itoa(port)), - grpc.WithInsecure(), - grpc.WithUnaryInterceptor(ci.intercept)) - if err != nil { - t.Fatalf("did not connect: %v", err) - } - // Create a ChillerClient with the connection to the ChillerServer - c := test_proto.NewChillerClient(conn) - // Fire off a few RPCs. They will block on the blockedServer's roadblock wg - for i := 0; i < numRPCs; i++ { + for range numRPCs { go func() { // Ignore errors, just chilllll. - _, _ = c.Chill(context.Background(), &test_proto.Time{}) + _, _ = client.Chill(context.Background(), &test_proto.Time{}) }() } @@ -344,7 +329,7 @@ func TestInFlightRPCStat(t *testing.T) { } // We expect the inFlightRPCs gauge for the Chiller.Chill RPCs to be equal to numRPCs. 
- test.AssertMetricWithLabelsEquals(t, ci.metrics.inFlightRPCs, labels, float64(numRPCs)) + test.AssertMetricWithLabelsEquals(t, metrics.inFlightRPCs, labels, float64(numRPCs)) // Unblock the blockedServer to let all of the Chiller.Chill RPCs complete server.roadblock.Done() @@ -352,27 +337,161 @@ func TestInFlightRPCStat(t *testing.T) { time.Sleep(1 * time.Second) // Check the gauge value again - test.AssertMetricWithLabelsEquals(t, ci.metrics.inFlightRPCs, labels, 0) + test.AssertMetricWithLabelsEquals(t, metrics.inFlightRPCs, labels, 0) +} + +func TestServiceAuthChecker(t *testing.T) { + ac := authInterceptor{ + map[string]map[string]struct{}{ + "package.ServiceName": { + "allowed.client": {}, + "also.allowed": {}, + }, + }, + } + + // No allowlist is a bad configuration. + ctx := context.Background() + err := ac.checkContextAuth(ctx, "/package.OtherService/Method/") + test.AssertError(t, err, "checking empty allowlist") + + // Context with no peering information is disallowed. + err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/") + test.AssertError(t, err, "checking un-peered context") + + // Context with no auth info is disallowed. + ctx = peer.NewContext(ctx, &peer.Peer{}) + err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/") + test.AssertError(t, err, "checking peer with no auth") + + // Context with no verified chains is disallowed. + ctx = peer.NewContext(ctx, &peer.Peer{ + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{}, + }, + }) + err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/") + test.AssertError(t, err, "checking TLS with no valid chains") + + // Context with cert with wrong name is disallowed. + ctx = peer.NewContext(ctx, &peer.Peer{ + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + VerifiedChains: [][]*x509.Certificate{ + { + &x509.Certificate{ + DNSNames: []string{ + "disallowed.client", + }, + }, + }, + }, + }, + }, + }) + err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/") + test.AssertError(t, err, "checking disallowed cert") + + // Context with cert with good name is allowed. + ctx = peer.NewContext(ctx, &peer.Peer{ + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + VerifiedChains: [][]*x509.Certificate{ + { + &x509.Certificate{ + DNSNames: []string{ + "disallowed.client", + "also.allowed", + }, + }, + }, + }, + }, + }, + }) + err = ac.checkContextAuth(ctx, "/package.ServiceName/Method/") + test.AssertNotError(t, err, "checking allowed cert") +} + +// testUserAgentServer stores the last value it saw in the user agent field of its context. 
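+//
+// The user agent travels from client to server in gRPC request metadata: the
+// client interceptor stores web.UserAgent(ctx) under userAgentKey, and the
+// server-side interceptor is expected to place that value back into the
+// request context, where web.UserAgent can read it.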
+type testUserAgentServer struct {
+	test_proto.UnimplementedChillerServer
+
+	lastSeenUA string
 }
 
-func TestNoCancelInterceptor(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	ctx, cancel2 := context.WithDeadline(ctx, time.Now().Add(time.Second))
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		select {
-		case <-ctx.Done():
-			return nil, errors.New("oh no canceled")
-		case <-time.After(50 * time.Millisecond):
+// Chill implements ChillerServer.Chill
+func (s *testUserAgentServer) Chill(ctx context.Context, in *test_proto.Time) (*test_proto.Time, error) {
+	s.lastSeenUA = web.UserAgent(ctx)
+	return nil, nil
+}
+
+func TestUserAgentMetadata(t *testing.T) {
+	server := new(testUserAgentServer)
+	client, _, stop := setup(t, server)
+	defer stop()
+
+	testUA := "test UA"
+	ctx := web.WithUserAgent(context.Background(), testUA)
+
+	_, err := client.Chill(ctx, &test_proto.Time{})
+	if err != nil {
+		t.Fatalf("calling c.Chill: %s", err)
+	}
+
+	if server.lastSeenUA != testUA {
+		t.Errorf("last seen User-Agent on server side was %q, want %q", server.lastSeenUA, testUA)
+	}
+}
+
+// setup creates a server and client, returning the created client, the running server's port, and a stop function.
+func setup(t *testing.T, server test_proto.ChillerServer, opts ...any) (test_proto.ChillerClient, int, func()) {
+	clk := clock.NewFake()
+	serverMetricsVal, err := newServerMetrics(metrics.NoopRegisterer)
+	test.AssertNotError(t, err, "creating server metrics")
+	clientMetricsVal, err := newClientMetrics(metrics.NoopRegisterer)
+	test.AssertNotError(t, err, "creating client metrics")
+
+	for _, opt := range opts {
+		switch optTyped := opt.(type) {
+		case clock.FakeClock:
+			clk = optTyped
+		case clientMetrics:
+			clientMetricsVal = optTyped
+		case serverMetrics:
+			serverMetricsVal = optTyped
+		default:
+			t.Fatalf("setup called with unrecognized option %#v", opt)
 		}
-		return nil, nil
 	}
+	lis, err := net.Listen("tcp", ":0")
+	if err != nil {
+		log.Fatalf("failed to listen: %v", err)
+	}
+	port := lis.Addr().(*net.TCPAddr).Port
+
+	si := newServerMetadataInterceptor(serverMetricsVal, clk)
+	s := grpc.NewServer(grpc.UnaryInterceptor(si.Unary))
+	test_proto.RegisterChillerServer(s, server)
+
 	go func() {
-		time.Sleep(10 * time.Millisecond)
-		cancel()
-		cancel2()
+		start := time.Now()
+		err := s.Serve(lis)
+		if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") {
+			t.Logf("s.Serve: %v after %s", err, time.Since(start))
+		}
 	}()
-	_, err := NoCancelInterceptor(ctx, nil, nil, handler)
+
+	ci := &clientMetadataInterceptor{
+		timeout: 30 * time.Second,
+		metrics: clientMetricsVal,
+		clk:     clock.NewFake(),
+	}
+	conn, err := grpc.NewClient(net.JoinHostPort("localhost", strconv.Itoa(port)),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithUnaryInterceptor(ci.Unary))
 	if err != nil {
-		t.Error(err)
+		t.Fatalf("did not connect: %v", err)
 	}
+	return test_proto.NewChillerClient(conn), port, s.Stop
 }
diff --git a/grpc/internal/backoff/backoff.go b/grpc/internal/backoff/backoff.go
new file mode 100644
index 00000000000..e8baaf4d777
--- /dev/null
+++ b/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "time" + + "github.com/letsencrypt/boulder/grpc/internal/grpcrand" + grpcbackoff "google.golang.org/grpc/backoff" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/grpc/internal/grpcrand/grpcrand.go b/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 00000000000..f4df372935e --- /dev/null +++ b/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand/v2" + "sync" +) + +var ( + r = rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. 
+func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int64N(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.IntN(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} diff --git a/grpc/internal/leakcheck/leakcheck.go b/grpc/internal/leakcheck/leakcheck.go new file mode 100644 index 00000000000..07a0e17d2de --- /dev/null +++ b/grpc/internal/leakcheck/leakcheck.go @@ -0,0 +1,124 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package leakcheck contains functions to check leaked goroutines. +// +// Call "defer leakcheck.Check(t)" at the beginning of tests. +package leakcheck + +import ( + "runtime" + "sort" + "strings" + "time" +) + +var goroutinesToIgnore = []string{ + "testing.Main(", + "testing.tRunner(", + "testing.(*M).", + "runtime.goexit", + "created by runtime.gc", + "created by runtime/trace.Start", + "interestingGoroutines", + "runtime.MHeap_Scavenger", + "signal.signal_recv", + "sigterm.handler", + "runtime_mcall", + "(*loggingT).flushDaemon", + "goroutine in C code", + // Ignore the http read/write goroutines. gce metadata.OnGCE() was leaking + // these, root cause unknown. + // + // https://github.com/grpc/grpc-go/issues/5171 + // https://github.com/grpc/grpc-go/issues/5173 + "created by net/http.(*Transport).dialConn", +} + +// RegisterIgnoreGoroutine appends s into the ignore goroutine list. The +// goroutines whose stack trace contains s will not be identified as leaked +// goroutines. Not thread-safe, only call this function in init(). +func RegisterIgnoreGoroutine(s string) { + goroutinesToIgnore = append(goroutinesToIgnore, s) +} + +func ignore(g string) bool { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + return true + } + stack := strings.TrimSpace(sl[1]) + if strings.HasPrefix(stack, "testing.RunTests") { + return true + } + + if stack == "" { + return true + } + + for _, s := range goroutinesToIgnore { + if strings.Contains(stack, s) { + return true + } + } + + return false +} + +// interestingGoroutines returns all goroutines we care about for the purpose of +// leak checking. It excludes testing or runtime ones. +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for g := range strings.SplitSeq(string(buf), "\n\n") { + if !ignore(g) { + gs = append(gs, g) + } + } + sort.Strings(gs) + return +} + +// Errorfer is the interface that wraps the Errorf method. It's a subset of +// testing.TB to make it easy to use Check. 
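+//
+// In practice Check is called as `defer leakcheck.Check(t)` at the top of a
+// test; *testing.T satisfies Errorfer, so leaked goroutines surface as
+// ordinary test failures.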
+type Errorfer interface { + Errorf(format string, args ...any) +} + +func check(efer Errorfer, timeout time.Duration) { + // Loop, waiting for goroutines to shut down. + // Wait up to timeout, but finish as quickly as possible. + deadline := time.Now().Add(timeout) + var leaked []string + for time.Now().Before(deadline) { + if leaked = interestingGoroutines(); len(leaked) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + for _, g := range leaked { + efer.Errorf("Leaked goroutine: %v", g) + } +} + +// Check looks at the currently-running goroutines and checks if there are any +// interesting (created by gRPC) goroutines leaked. It waits up to 10 seconds +// in the error cases. +func Check(efer Errorfer) { + check(efer, 10*time.Second) +} diff --git a/grpc/internal/leakcheck/leakcheck_test.go b/grpc/internal/leakcheck/leakcheck_test.go new file mode 100644 index 00000000000..21dbd5df035 --- /dev/null +++ b/grpc/internal/leakcheck/leakcheck_test.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package leakcheck + +import ( + "fmt" + "strings" + "testing" + "time" +) + +type testErrorfer struct { + errorCount int + errors []string +} + +func (e *testErrorfer) Errorf(format string, args ...any) { + e.errors = append(e.errors, fmt.Sprintf(format, args...)) + e.errorCount++ +} + +func TestCheck(t *testing.T) { + const leakCount = 3 + for range leakCount { + go func() { time.Sleep(2 * time.Second) }() + } + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} + +func ignoredTestingLeak(d time.Duration) { + time.Sleep(d) +} + +func TestCheckRegisterIgnore(t *testing.T) { + RegisterIgnoreGoroutine("ignoredTestingLeak") + const leakCount = 3 + for range leakCount { + go func() { time.Sleep(2 * time.Second) }() + } + go func() { ignoredTestingLeak(3 * time.Second) }() + if ig := interestingGoroutines(); len(ig) == 0 { + t.Error("blah") + } + e := &testErrorfer{} + check(e, time.Second) + if e.errorCount != leakCount { + t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) + t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) + } + check(t, 3*time.Second) +} diff --git a/grpc/internal/resolver/dns/dns_resolver.go b/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 00000000000..a25bee078df --- /dev/null +++ b/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,318 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Forked from the default internal DNS resolver in the grpc-go package. The
+// original source can be found at:
+// https://github.com/grpc/grpc-go/blob/v1.49.0/internal/resolver/dns/dns_resolver.go
+
+package dns
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/netip"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+
+	"github.com/letsencrypt/boulder/bdns"
+	"github.com/letsencrypt/boulder/grpc/internal/backoff"
+	"github.com/letsencrypt/boulder/grpc/noncebalancer"
+)
+
+var logger = grpclog.Component("srv")
+
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+	newTimer           = time.NewTimer
+	newTimerDNSResRate = time.NewTimer
+)
+
+func init() {
+	resolver.Register(NewDefaultSRVBuilder())
+	resolver.Register(NewNonceSRVBuilder())
+}
+
+const defaultDNSSvrPort = "53"
+
+var defaultResolver netResolver = net.DefaultResolver
+
+var (
+	// To prevent excessive re-resolution, we enforce a rate limit on DNS
+	// resolution requests.
+	minDNSResRate = 30 * time.Second
+)
+
+var customAuthorityDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (*net.Resolver, error) {
+	host, port, err := bdns.ParseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialer(net.JoinHostPort(host, port)),
+	}, nil
+}
+
+// NewDefaultSRVBuilder creates a srvBuilder, which is used to build SRV DNS
+// resolvers.
+func NewDefaultSRVBuilder() resolver.Builder {
+	return &srvBuilder{scheme: "srv"}
+}
+
+// NewNonceSRVBuilder creates a srvBuilder, which is used to build SRV DNS
+// resolvers with a custom grpc.Balancer used by nonce-service clients.
+func NewNonceSRVBuilder() resolver.Builder {
+	return &srvBuilder{scheme: noncebalancer.SRVResolverScheme, balancer: noncebalancer.Name}
+}
+
+type srvBuilder struct {
+	scheme   string
+	balancer string
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
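+//
+// The endpoint is a comma-separated list of "service.domain" names. For
+// example (hypothetical names), the target
+// "srv://10.0.0.1:53/foo.bar.baz,foo.quux.baz" watches the SRV records
+// _foo._tcp.bar.baz and _foo._tcp.quux.baz, resolved via the DNS server at
+// 10.0.0.1:53.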
+func (b *srvBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + var names []name + for i := range strings.SplitSeq(target.Endpoint(), ",") { + service, domain, err := parseServiceDomain(i) + if err != nil { + return nil, err + } + names = append(names, name{service: service, domain: domain}) + } + + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + names: names, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + } + + if target.URL.Host == "" { + d.resolver = defaultResolver + } else { + var err error + d.resolver, err = customAuthorityResolver(target.URL.Host) + if err != nil { + return nil, err + } + } + + if b.balancer != "" { + d.serviceConfig = cc.ParseServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.balancer)) + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder. +func (b *srvBuilder) Scheme() string { + return b.scheme +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) +} + +type name struct { + service string + domain string +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + names []name + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. + // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + serviceConfig *serviceconfig.ParseResult +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. + d.cc.ReportError(err) + } else { + if d.serviceConfig != nil { + state.ServiceConfig = d.serviceConfig + } + err = d.cc.UpdateState(*state) + } + + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least + // to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from ClientConn. 
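+			// With backoff.DefaultExponential (gRPC's default config: BaseDelay
+			// 1s, Multiplier 1.6, MaxDelay 120s, Jitter 0.2), a persistently
+			// failing lookup is retried after roughly 1.6s, 2.56s, 4.1s, and so
+			// on, jittered by ±20% and capped at 120s, rather than in a tight
+			// loop.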
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + var newAddrs []resolver.Address + var errs []error + for _, n := range d.names { + _, srvs, err := d.resolver.LookupSRV(d.ctx, n.service, "tcp", n.domain) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + if err != nil { + errs = append(errs, err) + continue + } + } + for _, s := range srvs { + backendAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err != nil { + errs = append(errs, err) + continue + } + } + for _, a := range backendAddrs { + ip, ok := formatIP(a) + if !ok { + errs = append(errs, fmt.Errorf("srv: error parsing A record IP address %v", a)) + continue + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + } + // Only return an error if all lookups failed. + if len(errs) > 0 && len(newAddrs) == 0 { + return nil, errors.Join(errs...) + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("srv: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + addrs, err := d.lookupSRV() + if err != nil { + return nil, err + } + return &resolver.State{Addresses: addrs}, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", false + } + if ip.Is4() { + return addr, true + } + return "[" + addr + "]", true +} + +// parseServiceDomain takes the user input target string and parses the service domain +// names for SRV lookup. Input is expected to be a hostname containing at least +// two labels (e.g. "foo.bar", "foo.bar.baz"). The first label is the service +// name and the rest is the domain name. If the target is not in the expected +// format, an error is returned. +func parseServiceDomain(target string) (string, string, error) { + sd := strings.SplitN(target, ".", 2) + if len(sd) < 2 || sd[0] == "" || sd[1] == "" { + return "", "", fmt.Errorf("srv: hostname %q contains < 2 labels", target) + } + return sd[0], sd[1], nil +} diff --git a/grpc/internal/resolver/dns/dns_resolver_test.go b/grpc/internal/resolver/dns/dns_resolver_test.go new file mode 100644 index 00000000000..51cb2ddffac --- /dev/null +++ b/grpc/internal/resolver/dns/dns_resolver_test.go @@ -0,0 +1,839 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "slices" + "strings" + "sync" + "testing" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" + + "github.com/letsencrypt/boulder/grpc/internal/leakcheck" + "github.com/letsencrypt/boulder/grpc/internal/testutils" + "github.com/letsencrypt/boulder/test" +) + +func TestMain(m *testing.M) { + // Set a non-zero duration only for tests which are actually testing that + // feature. + replaceDNSResRate(time.Duration(0)) // No need to clean up since we os.Exit + overrideDefaultResolver(false) // No need to clean up since we os.Exit + code := m.Run() + os.Exit(code) +} + +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +type testClientConn struct { + resolver.ClientConn // For unimplemented functions + target string + m1 sync.Mutex + state resolver.State + updateStateCalls int + errChan chan error + updateStateErr error +} + +func (t *testClientConn) UpdateState(s resolver.State) error { + t.m1.Lock() + defer t.m1.Unlock() + t.state = s + t.updateStateCalls++ + // This error determines whether DNS Resolver actually decides to exponentially backoff or not. + // This can be any error. + return t.updateStateErr +} + +func (t *testClientConn) getState() (resolver.State, int) { + t.m1.Lock() + defer t.m1.Unlock() + return t.state, t.updateStateCalls +} + +func (t *testClientConn) ReportError(err error) { + t.errChan <- err +} + +type testResolver struct { + // A write to this channel is made when this resolver receives a resolution + // request. Tests can rely on reading from this channel to be notified about + // resolution requests instead of sleeping for a predefined period of time. + lookupHostCh *testutils.Channel +} + +func (tr *testResolver) LookupHost(ctx context.Context, host string) ([]string, error) { + if tr.lookupHostCh != nil { + tr.lookupHostCh.Send(nil) + } + return hostLookup(host) +} + +func (*testResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return srvLookup(service, proto, name) +} + +// overrideDefaultResolver overrides the defaultResolver used by the code with +// an instance of the testResolver. pushOnLookup controls whether the +// testResolver created here pushes lookupHost events on its channel. 
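+//
+// The returned func restores the previous defaultResolver; callers like
+// TestMain may skip it when the process is about to exit anyway.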
+func overrideDefaultResolver(pushOnLookup bool) func() { + oldResolver := defaultResolver + + var lookupHostCh *testutils.Channel + if pushOnLookup { + lookupHostCh = testutils.NewChannel() + } + defaultResolver = &testResolver{lookupHostCh: lookupHostCh} + + return func() { + defaultResolver = oldResolver + } +} + +func replaceDNSResRate(d time.Duration) func() { + oldMinDNSResRate := minDNSResRate + minDNSResRate = d + + return func() { + minDNSResRate = oldMinDNSResRate + } +} + +var hostLookupTbl = struct { + sync.Mutex + tbl map[string][]string +}{ + tbl: map[string][]string{ + "ipv4.single.fake": {"2.4.6.8"}, + "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, + "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, + "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, + }, +} + +func hostLookup(host string) ([]string, error) { + hostLookupTbl.Lock() + defer hostLookupTbl.Unlock() + if addrs, ok := hostLookupTbl.tbl[host]; ok { + return addrs, nil + } + return nil, &net.DNSError{ + Err: "hostLookup error", + Name: host, + Server: "fake", + IsTemporary: true, + } +} + +var srvLookupTbl = struct { + sync.Mutex + tbl map[string][]*net.SRV +}{ + tbl: map[string][]*net.SRV{ + "_foo._tcp.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, + "_foo._tcp.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, + "_foo._tcp.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, + "_foo._tcp.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, + }, +} + +func srvLookup(service, proto, name string) (string, []*net.SRV, error) { + cname := "_" + service + "._" + proto + "." + name + srvLookupTbl.Lock() + defer srvLookupTbl.Unlock() + if srvs, cnt := srvLookupTbl.tbl[cname]; cnt { + return cname, srvs, nil + } + return "", nil, &net.DNSError{ + Err: "srvLookup error", + Name: cname, + Server: "fake", + IsTemporary: true, + } +} + +func TestResolve(t *testing.T) { + testDNSResolver(t) + testDNSResolveNow(t) +} + +func testDNSResolver(t *testing.T) { + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } + tests := []struct { + target string + addrWant []resolver.Address + }{ + { + "foo.ipv4.single.fake", + []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}}, + }, + { + "foo.ipv4.multi.fake", + []resolver.Address{ + {Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"}, + {Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"}, + {Addr: "9.10.11.12:1234", ServerName: "ipv4.multi.fake"}, + }, + }, + { + "foo.ipv6.single.fake", + []resolver.Address{{Addr: "[2607:f8b0:400a:801::1001]:1234", ServerName: "ipv6.single.fake"}}, + }, + { + "foo.ipv6.multi.fake", + []resolver.Address{ + {Addr: "[2607:f8b0:400a:801::1001]:1234", ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1002]:1234", ServerName: "ipv6.multi.fake"}, + {Addr: "[2607:f8b0:400a:801::1003]:1234", ServerName: "ipv6.multi.fake"}, + }, + }, + } + + for _, a := range tests { + b := NewDefaultSRVBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("%v\n", err) + } + var state resolver.State + var cnt int + for range 2000 { + state, cnt = cc.getState() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if cnt == 0 { + t.Fatalf("UpdateState not called after 2s; aborting") + } + + if !slices.Equal(a.addrWant, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) + } + r.Close() + } +} + +// DNS Resolver immediately starts polling on an error from grpc. This should continue until the ClientConn doesn't +// send back an error from updating the DNS Resolver's state. +func TestDNSResolverExponentialBackoff(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + timerChan := testutils.NewChannel() + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } + target := "foo.ipv4.single.fake" + wantAddr := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}} + + b := NewDefaultSRVBuilder() + cc := &testClientConn{target: target} + // Cause ClientConn to return an error. + cc.updateStateErr = balancer.ErrBadResolverState + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("Error building resolver for target %v: %v", target, err) + } + defer r.Close() + var state resolver.State + var cnt int + for range 2000 { + state, cnt = cc.getState() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if cnt == 0 { + t.Fatalf("UpdateState not called after 2s; aborting") + } + if !slices.Equal(wantAddr, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, target) + } + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + // Cause timer to go off 10 times, and see if it calls updateState() correctly. 
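+	// Each Reset(0) below fires the captured mock timer immediately, standing
+	// in for one backoff interval elapsing, so the test can drive ten retry
+	// cycles without waiting out real backoff delays.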
+ for range 10 { + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + } + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state. + deadline := time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 11 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should update state 11 times instead of %d", got) + } + + time.Sleep(time.Millisecond) + } + + // Update resolver.ClientConn to not return an error anymore - this should stop it from backing off. + cc.updateStateErr = nil + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state the final time. The DNS Resolver should then stop polling. + deadline = time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 12 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should stop backing off at 12 total UpdateState calls instead of %d", got) + } + + _, err := timerChan.ReceiveOrFail() + if err { + t.Fatalf("Should not poll again after Client Conn stops returning error.") + } + + time.Sleep(time.Millisecond) + } +} + +func mutateTbl(target string) func() { + hostLookupTbl.Lock() + oldHostTblEntry := hostLookupTbl.tbl[target] + + // Remove the last address from the target's entry. + hostLookupTbl.tbl[target] = hostLookupTbl.tbl[target][:len(oldHostTblEntry)-1] + hostLookupTbl.Unlock() + + return func() { + hostLookupTbl.Lock() + hostLookupTbl.tbl[target] = oldHostTblEntry + hostLookupTbl.Unlock() + } +} + +func testDNSResolveNow(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } + tests := []struct { + target string + addrWant []resolver.Address + addrNext []resolver.Address + }{ + { + "foo.ipv4.multi.fake", + []resolver.Address{ + {Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"}, + {Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"}, + {Addr: "9.10.11.12:1234", ServerName: "ipv4.multi.fake"}, + }, + []resolver.Address{ + {Addr: "1.2.3.4:1234", ServerName: "ipv4.multi.fake"}, + {Addr: "5.6.7.8:1234", ServerName: "ipv4.multi.fake"}, + }, + }, + } + + for _, a := range tests { + b := NewDefaultSRVBuilder() + cc := &testClientConn{target: a.target} + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("%v\n", err) + } + defer r.Close() + var state resolver.State + var cnt int + for range 2000 { + state, cnt = cc.getState() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if cnt == 0 { + t.Fatalf("UpdateState not called after 2s; aborting. 
state=%v", state) + } + if !slices.Equal(a.addrWant, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) + } + + revertTbl := mutateTbl(strings.TrimPrefix(a.target, "foo.")) + r.ResolveNow(resolver.ResolveNowOptions{}) + for range 2000 { + state, cnt = cc.getState() + if cnt == 2 { + break + } + time.Sleep(time.Millisecond) + } + if cnt != 2 { + t.Fatalf("UpdateState not called after 2s; aborting. state=%v", state) + } + if !slices.Equal(a.addrNext, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrNext) + } + revertTbl() + } +} + +func TestDNSResolverRetry(t *testing.T) { + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } + b := NewDefaultSRVBuilder() + target := "foo.ipv4.single.fake" + cc := &testClientConn{target: target} + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("%v\n", err) + } + defer r.Close() + var state resolver.State + for range 2000 { + state, _ = cc.getState() + if len(state.Addresses) == 1 { + break + } + time.Sleep(time.Millisecond) + } + if len(state.Addresses) != 1 { + t.Fatalf("UpdateState not called with 1 address after 2s; aborting. state=%v", state) + } + want := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}} + if !slices.Equal(want, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, want) + } + // mutate the host lookup table so the target has 0 address returned. + revertTbl := mutateTbl(strings.TrimPrefix(target, "foo.")) + // trigger a resolve that will get empty address list + r.ResolveNow(resolver.ResolveNowOptions{}) + for range 2000 { + state, _ = cc.getState() + if len(state.Addresses) == 0 { + break + } + time.Sleep(time.Millisecond) + } + if len(state.Addresses) != 0 { + t.Fatalf("UpdateState not called with 0 address after 2s; aborting. state=%v", state) + } + revertTbl() + // wait for the retry to happen in two seconds. + r.ResolveNow(resolver.ResolveNowOptions{}) + for range 2000 { + state, _ = cc.getState() + if len(state.Addresses) == 1 { + break + } + time.Sleep(time.Millisecond) + } + if !slices.Equal(want, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, want) + } +} + +func TestCustomAuthority(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } + + tests := []struct { + authority string + authorityWant string + expectError bool + }{ + { + "4.3.2.1:" + defaultDNSSvrPort, + "4.3.2.1:" + defaultDNSSvrPort, + false, + }, + { + "4.3.2.1:123", + "4.3.2.1:123", + false, + }, + { + "4.3.2.1", + "4.3.2.1:" + defaultDNSSvrPort, + false, + }, + { + "::1", + "[::1]:" + defaultDNSSvrPort, + false, + }, + { + "[::1]", + "[::1]:" + defaultDNSSvrPort, + false, + }, + { + "[::1]:123", + "[::1]:123", + false, + }, + { + "dnsserver.com", + "dnsserver.com:" + defaultDNSSvrPort, + false, + }, + { + ":123", + "localhost:123", + false, + }, + { + ":", + "", + true, + }, + { + "[::1]:", + "", + true, + }, + { + "dnsserver.com:", + "", + true, + }, + } + oldcustomAuthorityDialer := customAuthorityDialer + defer func() { + customAuthorityDialer = oldcustomAuthorityDialer + }() + + for _, a := range tests { + errChan := make(chan error, 1) + customAuthorityDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + if authority != a.authorityWant { + errChan <- fmt.Errorf("wrong custom authority passed to resolver. input: %s expected: %s actual: %s", a.authority, a.authorityWant, authority) + } else { + errChan <- nil + } + return func(ctx context.Context, network, address string) (net.Conn, error) { + return nil, errors.New("no need to dial") + } + } + + mockEndpointTarget := "foo.bar.com" + b := NewDefaultSRVBuilder() + cc := &testClientConn{target: mockEndpointTarget, errChan: make(chan error, 1)} + target := resolver.Target{ + URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)), + } + r, err := b.Build(target, cc, resolver.BuildOptions{}) + + if err == nil { + r.Close() + + err = <-errChan + if err != nil { + t.Error(err.Error()) + } + + if a.expectError { + t.Errorf("custom authority should have caused an error: %s", a.authority) + } + } else if !a.expectError { + t.Errorf("unexpected error using custom authority %s: %s", a.authority, err) + } + } +} + +// TestRateLimitedResolve exercises the rate limit enforced on re-resolution +// requests. It sets the re-resolution rate to a small value and repeatedly +// calls ResolveNow() and ensures only the expected number of resolution +// requests are made. +func TestRateLimitedResolve(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential + // backoff. + return time.NewTimer(time.Hour) + } + defer func(nt func(d time.Duration) *time.Timer) { + newTimerDNSResRate = nt + }(newTimerDNSResRate) + + timerChan := testutils.NewChannel() + newTimerDNSResRate = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer + // immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } + + // Create a new testResolver{} for this test because we want the exact count + // of the number of times the resolver was invoked. 
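+	// (overrideDefaultResolver appears to install a testResolver whose
+	// lookupHostCh is signaled on every lookup, which is what lets this test
+	// count resolutions exactly; it returns a cleanup function, deferred below.)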
+	nc := overrideDefaultResolver(true)
+	defer nc()
+
+	target := "foo.ipv4.single.fake"
+	b := NewDefaultSRVBuilder()
+	cc := &testClientConn{target: target}
+
+	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
+	if err != nil {
+		t.Fatalf("resolver.Build() returned error: %v", err)
+	}
+	defer r.Close()
+
+	dnsR, ok := r.(*dnsResolver)
+	if !ok {
+		t.Fatalf("resolver.Build() returned unexpected type: %T", dnsR)
+	}
+
+	tr, ok := dnsR.resolver.(*testResolver)
+	if !ok {
+		t.Fatalf("delegate resolver returned unexpected type: %T", tr)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+
+	// Wait for the first resolution request to be done. This happens as part
+	// of the first iteration of the for loop in watcher().
+	if _, err := tr.lookupHostCh.Receive(ctx); err != nil {
+		t.Fatalf("Timed out waiting for lookup() call.")
+	}
+
+	// Call ResolveNow() 100 times; the watcher shouldn't continue onto its
+	// next iteration, and thus shouldn't look up again.
+	for range 100 {
+		r.ResolveNow(resolver.ResolveNowOptions{})
+	}
+
+	continueCtx, continueCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer continueCancel()
+
+	if _, err := tr.lookupHostCh.Receive(continueCtx); err == nil {
+		t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.")
+	}
+
+	// Make the DNSMinResRate timer fire immediately (by receiving it, then
+	// resetting to 0). This will unblock the resolver, which is currently
+	// blocked on the DNS Min Res Rate timer going off, allowing it to
+	// continue to the next iteration of the watcher loop.
+	timer, err := timerChan.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
+	}
+	timerPointer := timer.(*time.Timer)
+	timerPointer.Reset(0)
+
+	// Now that the DNS Min Res Rate timer has gone off, it should look up again.
+	if _, err := tr.lookupHostCh.Receive(ctx); err != nil {
+		t.Fatalf("Timed out waiting for lookup() call.")
+	}
+
+	// Call ResolveNow() 1000 more times; it shouldn't look up again, as the
+	// DNS Min Res Rate timer has not gone off.
+	for range 1000 {
+		r.ResolveNow(resolver.ResolveNowOptions{})
+	}
+
+	if _, err = tr.lookupHostCh.Receive(continueCtx); err == nil {
+		t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.")
+	}
+
+	// Make the DNSMinResRate timer fire immediately again.
+	timer, err = timerChan.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
+	}
+	timerPointer = timer.(*time.Timer)
+	timerPointer.Reset(0)
+
+	// Now that the DNS Min Res Rate timer has gone off, it should look up again.
+	if _, err = tr.lookupHostCh.Receive(ctx); err != nil {
+		t.Fatalf("Timed out waiting for lookup() call.")
+	}
+
+	wantAddrs := []resolver.Address{{Addr: "2.4.6.8:1234", ServerName: "ipv4.single.fake"}}
+	var state resolver.State
+	for {
+		var cnt int
+		state, cnt = cc.getState()
+		if cnt > 0 {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+	if !slices.Equal(state.Addresses, wantAddrs) {
+		t.Errorf("Resolved addresses of target: %q = %+v, want %+v", target, state.Addresses, wantAddrs)
+	}
+}
+
+// The DNS Resolver immediately starts polling when it hits an error, and each
+// re-resolution here returns another error. Thus, this test verifies that it
+// constantly sends errors to the grpc.ClientConn.
+func TestReportError(t *testing.T) {
+	const target = "not.found"
+	defer func(nt func(d time.Duration) *time.Timer) {
+		newTimer = nt
+	}(newTimer)
+	timerChan := testutils.NewChannel()
+	newTimer = func(d time.Duration) *time.Timer {
+		// Will never fire on its own, allows this test to call timer immediately.
+		t := time.NewTimer(time.Hour)
+		timerChan.Send(t)
+		return t
+	}
+	cc := &testClientConn{target: target, errChan: make(chan error)}
+	totalTimesCalledError := 0
+	b := NewDefaultSRVBuilder()
+	r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{})
+	if err != nil {
+		t.Fatalf("Error building resolver for target %v: %v", target, err)
+	}
+	// Should receive first error.
+	err = <-cc.errChan
+	if !strings.Contains(err.Error(), "srvLookup error") {
+		t.Fatalf(`ReportError(err=%v) called; want err containing "srvLookup error"`, err)
+	}
+	totalTimesCalledError++
+	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer ctxCancel()
+	timer, err := timerChan.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
+	}
+	timerPointer := timer.(*time.Timer)
+	timerPointer.Reset(0)
+	defer r.Close()
+
+	// Cause the timer to go off 10 times, and verify that the DNS Resolver
+	// reports an error each time.
+	for range 10 {
+		// Should call ReportError().
+		err = <-cc.errChan
+		if !strings.Contains(err.Error(), "srvLookup error") {
+			t.Fatalf(`ReportError(err=%v) called; want err containing "srvLookup error"`, err)
+		}
+		totalTimesCalledError++
+		timer, err := timerChan.Receive(ctx)
+		if err != nil {
+			t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
+		}
+		timerPointer := timer.(*time.Timer)
+		timerPointer.Reset(0)
+	}
+
+	if totalTimesCalledError != 11 {
+		t.Errorf("ReportError() not called 11 times, instead called %d times.", totalTimesCalledError)
+	}
+	// Clean up final watcher iteration.
+	<-cc.errChan
+	_, err = timerChan.Receive(ctx)
+	if err != nil {
+		t.Fatalf("Error receiving timer from mock NewTimer call: %v", err)
+	}
+}
+
+func Test_parseServiceDomain(t *testing.T) {
+	tests := []struct {
+		target        string
+		expectService string
+		expectDomain  string
+		wantErr       bool
+	}{
+		// valid
+		{"foo.bar", "foo", "bar", false},
+		{"foo.bar.baz", "foo", "bar.baz", false},
+		{"foo.bar.baz.", "foo", "bar.baz.", false},
+
+		// invalid
+		{"", "", "", true},
+		{".", "", "", true},
+		{"foo", "", "", true},
+		{".foo", "", "", true},
+		{"foo.", "", "", true},
+		{".foo.bar.baz", "", "", true},
+		{".foo.bar.baz.", "", "", true},
+	}
+	for _, tt := range tests {
+		t.Run(tt.target, func(t *testing.T) {
+			gotService, gotDomain, err := parseServiceDomain(tt.target)
+			if tt.wantErr {
+				test.AssertError(t, err, "expect err got nil")
+			} else {
+				test.AssertNotError(t, err, "expect nil err")
+				test.AssertEquals(t, gotService, tt.expectService)
+				test.AssertEquals(t, gotDomain, tt.expectDomain)
+			}
+		})
+	}
+}
diff --git a/grpc/internal/testutils/channel.go b/grpc/internal/testutils/channel.go
new file mode 100644
index 00000000000..991d05cdde7
--- /dev/null
+++ b/grpc/internal/testutils/channel.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + "context" +) + +// DefaultChanBufferSize is the default buffer size of the underlying channel. +const DefaultChanBufferSize = 1 + +// Channel wraps a generic channel and provides a timed receive operation. +type Channel struct { + ch chan any +} + +// Send sends value on the underlying channel. +func (c *Channel) Send(value any) { + c.ch <- value +} + +// SendContext sends value on the underlying channel, or returns an error if +// the context expires. +func (c *Channel) SendContext(ctx context.Context, value any) error { + select { + case c.ch <- value: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// SendOrFail attempts to send value on the underlying channel. Returns true +// if successful or false if the channel was full. +func (c *Channel) SendOrFail(value any) bool { + select { + case c.ch <- value: + return true + default: + return false + } +} + +// ReceiveOrFail returns the value on the underlying channel and true, or nil +// and false if the channel was empty. +func (c *Channel) ReceiveOrFail() (any, bool) { + select { + case got := <-c.ch: + return got, true + default: + return nil, false + } +} + +// Receive returns the value received on the underlying channel, or the error +// returned by ctx if it is closed or cancelled. +func (c *Channel) Receive(ctx context.Context) (any, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case got := <-c.ch: + return got, nil + } +} + +// Replace clears the value on the underlying channel, and sends the new value. +// +// It's expected to be used with a size-1 channel, to only keep the most +// up-to-date item. This method is inherently racy when invoked concurrently +// from multiple goroutines. +func (c *Channel) Replace(value any) { + for { + select { + case c.ch <- value: + return + case <-c.ch: + } + } +} + +// NewChannel returns a new Channel. +func NewChannel() *Channel { + return NewChannelWithSize(DefaultChanBufferSize) +} + +// NewChannelWithSize returns a new Channel with a buffer of bufSize. +func NewChannelWithSize(bufSize int) *Channel { + return &Channel{ch: make(chan any, bufSize)} +} diff --git a/grpc/internal/testutils/parse_url.go b/grpc/internal/testutils/parse_url.go new file mode 100644 index 00000000000..ff276e4d0c3 --- /dev/null +++ b/grpc/internal/testutils/parse_url.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package testutils + +import ( + "fmt" + "net/url" +) + +// MustParseURL attempts to parse the provided target using url.Parse() +// and panics if parsing fails. +func MustParseURL(target string) *url.URL { + u, err := url.Parse(target) + if err != nil { + panic(fmt.Sprintf("Error parsing target(%s): %v", target, err)) + } + return u +} diff --git a/grpc/noncebalancer/noncebalancer.go b/grpc/noncebalancer/noncebalancer.go new file mode 100644 index 00000000000..4867e400dd5 --- /dev/null +++ b/grpc/noncebalancer/noncebalancer.go @@ -0,0 +1,124 @@ +package noncebalancer + +import ( + "errors" + "sync" + + "github.com/letsencrypt/boulder/nonce" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // Name is the name used to register the nonce balancer with the gRPC + // runtime. + Name = "nonce" + + // SRVResolverScheme is the scheme used to invoke an instance of the SRV + // resolver which will use the noncebalancer to pick backends. It would be + // ideal to export this from the SRV resolver package but that package is + // internal. + SRVResolverScheme = "nonce-srv" +) + +// ErrNoBackendsMatchPrefix indicates that no backends were found which match +// the nonce prefix provided in the RPC context. This can happen when the +// provided nonce is stale, valid but the backend has since been removed from +// the balancer, or valid but the backend has not yet been added to the +// balancer. +// +// In any case, when the WFE receives this error it will return a badNonce error +// to the ACME client. Note that the WFE uses exact pointer comparison to +// detect that the status it receives is this exact status object, so don't +// wrap this with fmt.Errorf when returning it. +var ErrNoBackendsMatchPrefix = status.New(codes.Unavailable, "no backends match the nonce prefix") +var errMissingPrefixCtxKey = errors.New("nonce.PrefixCtxKey value required in RPC context") +var errMissingHMACKeyCtxKey = errors.New("nonce.HMACKeyCtxKey value required in RPC context") +var errInvalidPrefixCtxKeyType = errors.New("nonce.PrefixCtxKey value in RPC context must be a string") +var errInvalidHMACKeyCtxKeyType = errors.New("nonce.HMACKeyCtxKey value in RPC context must be a byte slice") + +// pickerBuilder implements the base.PickerBuilder interface. It's used to +// create new Picker instances. It should only be used by nonce-service clients. +type pickerBuilder struct{} + +// Build implements the base.PickerBuilder interface. It is called by the gRPC +// runtime when the balancer is first initialized and when the set of backend +// (SubConn) addresses changes. +func (b *pickerBuilder) Build(buildInfo base.PickerBuildInfo) balancer.Picker { + if len(buildInfo.ReadySCs) == 0 { + // The Picker must be rebuilt if there are no backends available. + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + return &picker{ + backends: buildInfo.ReadySCs, + } +} + +// picker implements the balancer.Picker interface. It picks a backend (SubConn) +// based on the nonce prefix contained in each request's Context. +type picker struct { + backends map[balancer.SubConn]base.SubConnInfo + prefixToBackend map[string]balancer.SubConn + prefixToBackendOnce sync.Once +} + +// Pick implements the balancer.Picker interface. It is called by the gRPC +// runtime for each RPC message. It is responsible for picking a backend +// (SubConn) based on the context of each RPC message. 
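+//
+// As a hedged usage sketch (not itself part of this change), a nonce-service
+// client is expected to thread both context keys through before issuing the
+// RPC, for example:
+//
+//	ctx = context.WithValue(ctx, nonce.PrefixCtxKey{}, destPrefix)
+//	ctx = context.WithValue(ctx, nonce.HMACKeyCtxKey{}, hmacKey)
+//	resp, err := client.Redeem(ctx, req)
+//
+// where destPrefix, hmacKey, client, and req are hypothetical caller-supplied
+// values, and Redeem stands in for whichever nonce-service RPC is being made.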
+func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + if len(p.backends) == 0 { + // This should never happen, the Picker should only be built when there + // are backends available. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + + // Get the HMAC key from the RPC context. + hmacKeyVal := info.Ctx.Value(nonce.HMACKeyCtxKey{}) + if hmacKeyVal == nil { + // This should never happen. + return balancer.PickResult{}, errMissingHMACKeyCtxKey + } + hmacKey, ok := hmacKeyVal.([]byte) + if !ok { + // This should never happen. + return balancer.PickResult{}, errInvalidHMACKeyCtxKeyType + } + + p.prefixToBackendOnce.Do(func() { + // First call to Pick with a new Picker. + prefixToBackend := make(map[string]balancer.SubConn) + for sc, scInfo := range p.backends { + scPrefix := nonce.DerivePrefix(scInfo.Address.Addr, hmacKey) + prefixToBackend[scPrefix] = sc + } + p.prefixToBackend = prefixToBackend + }) + + // Get the destination prefix from the RPC context. + destPrefixVal := info.Ctx.Value(nonce.PrefixCtxKey{}) + if destPrefixVal == nil { + // This should never happen. + return balancer.PickResult{}, errMissingPrefixCtxKey + } + destPrefix, ok := destPrefixVal.(string) + if !ok { + // This should never happen. + return balancer.PickResult{}, errInvalidPrefixCtxKeyType + } + + sc, ok := p.prefixToBackend[destPrefix] + if !ok { + // No backend SubConn was found for the destination prefix. + return balancer.PickResult{}, ErrNoBackendsMatchPrefix.Err() + } + return balancer.PickResult{SubConn: sc}, nil +} + +func init() { + balancer.Register( + base.NewBalancerBuilder(Name, &pickerBuilder{}, base.Config{}), + ) +} diff --git a/grpc/noncebalancer/noncebalancer_test.go b/grpc/noncebalancer/noncebalancer_test.go new file mode 100644 index 00000000000..1cade2f52c5 --- /dev/null +++ b/grpc/noncebalancer/noncebalancer_test.go @@ -0,0 +1,126 @@ +package noncebalancer + +import ( + "context" + "testing" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/resolver" + + "github.com/letsencrypt/boulder/nonce" + "github.com/letsencrypt/boulder/test" +) + +func TestPickerPicksCorrectBackend(t *testing.T) { + _, p, subConns := setupTest(false) + prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak")) + + testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w") + testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte(prefix)) + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertNotError(t, err, "Pick failed") + test.AssertDeepEquals(t, subConns[0], gotPick.SubConn) +} + +func TestPickerMissingPrefixInCtx(t *testing.T) { + _, p, subConns := setupTest(false) + prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak")) + + testCtx := context.WithValue(context.Background(), nonce.HMACKeyCtxKey{}, []byte(prefix)) + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, errMissingPrefixCtxKey) + test.AssertNil(t, gotPick.SubConn, "subConn should be nil") +} + +func TestPickerInvalidPrefixInCtx(t *testing.T) { + _, p, _ := setupTest(false) + + testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, 9) + testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte("foobar")) + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, errInvalidPrefixCtxKeyType) + test.AssertNil(t, gotPick.SubConn, 
"subConn should be nil") +} + +func TestPickerMissingHMACKeyInCtx(t *testing.T) { + _, p, _ := setupTest(false) + + testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w") + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, errMissingHMACKeyCtxKey) + test.AssertNil(t, gotPick.SubConn, "subConn should be nil") +} + +func TestPickerInvalidHMACKeyInCtx(t *testing.T) { + _, p, _ := setupTest(false) + + testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "HNmOnt8w") + testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, 9) + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, errInvalidHMACKeyCtxKeyType) + test.AssertNil(t, gotPick.SubConn, "subConn should be nil") +} + +func TestPickerNoMatchingSubConnAvailable(t *testing.T) { + _, p, subConns := setupTest(false) + prefix := nonce.DerivePrefix(subConns[0].addrs[0].Addr, []byte("Kala namak")) + + testCtx := context.WithValue(context.Background(), nonce.PrefixCtxKey{}, "rUsTrUin") + testCtx = context.WithValue(testCtx, nonce.HMACKeyCtxKey{}, []byte(prefix)) + info := balancer.PickInfo{Ctx: testCtx} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, ErrNoBackendsMatchPrefix.Err()) + test.AssertNil(t, gotPick.SubConn, "subConn should be nil") +} + +func TestPickerNoSubConnsAvailable(t *testing.T) { + b, p, _ := setupTest(true) + b.Build(base.PickerBuildInfo{}) + info := balancer.PickInfo{Ctx: context.Background()} + + gotPick, err := p.Pick(info) + test.AssertErrorIs(t, err, balancer.ErrNoSubConnAvailable) + test.AssertNil(t, gotPick.SubConn, "subConn should be nil") +} + +func setupTest(noSubConns bool) (*pickerBuilder, balancer.Picker, []*subConn) { + var subConns []*subConn + bi := base.PickerBuildInfo{ + ReadySCs: make(map[balancer.SubConn]base.SubConnInfo), + } + + sc := &subConn{} + addr := resolver.Address{Addr: "10.77.77.77:8080"} + sc.UpdateAddresses([]resolver.Address{addr}) + + if !noSubConns { + bi.ReadySCs[sc] = base.SubConnInfo{Address: addr} + subConns = append(subConns, sc) + } + + b := &pickerBuilder{} + p := b.Build(bi) + return b, p, subConns +} + +// subConn is a test mock which implements the balancer.SubConn interface. +type subConn struct { + balancer.SubConn + addrs []resolver.Address +} + +func (s *subConn) UpdateAddresses(addrs []resolver.Address) { + s.addrs = addrs +} diff --git a/grpc/pb-marshalling.go b/grpc/pb-marshalling.go index f97ae53b917..5bfb699d584 100644 --- a/grpc/pb-marshalling.go +++ b/grpc/pb-marshalling.go @@ -6,36 +6,38 @@ package grpc import ( - "net" + "fmt" + "net/netip" "time" + "github.com/go-jose/go-jose/v4" "google.golang.org/grpc/codes" - "gopkg.in/square/go-jose.v2" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" - "github.com/letsencrypt/boulder/revocation" sapb "github.com/letsencrypt/boulder/sa/proto" vapb "github.com/letsencrypt/boulder/va/proto" ) var ErrMissingParameters = CodedError(codes.FailedPrecondition, "required RPC parameter was missing") +var ErrInvalidParameters = CodedError(codes.InvalidArgument, "RPC parameter was invalid") // This file defines functions to translate between the protobuf types and the // code types. 
-func ProblemDetailsToPB(prob *probs.ProblemDetails) (*corepb.ProblemDetails, error) { +func ProblemDetailsToPB(prob *probs.ProblemDetails) *corepb.ProblemDetails { if prob == nil { // nil problemDetails is valid - return nil, nil + return nil } return &corepb.ProblemDetails{ ProblemType: string(prob.Type), Detail: prob.Detail, - HttpStatus: int32(prob.HTTPStatus), - }, nil + HttpStatus: int32(prob.HTTPStatus), //nolint: gosec // HTTP status codes are guaranteed to be small, no risk of overflow. + } } func PBToProblemDetails(in *corepb.ProblemDetails) (*probs.ProblemDetails, error) { @@ -57,26 +59,28 @@ func PBToProblemDetails(in *corepb.ProblemDetails) (*probs.ProblemDetails, error } func ChallengeToPB(challenge core.Challenge) (*corepb.Challenge, error) { - prob, err := ProblemDetailsToPB(challenge.Error) - if err != nil { - return nil, err - } + prob := ProblemDetailsToPB(challenge.Error) recordAry := make([]*corepb.ValidationRecord, len(challenge.ValidationRecord)) for i, v := range challenge.ValidationRecord { + var err error recordAry[i], err = ValidationRecordToPB(v) if err != nil { return nil, err } } - var validated int64 + + var validated *timestamppb.Timestamp if challenge.Validated != nil { - validated = challenge.Validated.UTC().UnixNano() + validated = timestamppb.New(challenge.Validated.UTC()) + if !validated.IsValid() { + return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Challenge object") + } } + return &corepb.Challenge{ Type: string(challenge.Type), Status: string(challenge.Status), Token: challenge.Token, - KeyAuthorization: challenge.ProvidedKeyAuthorization, Error: prob, Validationrecords: recordAry, Validated: validated, @@ -105,8 +109,8 @@ func PBToChallenge(in *corepb.Challenge) (challenge core.Challenge, err error) { return core.Challenge{}, err } var validated *time.Time - if in.Validated != 0 { - val := time.Unix(0, in.Validated).UTC() + if !core.IsAnyNilOrZero(in.Validated) { + val := in.Validated.AsTime() validated = &val } ch := core.Challenge{ @@ -117,9 +121,6 @@ func PBToChallenge(in *corepb.Challenge) (challenge core.Challenge, err error) { ValidationRecord: recordAry, Validated: validated, } - if in.KeyAuthorization != "" { - ch.ProvidedKeyAuthorization = in.KeyAuthorization - } return ch, nil } @@ -128,10 +129,10 @@ func ValidationRecordToPB(record core.ValidationRecord) (*corepb.ValidationRecor addrsTried := make([][]byte, len(record.AddressesTried)) var err error for i, v := range record.AddressesResolved { - addrs[i] = []byte(v) + addrs[i] = v.AsSlice() } for i, v := range record.AddressesTried { - addrsTried[i] = []byte(v) + addrsTried[i] = v.AsSlice() } addrUsed, err := record.AddressUsed.MarshalText() if err != nil { @@ -144,6 +145,7 @@ func ValidationRecordToPB(record core.ValidationRecord) (*corepb.ValidationRecor AddressUsed: addrUsed, Url: record.URL, AddressesTried: addrsTried, + ResolverAddrs: record.ResolverAddrs, }, nil } @@ -151,15 +153,23 @@ func PBToValidationRecord(in *corepb.ValidationRecord) (record core.ValidationRe if in == nil { return core.ValidationRecord{}, ErrMissingParameters } - addrs := make([]net.IP, len(in.AddressesResolved)) + addrs := make([]netip.Addr, len(in.AddressesResolved)) for i, v := range in.AddressesResolved { - addrs[i] = net.IP(v) + netIP, ok := netip.AddrFromSlice(v) + if !ok { + return core.ValidationRecord{}, ErrInvalidParameters + } + addrs[i] = netIP } - addrsTried := make([]net.IP, len(in.AddressesTried)) + addrsTried := make([]netip.Addr, len(in.AddressesTried)) for i, v := 
range in.AddressesTried { - addrsTried[i] = net.IP(v) + netIP, ok := netip.AddrFromSlice(v) + if !ok { + return core.ValidationRecord{}, ErrInvalidParameters + } + addrsTried[i] = netIP } - var addrUsed net.IP + var addrUsed netip.Addr err = addrUsed.UnmarshalText(in.AddressUsed) if err != nil { return @@ -171,10 +181,11 @@ func PBToValidationRecord(in *corepb.ValidationRecord) (record core.ValidationRe AddressUsed: addrUsed, URL: in.Url, AddressesTried: addrsTried, + ResolverAddrs: in.ResolverAddrs, }, nil } -func ValidationResultToPB(records []core.ValidationRecord, prob *probs.ProblemDetails) (*vapb.ValidationResult, error) { +func ValidationResultToPB(records []core.ValidationRecord, prob *probs.ProblemDetails, perspective, rir string) (*vapb.ValidationResult, error) { recordAry := make([]*corepb.ValidationRecord, len(records)) var err error for i, v := range records { @@ -183,13 +194,12 @@ func ValidationResultToPB(records []core.ValidationRecord, prob *probs.ProblemDe return nil, err } } - marshalledProbs, err := ProblemDetailsToPB(prob) - if err != nil { - return nil, err - } + marshalledProb := ProblemDetailsToPB(prob) return &vapb.ValidationResult{ - Records: recordAry, - Problems: marshalledProbs, + Records: recordAry, + Problem: marshalledProb, + Perspective: perspective, + Rir: rir, }, nil } @@ -205,43 +215,41 @@ func pbToValidationResult(in *vapb.ValidationResult) ([]core.ValidationRecord, * return nil, nil, err } } - prob, err := PBToProblemDetails(in.Problems) + prob, err := PBToProblemDetails(in.Problem) if err != nil { return nil, nil, err } return recordAry, prob, nil } +func CAAResultToPB(prob *probs.ProblemDetails, perspective, rir string) (*vapb.IsCAAValidResponse, error) { + marshalledProb := ProblemDetailsToPB(prob) + return &vapb.IsCAAValidResponse{ + Problem: marshalledProb, + Perspective: perspective, + Rir: rir, + }, nil +} + func RegistrationToPB(reg core.Registration) (*corepb.Registration, error) { keyBytes, err := reg.Key.MarshalJSON() if err != nil { return nil, err } - ipBytes, err := reg.InitialIP.MarshalText() - if err != nil { - return nil, err - } - var contacts []string - // Since the default value of corepb.Registration.Contact is a slice - // we need a indicator as to if the value is actually important on - // the other side (pb -> reg). 
- contactsPresent := reg.Contact != nil - if reg.Contact != nil { - contacts = *reg.Contact - } - var createdAt int64 + var createdAt *timestamppb.Timestamp if reg.CreatedAt != nil { - createdAt = reg.CreatedAt.UTC().UnixNano() + createdAt = timestamppb.New(reg.CreatedAt.UTC()) + if !createdAt.IsValid() { + return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Authorization object") + } } + return &corepb.Registration{ - Id: reg.ID, - Key: keyBytes, - Contact: contacts, - ContactsPresent: contactsPresent, - Agreement: reg.Agreement, - InitialIP: ipBytes, - CreatedAt: createdAt, - Status: string(reg.Status), + Id: reg.ID, + Key: keyBytes, + Agreement: reg.Agreement, + CreatedAt: createdAt, + Status: string(reg.Status), }, nil } @@ -251,36 +259,15 @@ func PbToRegistration(pb *corepb.Registration) (core.Registration, error) { if err != nil { return core.Registration{}, err } - var initialIP net.IP - err = initialIP.UnmarshalText(pb.InitialIP) - if err != nil { - return core.Registration{}, err - } var createdAt *time.Time - if pb.CreatedAt != 0 { - c := time.Unix(0, pb.CreatedAt).UTC() + if !core.IsAnyNilOrZero(pb.CreatedAt) { + c := pb.CreatedAt.AsTime() createdAt = &c } - var contacts *[]string - if pb.ContactsPresent { - if len(pb.Contact) != 0 { - contacts = &pb.Contact - } else { - // When gRPC creates an empty slice it is actually a nil slice. Since - // certain things boulder uses, like encoding/json, differentiate between - // these we need to de-nil these slices. Without this we are unable to - // properly do registration updates as contacts would always be removed - // as we use the difference between a nil and empty slice in ra.mergeUpdate. - empty := []string{} - contacts = &empty - } - } return core.Registration{ ID: pb.Id, Key: &key, - Contact: contacts, Agreement: pb.Agreement, - InitialIP: initialIP, CreatedAt: createdAt, Status: core.AcmeStatus(pb.Status), }, nil @@ -295,17 +282,22 @@ func AuthzToPB(authz core.Authorization) (*corepb.Authorization, error) { } challs[i] = pbChall } - var expires int64 + var expires *timestamppb.Timestamp if authz.Expires != nil { - expires = authz.Expires.UTC().UnixNano() + expires = timestamppb.New(authz.Expires.UTC()) + if !expires.IsValid() { + return nil, fmt.Errorf("error creating *timestamppb.Timestamp for *corepb.Authorization object") + } } + return &corepb.Authorization{ - Id: authz.ID, - Identifier: authz.Identifier.Value, - RegistrationID: authz.RegistrationID, - Status: string(authz.Status), - Expires: expires, - Challenges: challs, + Id: authz.ID, + Identifier: authz.Identifier.ToProto(), + RegistrationID: authz.RegistrationID, + Status: string(authz.Status), + Expires: expires, + Challenges: challs, + CertificateProfileName: authz.CertificateProfileName, }, nil } @@ -318,22 +310,28 @@ func PBToAuthz(pb *corepb.Authorization) (core.Authorization, error) { } challs[i] = chall } - expires := time.Unix(0, pb.Expires).UTC() + var expires *time.Time + if !core.IsAnyNilOrZero(pb.Expires) { + c := pb.Expires.AsTime() + expires = &c + } authz := core.Authorization{ - ID: pb.Id, - Identifier: identifier.ACMEIdentifier{Type: identifier.DNS, Value: pb.Identifier}, - RegistrationID: pb.RegistrationID, - Status: core.AcmeStatus(pb.Status), - Expires: &expires, - Challenges: challs, + ID: pb.Id, + Identifier: identifier.FromProto(pb.Identifier), + RegistrationID: pb.RegistrationID, + Status: core.AcmeStatus(pb.Status), + Expires: expires, + Challenges: challs, + CertificateProfileName: pb.CertificateProfileName, } return 
authz, nil
}

 // orderValid checks that a corepb.Order is valid. In addition to the checks
-// from `newOrderValid` it ensures the order ID and the Created field are not nil.
+// from `newOrderValid` it ensures the order ID and the Created fields are not
+// the zero value.
 func orderValid(order *corepb.Order) bool {
-	return order.Id != 0 && order.Created != 0 && newOrderValid(order)
+	return order.Id != 0 && order.Created != nil && newOrderValid(order)
 }

 // newOrderValid checks that a corepb.Order is valid. It allows for a nil
@@ -344,71 +342,19 @@ func orderValid(order *corepb.Order) bool {
 // `order.CertificateSerial` to be nil such that it can be used in places where
 // the order has not been finalized yet.
 func newOrderValid(order *corepb.Order) bool {
-	return !(order.RegistrationID == 0 || order.Expires == 0 || len(order.Names) == 0)
-}
-
-func CertToPB(cert core.Certificate) *corepb.Certificate {
-	return &corepb.Certificate{
-		RegistrationID: cert.RegistrationID,
-		Serial:         cert.Serial,
-		Digest:         cert.Digest,
-		Der:            cert.DER,
-		Issued:         cert.Issued.UnixNano(),
-		Expires:        cert.Expires.UnixNano(),
-	}
-}
-
-func PBToCert(pb *corepb.Certificate) (core.Certificate, error) {
-	return core.Certificate{
-		RegistrationID: pb.RegistrationID,
-		Serial:         pb.Serial,
-		Digest:         pb.Digest,
-		DER:            pb.Der,
-		Issued:         time.Unix(0, pb.Issued),
-		Expires:        time.Unix(0, pb.Expires),
-	}, nil
-}
-
-func CertStatusToPB(certStatus core.CertificateStatus) *corepb.CertificateStatus {
-	return &corepb.CertificateStatus{
-		Serial:                certStatus.Serial,
-		Status:                string(certStatus.Status),
-		OcspLastUpdated:       certStatus.OCSPLastUpdated.UnixNano(),
-		RevokedDate:           certStatus.RevokedDate.UnixNano(),
-		RevokedReason:         int64(certStatus.RevokedReason),
-		LastExpirationNagSent: certStatus.LastExpirationNagSent.UnixNano(),
-		OcspResponse:          certStatus.OCSPResponse,
-		NotAfter:              certStatus.NotAfter.UnixNano(),
-		IsExpired:             certStatus.IsExpired,
-		IssuerID:              certStatus.IssuerID,
-	}
-}
-
-func PBToCertStatus(pb *corepb.CertificateStatus) (core.CertificateStatus, error) {
-	return core.CertificateStatus{
-		Serial:                pb.Serial,
-		Status:                core.OCSPStatus(pb.Status),
-		OCSPLastUpdated:       time.Unix(0, pb.OcspLastUpdated),
-		RevokedDate:           time.Unix(0, pb.RevokedDate),
-		RevokedReason:         revocation.Reason(pb.RevokedReason),
-		LastExpirationNagSent: time.Unix(0, pb.LastExpirationNagSent),
-		OCSPResponse:          pb.OcspResponse,
-		NotAfter:              time.Unix(0, pb.NotAfter),
-		IsExpired:             pb.IsExpired,
-		IssuerID:              pb.IssuerID,
-	}, nil
+	return !(order.RegistrationID == 0 || order.Expires == nil || len(order.Identifiers) == 0)
 }

-// PBToAuthzMap converts a protobuf map of domains mapped to protobuf authorizations to a
-// golang map[string]*core.Authorization.
-func PBToAuthzMap(pb *sapb.Authorizations) (map[string]*core.Authorization, error) {
-	m := make(map[string]*core.Authorization, len(pb.Authz))
-	for _, v := range pb.Authz {
-		authz, err := PBToAuthz(v.Authz)
+// PBToAuthzMap converts a protobuf map of identifiers mapped to protobuf
+// authorizations to a golang map[identifier.ACMEIdentifier]*core.Authorization.
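+//
+// A hedged usage sketch (resp and the lookup below are hypothetical, not part
+// of this change):
+//
+//	authzMap, err := PBToAuthzMap(resp) // resp is a *sapb.Authorizations
+//	if err != nil {
+//		return err
+//	}
+//	authz, ok := authzMap[identifier.NewDNS("example.com")]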
+func PBToAuthzMap(pb *sapb.Authorizations) (map[identifier.ACMEIdentifier]*core.Authorization, error) { + m := make(map[identifier.ACMEIdentifier]*core.Authorization, len(pb.Authzs)) + for _, v := range pb.Authzs { + authz, err := PBToAuthz(v) if err != nil { return nil, err } - m[v.Domain] = &authz + m[authz.Identifier] = &authz } return m, nil } diff --git a/grpc/pb-marshalling_test.go b/grpc/pb-marshalling_test.go index e6e1829103e..167afb7c9f6 100644 --- a/grpc/pb-marshalling_test.go +++ b/grpc/pb-marshalling_test.go @@ -2,11 +2,12 @@ package grpc import ( "encoding/json" - "net" + "net/netip" "testing" "time" - "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" @@ -18,13 +19,11 @@ import ( const JWK1JSON = `{"kty":"RSA","n":"vuc785P8lBj3fUxyZchF_uZw6WtbxcorqgTyq-qapF5lrO1U82Tp93rpXlmctj6fyFHBVVB5aXnUHJ7LZeVPod7Wnfl8p5OyhlHQHC8BnzdzCqCMKmWZNX5DtETDId0qzU7dPzh0LP0idt5buU7L9QNaabChw3nnaL47iu_1Di5Wp264p2TwACeedv2hfRDjDlJmaQXuS8Rtv9GnRWyC9JBu7XmGvGDziumnJH7Hyzh3VNu-kSPQD3vuAFgMZS6uUzOztCkT0fpOalZI6hqxtWLvXUMj-crXrn-Maavz8qRhpAyp5kcYk3jiHGgQIi7QSK2JIdRJ8APyX9HlmTN5AQ","e":"AQAB"}` func TestProblemDetails(t *testing.T) { - pb, err := ProblemDetailsToPB(nil) - test.AssertNotEquals(t, err, "problemDetailToPB failed") + pb := ProblemDetailsToPB(nil) test.Assert(t, pb == nil, "Returned corepb.ProblemDetails is not nil") prob := &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200} - pb, err = ProblemDetailsToPB(prob) - test.AssertNotError(t, err, "problemDetailToPB failed") + pb = ProblemDetailsToPB(prob) test.Assert(t, pb != nil, "return corepb.ProblemDetails is nill") test.AssertDeepEquals(t, pb.ProblemType, string(prob.Type)) test.AssertEquals(t, pb.Detail, prob.Detail) @@ -54,11 +53,10 @@ func TestChallenge(t *testing.T) { test.AssertNotError(t, err, "Failed to unmarshal test key") validated := time.Now().Round(0).UTC() chall := core.Challenge{ - Type: core.ChallengeTypeDNS01, - Status: core.StatusValid, - Token: "asd", - ProvidedKeyAuthorization: "keyauth", - Validated: &validated, + Type: core.ChallengeTypeDNS01, + Status: core.StatusValid, + Token: "asd", + Validated: &validated, } pb, err := ChallengeToPB(chall) @@ -69,15 +67,15 @@ func TestChallenge(t *testing.T) { test.AssertNotError(t, err, "PBToChallenge failed") test.AssertDeepEquals(t, recon, chall) - ip := net.ParseIP("1.1.1.1") + ip := netip.MustParseAddr("1.1.1.1") chall.ValidationRecord = []core.ValidationRecord{ { - Hostname: "host", + Hostname: "example.com", Port: "2020", - AddressesResolved: []net.IP{ip}, + AddressesResolved: []netip.Addr{ip}, AddressUsed: ip, - URL: "url", - AddressesTried: []net.IP{ip}, + URL: "https://example.com:2020", + AddressesTried: []netip.Addr{ip}, }, } chall.Error = &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200} @@ -95,17 +93,31 @@ func TestChallenge(t *testing.T) { _, err = PBToChallenge(&corepb.Challenge{}) test.AssertError(t, err, "PBToChallenge did not fail") test.AssertEquals(t, err, ErrMissingParameters) + + challNilValidation := core.Challenge{ + Type: core.ChallengeTypeDNS01, + Status: core.StatusValid, + Token: "asd", + Validated: nil, + } + pb, err = ChallengeToPB(challNilValidation) + test.AssertNotError(t, err, "ChallengeToPB failed") + test.Assert(t, pb != nil, "Returned corepb.Challenge is nil") + recon, err = PBToChallenge(pb) + test.AssertNotError(t, err, "PBToChallenge 
failed") + test.AssertDeepEquals(t, recon, challNilValidation) } func TestValidationRecord(t *testing.T) { - ip := net.ParseIP("1.1.1.1") + ip := netip.MustParseAddr("1.1.1.1") vr := core.ValidationRecord{ - Hostname: "host", - Port: "2020", - AddressesResolved: []net.IP{ip}, + Hostname: "exampleA.com", + Port: "80", + AddressesResolved: []netip.Addr{ip}, AddressUsed: ip, - URL: "url", - AddressesTried: []net.IP{ip}, + URL: "http://exampleA.com", + AddressesTried: []netip.Addr{ip}, + ResolverAddrs: []string{"resolver:5353"}, } pb, err := ValidationRecordToPB(vr) @@ -118,29 +130,33 @@ func TestValidationRecord(t *testing.T) { } func TestValidationResult(t *testing.T) { - ip := net.ParseIP("1.1.1.1") + ip := netip.MustParseAddr("1.1.1.1") vrA := core.ValidationRecord{ - Hostname: "hostA", - Port: "2020", - AddressesResolved: []net.IP{ip}, + Hostname: "exampleA.com", + Port: "443", + AddressesResolved: []netip.Addr{ip}, AddressUsed: ip, - URL: "urlA", - AddressesTried: []net.IP{ip}, + URL: "https://exampleA.com", + AddressesTried: []netip.Addr{ip}, + ResolverAddrs: []string{"resolver:5353"}, } vrB := core.ValidationRecord{ - Hostname: "hostB", - Port: "2020", - AddressesResolved: []net.IP{ip}, + Hostname: "exampleB.com", + Port: "443", + AddressesResolved: []netip.Addr{ip}, AddressUsed: ip, - URL: "urlB", - AddressesTried: []net.IP{ip}, + URL: "https://exampleB.com", + AddressesTried: []netip.Addr{ip}, + ResolverAddrs: []string{"resolver:5353"}, } result := []core.ValidationRecord{vrA, vrB} prob := &probs.ProblemDetails{Type: probs.TLSProblem, Detail: "asd", HTTPStatus: 200} - pb, err := ValidationResultToPB(result, prob) + pb, err := ValidationResultToPB(result, prob, "surreal", "ARIN") test.AssertNotError(t, err, "ValidationResultToPB failed") test.Assert(t, pb != nil, "Returned vapb.ValidationResult is nil") + test.AssertEquals(t, pb.Perspective, "surreal") + test.AssertEquals(t, pb.Rir, "ARIN") reconResult, reconProb, err := pbToValidationResult(pb) test.AssertNotError(t, err, "pbToValidationResult failed") @@ -149,7 +165,6 @@ func TestValidationResult(t *testing.T) { } func TestRegistration(t *testing.T) { - contacts := []string{"email"} var key jose.JSONWebKey err := json.Unmarshal([]byte(` { @@ -163,9 +178,7 @@ func TestRegistration(t *testing.T) { inReg := core.Registration{ ID: 1, Key: &key, - Contact: &contacts, Agreement: "yup", - InitialIP: net.ParseIP("1.1.1.1"), CreatedAt: &createdAt, Status: core.StatusValid, } @@ -175,72 +188,65 @@ func TestRegistration(t *testing.T) { test.AssertNotError(t, err, "PbToRegistration failed") test.AssertDeepEquals(t, inReg, outReg) - inReg.Contact = nil - pbReg, err = RegistrationToPB(inReg) - test.AssertNotError(t, err, "registrationToPB failed") - pbReg.Contact = []string{} - outReg, err = PbToRegistration(pbReg) - test.AssertNotError(t, err, "PbToRegistration failed") - test.AssertDeepEquals(t, inReg, outReg) - - var empty []string - inReg.Contact = &empty - pbReg, err = RegistrationToPB(inReg) + inRegNilCreatedAt := core.Registration{ + ID: 1, + Key: &key, + Agreement: "yup", + CreatedAt: nil, + Status: core.StatusValid, + } + pbReg, err = RegistrationToPB(inRegNilCreatedAt) test.AssertNotError(t, err, "registrationToPB failed") outReg, err = PbToRegistration(pbReg) test.AssertNotError(t, err, "PbToRegistration failed") - test.Assert(t, *outReg.Contact != nil, "Empty slice was converted to a nil slice") + test.AssertDeepEquals(t, inRegNilCreatedAt, outReg) } func TestAuthz(t *testing.T) { exp := time.Now().AddDate(0, 0, 1).UTC() - identifier 
:= identifier.ACMEIdentifier{Type: identifier.DNS, Value: "example.com"} + ident := identifier.NewDNS("example.com") challA := core.Challenge{ - Type: core.ChallengeTypeDNS01, - Status: core.StatusPending, - Token: "asd", - ProvidedKeyAuthorization: "keyauth", + Type: core.ChallengeTypeDNS01, + Status: core.StatusPending, + Token: "asd", } challB := core.Challenge{ - Type: core.ChallengeTypeDNS01, - Status: core.StatusPending, - Token: "asd2", - ProvidedKeyAuthorization: "keyauth4", + Type: core.ChallengeTypeDNS01, + Status: core.StatusPending, + Token: "asd2", } inAuthz := core.Authorization{ ID: "1", - Identifier: identifier, + Identifier: ident, RegistrationID: 5, Status: core.StatusPending, Expires: &exp, Challenges: []core.Challenge{challA, challB}, } - pbAuthz, err := AuthzToPB(inAuthz) test.AssertNotError(t, err, "AuthzToPB failed") outAuthz, err := PBToAuthz(pbAuthz) - test.AssertNotError(t, err, "pbToAuthz failed") + test.AssertNotError(t, err, "PBToAuthz failed") test.AssertDeepEquals(t, inAuthz, outAuthz) -} -func TestCert(t *testing.T) { - now := time.Now().Round(0) - cert := core.Certificate{ - RegistrationID: 1, - Serial: "serial", - Digest: "digest", - DER: []byte{255}, - Issued: now, - Expires: now.Add(time.Hour), + inAuthzNilExpires := core.Authorization{ + ID: "1", + Identifier: ident, + RegistrationID: 5, + Status: core.StatusPending, + Expires: nil, + Challenges: []core.Challenge{challA, challB}, } - - certPB := CertToPB(cert) - outCert, _ := PBToCert(certPB) - - test.AssertDeepEquals(t, cert, outCert) + pbAuthz2, err := AuthzToPB(inAuthzNilExpires) + test.AssertNotError(t, err, "AuthzToPB failed") + outAuthz2, err := PBToAuthz(pbAuthz2) + test.AssertNotError(t, err, "PBToAuthz failed") + test.AssertDeepEquals(t, inAuthzNilExpires, outAuthz2) } func TestOrderValid(t *testing.T) { + created := time.Now() + expires := created.Add(1 * time.Hour) testCases := []struct { Name string Order *corepb.Order @@ -251,12 +257,12 @@ func TestOrderValid(t *testing.T) { Order: &corepb.Order{ Id: 1, RegistrationID: 1, - Expires: 1, + Expires: timestamppb.New(expires), CertificateSerial: "", V2Authorizations: []int64{}, - Names: []string{"example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, BeganProcessing: false, - Created: 1, + Created: timestamppb.New(created), }, ExpectedValid: true, }, @@ -265,11 +271,11 @@ func TestOrderValid(t *testing.T) { Order: &corepb.Order{ Id: 1, RegistrationID: 1, - Expires: 1, + Expires: timestamppb.New(expires), V2Authorizations: []int64{}, - Names: []string{"example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, BeganProcessing: false, - Created: 1, + Created: timestamppb.New(created), }, ExpectedValid: true, }, @@ -282,10 +288,10 @@ func TestOrderValid(t *testing.T) { Order: &corepb.Order{ Id: 0, RegistrationID: 1, - Expires: 1, + Expires: timestamppb.New(expires), CertificateSerial: "", V2Authorizations: []int64{}, - Names: []string{"example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, BeganProcessing: false, }, }, @@ -294,10 +300,10 @@ func TestOrderValid(t *testing.T) { Order: &corepb.Order{ Id: 1, RegistrationID: 0, - Expires: 1, + Expires: timestamppb.New(expires), CertificateSerial: "", V2Authorizations: []int64{}, - Names: []string{"example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, BeganProcessing: false, }, }, @@ -306,10 +312,10 @@ func TestOrderValid(t *testing.T) { Order: 
&corepb.Order{ Id: 1, RegistrationID: 1, - Expires: 0, + Expires: nil, CertificateSerial: "", V2Authorizations: []int64{}, - Names: []string{"example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, BeganProcessing: false, }, }, @@ -318,10 +324,10 @@ func TestOrderValid(t *testing.T) { Order: &corepb.Order{ Id: 1, RegistrationID: 1, - Expires: 1, + Expires: timestamppb.New(expires), CertificateSerial: "", V2Authorizations: []int64{}, - Names: []string{}, + Identifiers: []*corepb.Identifier{}, BeganProcessing: false, }, }, diff --git a/grpc/protogen.sh b/grpc/protogen.sh index 497ab3c19aa..8e5701d00ce 100755 --- a/grpc/protogen.sh +++ b/grpc/protogen.sh @@ -19,5 +19,6 @@ do # --go-grpc_out="${proto_dir}" does the same for _grpc.pb.go # --go_opt=paths=source_relative derives output filenames from input filenames # --go-grpc_opt=paths=source_relative does the same for _grpc.pb.go - protoc -I "${proto_dir}" -I "${root_dir}" --go_out="${proto_dir}" --go-grpc_out="${proto_dir}" --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative "${proto_file}" + # --go-grpc_opt=use_generic_streams=true causes protoc-gen-go-grpc to use generics for its stream objects, rather than generating a new impl for each one + protoc -I "${proto_dir}" -I "${root_dir}" --go_out="${proto_dir}" --go-grpc_out="${proto_dir}" --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative,use_generic_streams_experimental=true "${proto_file}" done diff --git a/grpc/server.go b/grpc/server.go index 470cafb7179..3c8b8b653e6 100644 --- a/grpc/server.go +++ b/grpc/server.go @@ -1,19 +1,29 @@ package grpc import ( + "context" "crypto/tls" "errors" + "fmt" "net" + "slices" + "strings" + "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/honeycombio/beeline-go/wrappers/hnygrpc" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus" "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - bcreds "github.com/letsencrypt/boulder/grpc/creds" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/filters" "google.golang.org/grpc" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + + "github.com/letsencrypt/boulder/cmd" + bcreds "github.com/letsencrypt/boulder/grpc/creds" + blog "github.com/letsencrypt/boulder/log" ) // CodedError is a alias required to appease go vet @@ -21,65 +31,298 @@ var CodedError = status.Errorf var errNilTLS = errors.New("boulder/grpc: received nil tls.Config") -// NewServer creates a gRPC server that uses the provided *tls.Config, and -// verifies that clients present a certificate that (a) is signed by one of -// the configured ClientCAs, and (b) contains at least one -// subjectAlternativeName matching the accepted list from GRPCServerConfig. -func NewServer(c *cmd.GRPCServerConfig, tlsConfig *tls.Config, metrics serverMetrics, clk clock.Clock, interceptors ...grpc.UnaryServerInterceptor) (*grpc.Server, net.Listener, error) { +// checker is an interface for checking the health of a grpc service +// implementation. +type checker interface { + // Health returns nil if the service is healthy, or an error if it is not. + // If the passed context is canceled, it should return immediately with an + // error. 
+	Health(context.Context) error
+}
+
+// service represents a single gRPC service that can be registered with a gRPC
+// server.
+type service struct {
+	desc *grpc.ServiceDesc
+	impl any
+}
+
+// serverBuilder implements a builder pattern for constructing new gRPC servers
+// and registering gRPC services on those servers.
+type serverBuilder struct {
+	cfg           *cmd.GRPCServerConfig
+	services      map[string]service
+	healthSrv     *health.Server
+	checkInterval time.Duration
+	logger        blog.Logger
+	err           error
+}
+
+// NewServer returns an object which can be used to build gRPC servers. It takes
+// the server's configuration to perform initialization and a logger for deep
+// health checks.
+func NewServer(c *cmd.GRPCServerConfig, logger blog.Logger) *serverBuilder {
+	return &serverBuilder{cfg: c, services: make(map[string]service), logger: logger}
+}
+
+// WithCheckInterval sets the interval at which the server will check the health
+// of its registered services. If this is not called, a default interval of 5
+// seconds will be used.
+func (sb *serverBuilder) WithCheckInterval(i time.Duration) *serverBuilder {
+	sb.checkInterval = i
+	return sb
+}
+
+// Add registers a new service (consisting of its description and its
+// implementation) to the set of services which will be exposed by this server.
+// It returns the modified-in-place serverBuilder so that calls can be chained.
+// If there is an error adding this service, it will be exposed when .Build() is
+// called.
+func (sb *serverBuilder) Add(desc *grpc.ServiceDesc, impl any) *serverBuilder {
+	if _, found := sb.services[desc.ServiceName]; found {
+		// We've already registered a service with this same name, error out.
+		sb.err = fmt.Errorf("attempted double-registration of gRPC service %q", desc.ServiceName)
+		return sb
+	}
+	sb.services[desc.ServiceName] = service{desc: desc, impl: impl}
+	return sb
+}
+
+// Build creates a gRPC server that uses the provided *tls.Config and exposes
+// all of the services added to the builder. It also exposes a health check
+// service. It returns one function, start(), which should be used to start
+// the server. It spawns a goroutine which will listen for OS signals and
+// gracefully stop the server if one is caught, causing the start() function to
+// exit.
+func (sb *serverBuilder) Build(tlsConfig *tls.Config, statsRegistry prometheus.Registerer, clk clock.Clock) (func() error, error) {
+	// Register the health service with the server.
+	sb.healthSrv = health.NewServer()
+	sb.Add(&healthpb.Health_ServiceDesc, sb.healthSrv)
+
+	// Check to see if any of the calls to .Add() resulted in an error.
+	if sb.err != nil {
+		return nil, sb.err
+	}
+
+	// Ensure that every configured service also got added.
+	var registeredServices []string
+	for r := range sb.services {
+		registeredServices = append(registeredServices, r)
+	}
+	for serviceName := range sb.cfg.Services {
+		_, ok := sb.services[serviceName]
+		if !ok {
+			return nil, fmt.Errorf("gRPC service %q in config does not match any service: %s", serviceName, strings.Join(registeredServices, ", "))
+		}
+	}
+
 	if tlsConfig == nil {
-		return nil, nil, errNilTLS
+		return nil, errNilTLS
 	}
+
+	// Collect all names which should be allowed to connect to the server at all.
+	// These are the names which are allowlisted at the server level, plus the union
+	// of all names which are allowlisted for any individual service.
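+	// (As a hedged illustration - the JSON shape is assumed, values are
+	// hypothetical - the per-service allowlists consulted here come from
+	// configuration like:
+	//
+	//	"services": {
+	//		"sa.StorageAuthority": {"clientNames": ["ra.boulder", "wfe.boulder"]},
+	//		"grpc.health.v1.Health": {"clientNames": ["health-checker.boulder"]}
+	//	}
+	//
+	// where each key is a gRPC service name and clientNames lists the TLS
+	// certificate SANs permitted to call it.)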
acceptedSANs := make(map[string]struct{}) - for _, name := range c.ClientNames { - acceptedSANs[name] = struct{}{} + var acceptedSANsSlice []string + for _, service := range sb.cfg.Services { + for _, name := range service.ClientNames { + acceptedSANs[name] = struct{}{} + if !slices.Contains(acceptedSANsSlice, name) { + acceptedSANsSlice = append(acceptedSANsSlice, name) + } + } } + // Ensure that the health service has the same ClientNames as the other + // services, so that health checks can be performed by clients which are + // allowed to connect to the server. + sb.cfg.Services[healthpb.Health_ServiceDesc.ServiceName].ClientNames = acceptedSANsSlice + creds, err := bcreds.NewServerCredentials(tlsConfig, acceptedSANs) if err != nil { - return nil, nil, err + return nil, err } - l, err := net.Listen("tcp", c.Address) + // Set up all of our interceptors which handle metrics, traces, error + // propagation, and more. + metrics, err := newServerMetrics(statsRegistry) if err != nil { - return nil, nil, err + return nil, err + } + + var ai serverInterceptor + if len(sb.cfg.Services) > 0 { + ai = newServiceAuthChecker(sb.cfg) + } else { + ai = &noopServerInterceptor{} + } + + mi := newServerMetadataInterceptor(metrics, clk) + + unaryInterceptors := []grpc.UnaryServerInterceptor{ + mi.metrics.grpcMetrics.UnaryServerInterceptor(), + ai.Unary, + mi.Unary, } - si := newServerInterceptor(metrics, clk) - allInterceptors := []grpc.UnaryServerInterceptor{ - si.intercept, - si.metrics.grpcMetrics.UnaryServerInterceptor(), - hnygrpc.UnaryServerInterceptor(), + streamInterceptors := []grpc.StreamServerInterceptor{ + mi.metrics.grpcMetrics.StreamServerInterceptor(), + ai.Stream, + mi.Stream, } - allInterceptors = append(allInterceptors, interceptors...) + options := []grpc.ServerOption{ grpc.Creds(creds), - grpc.ChainUnaryInterceptor(allInterceptors...), + grpc.ChainUnaryInterceptor(unaryInterceptors...), + grpc.ChainStreamInterceptor(streamInterceptors...), + grpc.StatsHandler(otelgrpc.NewServerHandler(otelgrpc.WithFilter(filters.Not(filters.HealthCheck())))), } - if c.MaxConnectionAge.Duration > 0 { + if sb.cfg.MaxConnectionAge.Duration > 0 { options = append(options, grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionAge: c.MaxConnectionAge.Duration, + MaxConnectionAge: sb.cfg.MaxConnectionAge.Duration, })) } - return grpc.NewServer(options...), l, nil + + // Create the server itself and register all of our services on it. + server := grpc.NewServer(options...) + for _, service := range sb.services { + server.RegisterService(service.desc, service.impl) + } + + if sb.cfg.Address == "" { + return nil, errors.New("GRPC listen address not configured") + } + sb.logger.Infof("grpc listening on %s", sb.cfg.Address) + + // Finally return the functions which will start and stop the server. + listener, err := net.Listen("tcp", sb.cfg.Address) + if err != nil { + return nil, err + } + + start := func() error { + return server.Serve(listener) + } + + // Initialize long-running health checks of all services which implement the + // checker interface. + if sb.checkInterval <= 0 { + sb.checkInterval = 5 * time.Second + } + healthCtx, stopHealthChecks := context.WithCancel(context.Background()) + for _, s := range sb.services { + check, ok := s.impl.(checker) + if !ok { + continue + } + sb.initLongRunningCheck(healthCtx, s.desc.ServiceName, check.Health) + } + + // Start a goroutine which listens for a termination signal, and then + // gracefully stops the gRPC server. 
This in turn causes the start() function
+	// to exit, allowing its caller (generally a main() function) to exit.
+	go cmd.CatchSignals(func() {
+		stopHealthChecks()
+		sb.healthSrv.Shutdown()
+		server.GracefulStop()
+	})
+
+	return start, nil
+}
+
+// initLongRunningCheck initializes a goroutine which will periodically check
+// the health of the provided service and update the health server accordingly.
+//
+// TODO(#8255): Remove the service parameter and instead rely on transitioning
+// the overall health of the server (e.g. "") instead of individual services.
+func (sb *serverBuilder) initLongRunningCheck(shutdownCtx context.Context, service string, checkImpl func(context.Context) error) {
+	// Set the initial health status for the service.
+	sb.healthSrv.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
+	sb.healthSrv.SetServingStatus(service, healthpb.HealthCheckResponse_NOT_SERVING)
+
+	// checkAndMaybeUpdate is a helper function that checks the health of the
+	// service and, if necessary, updates its status in the health server.
+	checkAndMaybeUpdate := func(checkCtx context.Context, last healthpb.HealthCheckResponse_ServingStatus) healthpb.HealthCheckResponse_ServingStatus {
+		// Make a context with a timeout at 90% of the interval.
+		checkImplCtx, cancel := context.WithTimeout(checkCtx, sb.checkInterval*9/10)
+		defer cancel()
+
+		var next healthpb.HealthCheckResponse_ServingStatus
+		err := checkImpl(checkImplCtx)
+		if err != nil {
+			sb.logger.Infof("health check of gRPC service %q failed: %s", service, err)
+			next = healthpb.HealthCheckResponse_NOT_SERVING
+		} else {
+			next = healthpb.HealthCheckResponse_SERVING
+		}
+
+		if last == next {
+			// No change in health status.
+			return next
+		}
+
+		if next != healthpb.HealthCheckResponse_SERVING {
+			sb.logger.Warningf("transitioning overall health from %q to %q, due to: %s", last, next, err)
+			sb.logger.Warningf("transitioning health of %q from %q to %q, due to: %s", service, last, next, err)
+		} else {
+			sb.logger.Infof("transitioning overall health from %q to %q", last, next)
+			sb.logger.Infof("transitioning health of %q from %q to %q", service, last, next)
+		}
+		sb.healthSrv.SetServingStatus("", next)
+		sb.healthSrv.SetServingStatus(service, next)
+		return next
+	}
+
+	go func() {
+		ticker := time.NewTicker(sb.checkInterval)
+		defer ticker.Stop()
+
+		// Assume the service is not healthy to start.
+		last := healthpb.HealthCheckResponse_NOT_SERVING
+
+		// Check immediately, and then at the specified interval.
+		last = checkAndMaybeUpdate(shutdownCtx, last)
+		for {
+			select {
+			case <-shutdownCtx.Done():
+				// The server is shutting down.
+				return
+			case <-ticker.C:
+				last = checkAndMaybeUpdate(shutdownCtx, last)
+			}
+		}
+	}()
+}

 // serverMetrics is a struct type used to return a few registered metrics from
-// `NewServerMetrics`
+// `newServerMetrics`
 type serverMetrics struct {
 	grpcMetrics *grpc_prometheus.ServerMetrics
 	rpcLag      prometheus.Histogram
 }

-// NewServerMetrics registers metrics with a registry. It must be called a
-// maximum of once per registry, or there will be conflicting names.
-// It constructs and registers a *grpc_prometheus.ServerMetrics with timing
-// histogram enabled as well as a prometheus Histogram for RPC latency.
-func NewServerMetrics(stats registry) serverMetrics {
+// newServerMetrics registers metrics with a registry. It constructs and
+// registers a *grpc_prometheus.ServerMetrics with timing histogram enabled as
+// well as a prometheus Histogram for RPC latency.
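For a service to participate in these long-running checks, it only needs to satisfy the one-method checker interface declared at the top of this file. A sketch, using a hypothetical database-backed service (not part of this diff):

```go
package main

import (
	"context"
	"database/sql"
)

// exampleService is a hypothetical gRPC service implementation whose
// readiness depends on its database connection.
type exampleService struct {
	db *sql.DB
}

// Health satisfies the checker interface: Build detects it via a type
// assertion and spawns a goroutine that calls it every checkInterval.
// Returning an error transitions the service (and the overall server
// health) to NOT_SERVING until a later check succeeds.
func (s *exampleService) Health(ctx context.Context) error {
	return s.db.PingContext(ctx)
}
```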
If called more than once on a +// single registry, it will gracefully avoid registering duplicate metrics. +func newServerMetrics(stats prometheus.Registerer) (serverMetrics, error) { // Create the grpc prometheus server metrics instance and register it - grpcMetrics := grpc_prometheus.NewServerMetrics() - grpcMetrics.EnableHandlingTimeHistogram() - stats.MustRegister(grpcMetrics) + grpcMetrics := grpc_prometheus.NewServerMetrics( + grpc_prometheus.WithServerHandlingTimeHistogram( + grpc_prometheus.WithHistogramBuckets([]float64{.01, .025, .05, .1, .5, 1, 2.5, 5, 10, 45, 90}), + ), + ) + err := stats.Register(grpcMetrics) + if err != nil { + are := prometheus.AlreadyRegisteredError{} + if errors.As(err, &are) { + grpcMetrics = are.ExistingCollector.(*grpc_prometheus.ServerMetrics) + } else { + return serverMetrics{}, err + } + } // rpcLag is a prometheus histogram tracking the difference between the time // the client sent an RPC and the time the server received it. Create and @@ -89,10 +332,18 @@ func NewServerMetrics(stats registry) serverMetrics { Name: "grpc_lag", Help: "Delta between client RPC send time and server RPC receipt time", }) - stats.MustRegister(rpcLag) + err = stats.Register(rpcLag) + if err != nil { + are := prometheus.AlreadyRegisteredError{} + if errors.As(err, &are) { + rpcLag = are.ExistingCollector.(prometheus.Histogram) + } else { + return serverMetrics{}, err + } + } return serverMetrics{ grpcMetrics: grpcMetrics, rpcLag: rpcLag, - } + }, nil } diff --git a/grpc/server_test.go b/grpc/server_test.go new file mode 100644 index 00000000000..16c2e86a4ec --- /dev/null +++ b/grpc/server_test.go @@ -0,0 +1,72 @@ +package grpc + +import ( + "context" + "errors" + "testing" + "time" + + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/test" + "google.golang.org/grpc/health" +) + +func TestServerBuilderInitLongRunningCheck(t *testing.T) { + t.Parallel() + hs := health.NewServer() + mockLogger := blog.NewMock() + sb := &serverBuilder{ + healthSrv: hs, + logger: mockLogger, + checkInterval: time.Millisecond * 50, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + count := 0 + failEveryThirdCheck := func(context.Context) error { + count++ + if count%3 == 0 { + return errors.New("oops") + } + return nil + } + sb.initLongRunningCheck(ctx, "test", failEveryThirdCheck) + time.Sleep(time.Millisecond * 110) + cancel() + + // We expect the following transition timeline: + // - ~0ms 1st check passed, NOT_SERVING to SERVING + // - ~50ms 2nd check passed, [no transition] + // - ~100ms 3rd check failed, SERVING to NOT_SERVING + serving := mockLogger.GetAllMatching(".*\"NOT_SERVING\" to \"SERVING\"") + notServing := mockLogger.GetAllMatching((".*\"SERVING\" to \"NOT_SERVING\"")) + test.Assert(t, len(serving) == 2, "expected two serving log lines") + test.Assert(t, len(notServing) == 2, "expected two not serving log lines") + + mockLogger.Clear() + + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + + count = 0 + failEveryOtherCheck := func(context.Context) error { + count++ + if count%2 == 0 { + return errors.New("oops") + } + return nil + } + sb.initLongRunningCheck(ctx, "test", failEveryOtherCheck) + time.Sleep(time.Millisecond * 110) + cancel() + + // We expect the following transition timeline: + // - ~0ms 1st check passed, NOT_SERVING to SERVING + // - ~50ms 2nd check failed, SERVING to NOT_SERVING + // - ~100ms 3rd check passed, NOT_SERVING to SERVING + serving = 
mockLogger.GetAllMatching(".*\"NOT_SERVING\" to \"SERVING\"") + notServing = mockLogger.GetAllMatching((".*\"SERVING\" to \"NOT_SERVING\"")) + test.Assert(t, len(serving) == 4, "expected four serving log lines") + test.Assert(t, len(notServing) == 2, "expected two not serving log lines") +} diff --git a/grpc/skew.go b/grpc/skew.go new file mode 100644 index 00000000000..653a9ccef8d --- /dev/null +++ b/grpc/skew.go @@ -0,0 +1,13 @@ +//go:build !integration + +package grpc + +import "time" + +// tooSkewed returns true if the absolute value of the input duration is more +// than ten minutes. We break this out into a separate function so that it can +// be disabled in the integration tests, which make extensive use of fake +// clocks. +func tooSkewed(skew time.Duration) bool { + return skew > 10*time.Minute || skew < -10*time.Minute +} diff --git a/grpc/skew_integration.go b/grpc/skew_integration.go new file mode 100644 index 00000000000..5bb946be249 --- /dev/null +++ b/grpc/skew_integration.go @@ -0,0 +1,12 @@ +//go:build integration + +package grpc + +import "time" + +// tooSkewed always returns false, but is only built when the integration build +// flag is set. We use this to replace the real tooSkewed function in the +// integration tests, which make extensive use of fake clocks. +func tooSkewed(_ time.Duration) bool { + return false +} diff --git a/grpc/test_proto/interceptors_test.pb.go b/grpc/test_proto/interceptors_test.pb.go index b7ca50eddcc..eb2f680dded 100644 --- a/grpc/test_proto/interceptors_test.pb.go +++ b/grpc/test_proto/interceptors_test.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: interceptors_test.proto package test_proto @@ -9,8 +9,10 @@ package test_proto import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,20 +23,17 @@ const ( ) type Time struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` unknownFields protoimpl.UnknownFields - - Time int64 `protobuf:"varint,1,opt,name=time,proto3" json:"time,omitempty"` // In nanoseconds + sizeCache protoimpl.SizeCache } func (x *Time) Reset() { *x = Time{} - if protoimpl.UnsafeEnabled { - mi := &file_interceptors_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_interceptors_test_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Time) String() string { @@ -45,7 +44,7 @@ func (*Time) ProtoMessage() {} func (x *Time) ProtoReflect() protoreflect.Message { mi := &file_interceptors_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -60,52 +59,58 @@ func (*Time) Descriptor() ([]byte, []int) { return file_interceptors_test_proto_rawDescGZIP(), []int{0} } -func (x *Time) GetTime() int64 { +func (x *Time) GetDuration() *durationpb.Duration { if x != nil { - return x.Time + return x.Duration } - return 0 + return nil } var File_interceptors_test_proto 
protoreflect.FileDescriptor -var file_interceptors_test_proto_rawDesc = []byte{ +var file_interceptors_test_proto_rawDesc = string([]byte{ 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x63, 0x65, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x74, - 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1a, 0x0a, 0x04, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x04, 0x74, 0x69, 0x6d, 0x65, 0x32, 0x22, 0x0a, 0x07, 0x43, 0x68, 0x69, 0x6c, 0x6c, 0x65, 0x72, - 0x12, 0x17, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x6c, 0x12, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x1a, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, - 0x2f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} + 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x43, 0x0a, 0x04, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x32, 0x22, + 0x0a, 0x07, 0x43, 0x68, 0x69, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x05, 0x43, 0x68, 0x69, + 0x6c, 0x6c, 0x12, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x05, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, + 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_interceptors_test_proto_rawDescOnce sync.Once - file_interceptors_test_proto_rawDescData = file_interceptors_test_proto_rawDesc + file_interceptors_test_proto_rawDescData []byte ) func file_interceptors_test_proto_rawDescGZIP() []byte { file_interceptors_test_proto_rawDescOnce.Do(func() { - file_interceptors_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_interceptors_test_proto_rawDescData) + file_interceptors_test_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_interceptors_test_proto_rawDesc), len(file_interceptors_test_proto_rawDesc))) }) return file_interceptors_test_proto_rawDescData } var file_interceptors_test_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_interceptors_test_proto_goTypes = []interface{}{ - (*Time)(nil), // 0: Time +var file_interceptors_test_proto_goTypes = []any{ + (*Time)(nil), // 0: Time + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration } var file_interceptors_test_proto_depIdxs = []int32{ - 0, // 0: Chiller.Chill:input_type -> Time - 0, // 1: Chiller.Chill:output_type -> Time - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 
0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: Time.duration:type_name -> google.protobuf.Duration + 0, // 1: Chiller.Chill:input_type -> Time + 0, // 2: Chiller.Chill:output_type -> Time + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_interceptors_test_proto_init() } @@ -113,25 +118,11 @@ func file_interceptors_test_proto_init() { if File_interceptors_test_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_interceptors_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Time); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_interceptors_test_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_interceptors_test_proto_rawDesc), len(file_interceptors_test_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -142,7 +133,6 @@ func file_interceptors_test_proto_init() { MessageInfos: file_interceptors_test_proto_msgTypes, }.Build() File_interceptors_test_proto = out.File - file_interceptors_test_proto_rawDesc = nil file_interceptors_test_proto_goTypes = nil file_interceptors_test_proto_depIdxs = nil } diff --git a/grpc/test_proto/interceptors_test.proto b/grpc/test_proto/interceptors_test.proto index ef6f1e567db..f53468fd945 100644 --- a/grpc/test_proto/interceptors_test.proto +++ b/grpc/test_proto/interceptors_test.proto @@ -2,11 +2,15 @@ syntax = "proto3"; option go_package = "github.com/letsencrypt/boulder/grpc/test_proto"; +import "google/protobuf/duration.proto"; + service Chiller { // Sleep for the given amount of time, and return the amount of time slept. rpc Chill(Time) returns (Time) {} } message Time { - int64 time = 1; // In nanoseconds -} + // Next unused field number: 3 + reserved 1; // previously timeNS + google.protobuf.Duration duration = 2; + } diff --git a/grpc/test_proto/interceptors_test_grpc.pb.go b/grpc/test_proto/interceptors_test_grpc.pb.go index e0c931e8390..d44529e5a1d 100644 --- a/grpc/test_proto/interceptors_test_grpc.pb.go +++ b/grpc/test_proto/interceptors_test_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: interceptors_test.proto package test_proto @@ -11,8 +15,12 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Chiller_Chill_FullMethodName = "/Chiller/Chill" +) // ChillerClient is the client API for Chiller service. // @@ -31,8 +39,9 @@ func NewChillerClient(cc grpc.ClientConnInterface) ChillerClient { } func (c *chillerClient) Chill(ctx context.Context, in *Time, opts ...grpc.CallOption) (*Time, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Time) - err := c.cc.Invoke(ctx, "/Chiller/Chill", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Chiller_Chill_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -41,21 +50,25 @@ func (c *chillerClient) Chill(ctx context.Context, in *Time, opts ...grpc.CallOp // ChillerServer is the server API for Chiller service. // All implementations must embed UnimplementedChillerServer -// for forward compatibility +// for forward compatibility. type ChillerServer interface { // Sleep for the given amount of time, and return the amount of time slept. Chill(context.Context, *Time) (*Time, error) mustEmbedUnimplementedChillerServer() } -// UnimplementedChillerServer must be embedded to have forward compatible implementations. -type UnimplementedChillerServer struct { -} +// UnimplementedChillerServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedChillerServer struct{} func (UnimplementedChillerServer) Chill(context.Context, *Time) (*Time, error) { return nil, status.Errorf(codes.Unimplemented, "method Chill not implemented") } func (UnimplementedChillerServer) mustEmbedUnimplementedChillerServer() {} +func (UnimplementedChillerServer) testEmbeddedByValue() {} // UnsafeChillerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ChillerServer will @@ -65,6 +78,13 @@ type UnsafeChillerServer interface { } func RegisterChillerServer(s grpc.ServiceRegistrar, srv ChillerServer) { + // If the following call pancis, it indicates UnimplementedChillerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
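As the generated comment above notes, UnimplementedChillerServer should be embedded by value. A minimal conforming implementation of this test service might look like the following sketch (illustrative only, not part of the generated file):

```go
package test_proto

import (
	"context"
	"time"

	durationpb "google.golang.org/protobuf/types/known/durationpb"
)

// chillerImpl implements ChillerServer. UnimplementedChillerServer is
// embedded by value so the registration-time check below passes, and any
// methods added to the service later fail gracefully with Unimplemented.
type chillerImpl struct {
	UnimplementedChillerServer
}

// Chill sleeps for the requested duration and echoes it back, matching the
// service's documented behavior.
func (c *chillerImpl) Chill(ctx context.Context, in *Time) (*Time, error) {
	d := in.Duration.AsDuration()
	time.Sleep(d)
	return &Time{Duration: durationpb.New(d)}, nil
}
```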
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Chiller_ServiceDesc, srv) } @@ -78,7 +98,7 @@ func _Chiller_Chill_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Chiller/Chill", + FullMethod: Chiller_Chill_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChillerServer).Chill(ctx, req.(*Time)) diff --git a/iana/data/iana-ipv4-special-registry-1.csv b/iana/data/iana-ipv4-special-registry-1.csv new file mode 100644 index 00000000000..99458ca36cb --- /dev/null +++ b/iana/data/iana-ipv4-special-registry-1.csv @@ -0,0 +1,27 @@ +Address Block,Name,RFC,Allocation Date,Termination Date,Source,Destination,Forwardable,Globally Reachable,Reserved-by-Protocol +0.0.0.0/8,"""This network""","[RFC791], Section 3.2",1981-09,N/A,True,False,False,False,True +0.0.0.0/32,"""This host on this network""","[RFC1122], Section 3.2.1.3",1981-09,N/A,True,False,False,False,True +10.0.0.0/8,Private-Use,[RFC1918],1996-02,N/A,True,True,True,False,False +100.64.0.0/10,Shared Address Space,[RFC6598],2012-04,N/A,True,True,True,False,False +127.0.0.0/8,Loopback,"[RFC1122], Section 3.2.1.3",1981-09,N/A,False [1],False [1],False [1],False [1],True +169.254.0.0/16,Link Local,[RFC3927],2005-05,N/A,True,True,False,False,True +172.16.0.0/12,Private-Use,[RFC1918],1996-02,N/A,True,True,True,False,False +192.0.0.0/24 [2],IETF Protocol Assignments,"[RFC6890], Section 2.1",2010-01,N/A,False,False,False,False,False +192.0.0.0/29,IPv4 Service Continuity Prefix,[RFC7335],2011-06,N/A,True,True,True,False,False +192.0.0.8/32,IPv4 dummy address,[RFC7600],2015-03,N/A,True,False,False,False,False +192.0.0.9/32,Port Control Protocol Anycast,[RFC7723],2015-10,N/A,True,True,True,True,False +192.0.0.10/32,Traversal Using Relays around NAT Anycast,[RFC8155],2017-02,N/A,True,True,True,True,False +"192.0.0.170/32, 192.0.0.171/32",NAT64/DNS64 Discovery,"[RFC8880][RFC7050], Section 2.2",2013-02,N/A,False,False,False,False,True +192.0.2.0/24,Documentation (TEST-NET-1),[RFC5737],2010-01,N/A,False,False,False,False,False +192.31.196.0/24,AS112-v4,[RFC7535],2014-12,N/A,True,True,True,True,False +192.52.193.0/24,AMT,[RFC7450],2014-12,N/A,True,True,True,True,False +192.88.99.0/24,Deprecated (6to4 Relay Anycast),[RFC7526],2001-06,2015-03,,,,, +192.88.99.2/32,6a44-relay anycast address,[RFC6751],2012-10,N/A,True,True,True,False,False +192.168.0.0/16,Private-Use,[RFC1918],1996-02,N/A,True,True,True,False,False +192.175.48.0/24,Direct Delegation AS112 Service,[RFC7534],1996-01,N/A,True,True,True,True,False +198.18.0.0/15,Benchmarking,[RFC2544],1999-03,N/A,True,True,True,False,False +198.51.100.0/24,Documentation (TEST-NET-2),[RFC5737],2010-01,N/A,False,False,False,False,False +203.0.113.0/24,Documentation (TEST-NET-3),[RFC5737],2010-01,N/A,False,False,False,False,False +240.0.0.0/4,Reserved,"[RFC1112], Section 4",1989-08,N/A,False,False,False,False,True +255.255.255.255/32,Limited Broadcast,"[RFC8190] + [RFC919], Section 7",1984-10,N/A,False,True,False,False,True diff --git a/iana/data/iana-ipv6-special-registry-1.csv b/iana/data/iana-ipv6-special-registry-1.csv new file mode 100644 index 00000000000..f5bf9c073e8 --- /dev/null +++ b/iana/data/iana-ipv6-special-registry-1.csv @@ -0,0 +1,28 @@ +Address Block,Name,RFC,Allocation Date,Termination Date,Source,Destination,Forwardable,Globally Reachable,Reserved-by-Protocol +::1/128,Loopback 
Address,[RFC4291],2006-02,N/A,False,False,False,False,True +::/128,Unspecified Address,[RFC4291],2006-02,N/A,True,False,False,False,True +::ffff:0:0/96,IPv4-mapped Address,[RFC4291],2006-02,N/A,False,False,False,False,True +64:ff9b::/96,IPv4-IPv6 Translat.,[RFC6052],2010-10,N/A,True,True,True,True,False +64:ff9b:1::/48,IPv4-IPv6 Translat.,[RFC8215],2017-06,N/A,True,True,True,False,False +100::/64,Discard-Only Address Block,[RFC6666],2012-06,N/A,True,True,True,False,False +100:0:0:1::/64,Dummy IPv6 Prefix,[RFC9780],2025-04,N/A,True,False,False,False,False +2001::/23,IETF Protocol Assignments,[RFC2928],2000-09,N/A,False [1],False [1],False [1],False [1],False +2001::/32,TEREDO,"[RFC4380] + [RFC8190]",2006-01,N/A,True,True,True,N/A [2],False +2001:1::1/128,Port Control Protocol Anycast,[RFC7723],2015-10,N/A,True,True,True,True,False +2001:1::2/128,Traversal Using Relays around NAT Anycast,[RFC8155],2017-02,N/A,True,True,True,True,False +2001:1::3/128,DNS-SD Service Registration Protocol Anycast,[RFC9665],2024-04,N/A,True,True,True,True,False +2001:2::/48,Benchmarking,[RFC5180][RFC Errata 1752],2008-04,N/A,True,True,True,False,False +2001:3::/32,AMT,[RFC7450],2014-12,N/A,True,True,True,True,False +2001:4:112::/48,AS112-v6,[RFC7535],2014-12,N/A,True,True,True,True,False +2001:10::/28,Deprecated (previously ORCHID),[RFC4843],2007-03,2014-03,,,,, +2001:20::/28,ORCHIDv2,[RFC7343],2014-07,N/A,True,True,True,True,False +2001:30::/28,Drone Remote ID Protocol Entity Tags (DETs) Prefix,[RFC9374],2022-12,N/A,True,True,True,True,False +2001:db8::/32,Documentation,[RFC3849],2004-07,N/A,False,False,False,False,False +2002::/16 [3],6to4,[RFC3056],2001-02,N/A,True,True,True,N/A [3],False +2620:4f:8000::/48,Direct Delegation AS112 Service,[RFC7534],2011-05,N/A,True,True,True,True,False +3fff::/20,Documentation,[RFC9637],2024-07,N/A,False,False,False,False,False +5f00::/16,Segment Routing (SRv6) SIDs,[RFC9602],2024-04,N/A,True,True,True,False,False +fc00::/7,Unique-Local,"[RFC4193] + [RFC8190]",2005-10,N/A,True,True,True,False [4],False +fe80::/10,Link-Local Unicast,[RFC4291],2006-02,N/A,True,True,False,False,True diff --git a/iana/ip.go b/iana/ip.go new file mode 100644 index 00000000000..1791852c08f --- /dev/null +++ b/iana/ip.go @@ -0,0 +1,162 @@ +package iana + +import ( + "bytes" + "encoding/csv" + "errors" + "fmt" + "io" + "net/netip" + "regexp" + "slices" + "strings" + + _ "embed" +) + +type reservedPrefix struct { + // addressFamily is "IPv4" or "IPv6". + addressFamily string + // The other fields are defined in: + // https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + // https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + addressBlock netip.Prefix + name string + rfc string + // The BRs' requirement that we not issue for Reserved IP Addresses only + // cares about presence in one of these registries, not any of the other + // metadata fields tracked by the registries. Therefore, we ignore the + // Allocation Date, Termination Date, Source, Destination, Forwardable, + // Globally Reachable, and Reserved By Protocol columns. 
+} + +var ( + reservedPrefixes []reservedPrefix + + // https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + //go:embed data/iana-ipv4-special-registry-1.csv + ipv4Registry []byte + // https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + //go:embed data/iana-ipv6-special-registry-1.csv + ipv6Registry []byte +) + +// init parses and loads the embedded IANA special-purpose address registry CSV +// files for all address families, panicking if any one fails. +func init() { + ipv4Prefixes, err := parseReservedPrefixFile(ipv4Registry, "IPv4") + if err != nil { + panic(err) + } + + ipv6Prefixes, err := parseReservedPrefixFile(ipv6Registry, "IPv6") + if err != nil { + panic(err) + } + + reservedPrefixes = slices.Concat(ipv4Prefixes, ipv6Prefixes) + + // Sort the list of reserved prefixes in descending order of prefix size, so + // that checks will match the most-specific reserved prefix first. + slices.SortFunc(reservedPrefixes, func(a, b reservedPrefix) int { + if a.addressBlock.Bits() == b.addressBlock.Bits() { + return 0 + } + if a.addressBlock.Bits() > b.addressBlock.Bits() { + return -1 + } + return 1 + }) +} + +// Define regexps we'll use to clean up poorly formatted registry entries. +var ( + // 2+ sequential whitespace characters. The csv package takes care of + // newlines automatically. + ianaWhitespacesRE = regexp.MustCompile(`\s{2,}`) + // Footnotes at the end, like `[2]`. + ianaFootnotesRE = regexp.MustCompile(`\[\d+\]$`) +) + +// parseReservedPrefixFile parses and returns the IANA special-purpose address +// registry CSV data for a single address family, or returns an error if parsing +// fails. +func parseReservedPrefixFile(registryData []byte, addressFamily string) ([]reservedPrefix, error) { + if addressFamily != "IPv4" && addressFamily != "IPv6" { + return nil, fmt.Errorf("failed to parse reserved address registry: invalid address family %q", addressFamily) + } + if registryData == nil { + return nil, fmt.Errorf("failed to parse reserved %s address registry: empty", addressFamily) + } + + reader := csv.NewReader(bytes.NewReader(registryData)) + + // Parse the header row. + record, err := reader.Read() + if err != nil { + return nil, fmt.Errorf("failed to parse reserved %s address registry header: %w", addressFamily, err) + } + if record[0] != "Address Block" || record[1] != "Name" || record[2] != "RFC" { + return nil, fmt.Errorf("failed to parse reserved %s address registry header: must begin with \"Address Block\", \"Name\" and \"RFC\"", addressFamily) + } + + // Parse the records. + var prefixes []reservedPrefix + for { + row, err := reader.Read() + if errors.Is(err, io.EOF) { + // Finished parsing the file. + if len(prefixes) < 1 { + return nil, fmt.Errorf("failed to parse reserved %s address registry: no rows after header", addressFamily) + } + break + } else if err != nil { + return nil, err + } else if len(row) < 3 { + return nil, fmt.Errorf("failed to parse reserved %s address registry: incomplete row", addressFamily) + } + + // Remove any footnotes, then handle each comma-separated prefix. 
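Stepping back from the parser internals for a moment: once the registries are loaded, callers use the two lookup helpers defined at the bottom of this file. A usage sketch (the import path assumes the usual Boulder repo layout):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/letsencrypt/boulder/iana"
)

func main() {
	// Reserved: loopback appears in the IPv4 special-purpose registry.
	err := iana.IsReservedAddr(netip.MustParseAddr("127.0.0.1"))
	fmt.Println(err)
	// IP address is in a reserved address block: [RFC1122], Section 3.2.1.3: Loopback

	// Not reserved: public unicast addresses return nil.
	fmt.Println(iana.IsReservedAddr(netip.MustParseAddr("93.184.216.34")))

	// Prefix checks use Overlaps, so any intersection with a reserved
	// block is an error.
	fmt.Println(iana.IsReservedPrefix(netip.MustParsePrefix("10.1.0.0/16")))
}
```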
+ for prefixStr := range strings.SplitSeq(ianaFootnotesRE.ReplaceAllLiteralString(row[0], ""), ",") { + prefix, err := netip.ParsePrefix(strings.TrimSpace(prefixStr)) + if err != nil { + return nil, fmt.Errorf("failed to parse reserved %s address registry: couldn't parse entry %q as an IP address prefix: %s", addressFamily, prefixStr, err) + } + + prefixes = append(prefixes, reservedPrefix{ + addressFamily: addressFamily, + addressBlock: prefix, + name: row[1], + // Replace any whitespace sequences with a single space. + rfc: ianaWhitespacesRE.ReplaceAllLiteralString(row[2], " "), + }) + } + } + + return prefixes, nil +} + +// IsReservedAddr returns an error if an IP address is part of a reserved range. +func IsReservedAddr(ip netip.Addr) error { + // Strip zone from IPv6 addresses before checking + ip = ip.WithZone("") + for _, rpx := range reservedPrefixes { + if rpx.addressBlock.Contains(ip) { + return fmt.Errorf("IP address is in a reserved address block: %s: %s", rpx.rfc, rpx.name) + } + } + + return nil +} + +// IsReservedPrefix returns an error if an IP address prefix overlaps with a +// reserved range. +func IsReservedPrefix(prefix netip.Prefix) error { + for _, rpx := range reservedPrefixes { + if rpx.addressBlock.Overlaps(prefix) { + return fmt.Errorf("IP address is in a reserved address block: %s: %s", rpx.rfc, rpx.name) + } + } + + return nil +} diff --git a/iana/ip_test.go b/iana/ip_test.go new file mode 100644 index 00000000000..251e1692e72 --- /dev/null +++ b/iana/ip_test.go @@ -0,0 +1,93 @@ +package iana + +import ( + "net/netip" + "strings" + "testing" +) + +func TestIsReservedAddr(t *testing.T) { + t.Parallel() + + cases := []struct { + ip string + want string + }{ + {"127.0.0.1", "Loopback"}, // second-lowest IP in a reserved /8, common mistaken request + {"128.0.0.1", ""}, // second-lowest IP just above a reserved /8 + {"192.168.254.254", "Private-Use"}, // highest IP in a reserved /16 + {"192.169.255.255", ""}, // highest IP in the /16 above a reserved /16 + + {"::", "Unspecified Address"}, // lowest possible IPv6 address, reserved, possible parsing edge case + {"::1", "Loopback Address"}, // reserved, common mistaken request + {"::2", ""}, // surprisingly unreserved + + {"fe80::1", "Link-Local Unicast"}, // second-lowest IP in a reserved /10 + {"febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "Link-Local Unicast"}, // highest IP in a reserved /10 + {"fec0::1", ""}, // second-lowest IP just above a reserved /10 + + {"fe80::1%eth0", "Link-Local Unicast"}, // IPv6 link-local with zone + {"::1%lo", "Loopback Address"}, // IPv6 loopback with zone + + {"192.0.0.170", "NAT64/DNS64 Discovery"}, // first of two reserved IPs that are comma-split in IANA's CSV; also a more-specific of a larger reserved block that comes first + {"192.0.0.171", "NAT64/DNS64 Discovery"}, // second of two reserved IPs that are comma-split in IANA's CSV; also a more-specific of a larger reserved block that comes first + {"2001:1::1", "Port Control Protocol Anycast"}, // reserved IP that comes after a line with a line break in IANA's CSV; also a more-specific of a larger reserved block that comes first + {"2002::", "6to4"}, // lowest IP in a reserved /16 that has a footnote in IANA's CSV + {"2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "6to4"}, // highest IP in a reserved /16 that has a footnote in IANA's CSV + + {"0100::", "Discard-Only Address Block"}, // part of a reserved block in a non-canonical IPv6 format + {"0100::0000:ffff:ffff:ffff:ffff", "Discard-Only Address Block"}, // part of a reserved block in 
a non-canonical IPv6 format
+		{"0100::0002:0000:0000:0000:0000", ""}, // non-reserved but in a non-canonical IPv6 format
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.ip, func(t *testing.T) {
+			t.Parallel()
+			err := IsReservedAddr(netip.MustParseAddr(tc.ip))
+			if err == nil && tc.want != "" {
+				t.Errorf("Got success, wanted error for %#v", tc.ip)
+			}
+			if err != nil && !strings.Contains(err.Error(), tc.want) {
+				t.Errorf("%#v: got %q, want %q", tc.ip, err.Error(), tc.want)
+			}
+		})
+	}
+}
+
+func TestIsReservedPrefix(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		cidr string
+		want bool
+	}{
+		{"172.16.0.0/12", true},
+		{"172.16.0.0/32", true},
+		{"172.16.0.1/32", true},
+		{"172.31.255.0/24", true},
+		{"172.31.255.255/24", true},
+		{"172.31.255.255/32", true},
+		{"172.32.0.0/24", false},
+		{"172.32.0.1/32", false},
+
+		{"100::/64", true},
+		{"100::/128", true},
+		{"100::1/128", true},
+		{"100::1:ffff:ffff:ffff:ffff/128", true},
+		{"100:0:0:2::/64", false},
+		{"100:0:0:2::1/128", false},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.cidr, func(t *testing.T) {
+			t.Parallel()
+			err := IsReservedPrefix(netip.MustParsePrefix(tc.cidr))
+			if err != nil && !tc.want {
+				t.Error(err)
+			}
+			if err == nil && tc.want {
+				t.Errorf("Wanted error for %#v, got success", tc.cidr)
+			}
+		})
+	}
+}
diff --git a/identifier/identifier.go b/identifier/identifier.go
index cbf228f869f..9a6bb96bf7b 100644
--- a/identifier/identifier.go
+++ b/identifier/identifier.go
@@ -1,15 +1,45 @@
 // The identifier package defines types for RFC 8555 ACME identifiers.
+//
+// It exists as a separate package to prevent an import loop between the core
+// and probs packages.
+//
+// Function naming conventions:
+//   - "New" creates a new instance from one or more simple base type inputs.
+//   - "From" and "To" extract information from, or compose, a more complex object.
 package identifier

+import (
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/netip"
+	"slices"
+	"strings"
+
+	corepb "github.com/letsencrypt/boulder/core/proto"
+)
+
 // IdentifierType is a named string type for registered ACME identifier types.
 // See https://tools.ietf.org/html/rfc8555#section-9.7.7
 type IdentifierType string

 const (
-	// DNS is specified in RFC 8555 for DNS type identifiers.
-	DNS = IdentifierType("dns")
+	// TypeDNS is specified in RFC 8555 for DNS type identifiers.
+	TypeDNS = IdentifierType("dns")
+	// TypeIP is specified in RFC 8738.
+	TypeIP = IdentifierType("ip")
 )

+// IsValid tests whether the identifier type is known.
+func (i IdentifierType) IsValid() bool {
+	switch i {
+	case TypeDNS, TypeIP:
+		return true
+	default:
+		return false
+	}
+}
+
 // ACMEIdentifier is a struct encoding an identifier that can be validated. The
 // protocol allows for different types of identifier to be supported (DNS
 // names, IP addresses, etc.), but currently we only support RFC 8555 DNS type
@@ -22,11 +52,181 @@ type ACMEIdentifier struct {
 	Value string `json:"value"`
 }

-// DNSIdentifier is a convenience function for creating an ACMEIdentifier with
-// Type DNS for a given domain name.
-func DNSIdentifier(domain string) ACMEIdentifier {
+// ACMEIdentifiers is a named type for a slice of ACME identifiers, so that
+// methods can be applied to these slices.
+type ACMEIdentifiers []ACMEIdentifier + +func (i ACMEIdentifier) ToProto() *corepb.Identifier { + return &corepb.Identifier{ + Type: string(i.Type), + Value: i.Value, + } +} + +func FromProto(ident *corepb.Identifier) ACMEIdentifier { + return ACMEIdentifier{ + Type: IdentifierType(ident.Type), + Value: ident.Value, + } +} + +// ToProtoSlice is a convenience function for converting a slice of +// ACMEIdentifier into a slice of *corepb.Identifier, to use for RPCs. +func (idents ACMEIdentifiers) ToProtoSlice() []*corepb.Identifier { + var pbIdents []*corepb.Identifier + for _, ident := range idents { + pbIdents = append(pbIdents, ident.ToProto()) + } + return pbIdents +} + +// FromProtoSlice is a convenience function for converting a slice of +// *corepb.Identifier from RPCs into a slice of ACMEIdentifier. +func FromProtoSlice(pbIdents []*corepb.Identifier) ACMEIdentifiers { + var idents ACMEIdentifiers + + for _, pbIdent := range pbIdents { + idents = append(idents, FromProto(pbIdent)) + } + return idents +} + +// NewDNS is a convenience function for creating an ACMEIdentifier with Type +// "dns" for a given domain name. +func NewDNS(domain string) ACMEIdentifier { return ACMEIdentifier{ - Type: DNS, + Type: TypeDNS, Value: domain, } } + +// NewDNSSlice is a convenience function for creating a slice of ACMEIdentifier +// with Type "dns" for a given slice of domain names. +func NewDNSSlice(input []string) ACMEIdentifiers { + var out ACMEIdentifiers + for _, in := range input { + out = append(out, NewDNS(in)) + } + return out +} + +// NewIP is a convenience function for creating an ACMEIdentifier with Type "ip" +// for a given IP address. +func NewIP(ip netip.Addr) ACMEIdentifier { + return ACMEIdentifier{ + Type: TypeIP, + // RFC 8738, Sec. 3: The identifier value MUST contain the textual form + // of the address as defined in RFC 1123, Sec. 2.1 for IPv4 and in RFC + // 5952, Sec. 4 for IPv6. + Value: ip.WithZone("").String(), + } +} + +// FromString converts a string to an ACMEIdentifier. +func FromString(identStr string) ACMEIdentifier { + ip, err := netip.ParseAddr(identStr) + if err == nil { + return NewIP(ip) + } + return NewDNS(identStr) +} + +// FromStringSlice converts a slice of strings to a slice of ACMEIdentifier. +func FromStringSlice(identStrs []string) ACMEIdentifiers { + var idents ACMEIdentifiers + for _, identStr := range identStrs { + idents = append(idents, FromString(identStr)) + } + return idents +} + +// fromX509 extracts the Subject Alternative Names from a certificate or CSR's fields, and +// returns a slice of ACMEIdentifiers. +func fromX509(commonName string, dnsNames []string, ipAddresses []net.IP) ACMEIdentifiers { + var sans ACMEIdentifiers + for _, name := range dnsNames { + sans = append(sans, NewDNS(name)) + } + if commonName != "" { + // Boulder won't generate certificates with a CN that's not also present + // in the SANs, but such a certificate is possible. If appended, this is + // deduplicated later with Normalize(). We assume the CN is a DNSName, + // because CNs are untyped strings without metadata, and we will never + // configure a Boulder profile to issue a certificate that contains both + // an IP address identifier and a CN. 
+ sans = append(sans, NewDNS(commonName)) + } + + for _, ip := range ipAddresses { + sans = append(sans, ACMEIdentifier{ + Type: TypeIP, + Value: ip.String(), + }) + } + + return Normalize(sans) +} + +// FromCert extracts the Subject Common Name and Subject Alternative Names from +// a certificate, and returns a slice of ACMEIdentifiers. +func FromCert(cert *x509.Certificate) ACMEIdentifiers { + return fromX509(cert.Subject.CommonName, cert.DNSNames, cert.IPAddresses) +} + +// FromCSR extracts the Subject Common Name and Subject Alternative Names from a +// CSR, and returns a slice of ACMEIdentifiers. +func FromCSR(csr *x509.CertificateRequest) ACMEIdentifiers { + return fromX509(csr.Subject.CommonName, csr.DNSNames, csr.IPAddresses) +} + +// Normalize returns the set of all unique ACME identifiers in the input after +// all of them are lowercased. The returned identifier values will be in their +// lowercased form and sorted alphabetically by value. DNS identifiers will +// precede IP address identifiers. +func Normalize(idents ACMEIdentifiers) ACMEIdentifiers { + for i := range idents { + idents[i].Value = strings.ToLower(idents[i].Value) + } + + slices.SortFunc(idents, func(a, b ACMEIdentifier) int { + if a.Type == b.Type { + if a.Value == b.Value { + return 0 + } + if a.Value < b.Value { + return -1 + } + return 1 + } + if a.Type == "dns" && b.Type == "ip" { + return -1 + } + return 1 + }) + + return slices.Compact(idents) +} + +// ToValues returns a slice of DNS names and a slice of IP addresses in the +// input. If an identifier type or IP address is invalid, it returns an error. +func (idents ACMEIdentifiers) ToValues() ([]string, []net.IP, error) { + var dnsNames []string + var ipAddresses []net.IP + + for _, ident := range idents { + switch ident.Type { + case TypeDNS: + dnsNames = append(dnsNames, ident.Value) + case TypeIP: + ip := net.ParseIP(ident.Value) + if ip == nil { + return nil, nil, fmt.Errorf("parsing IP address: %s", ident.Value) + } + ipAddresses = append(ipAddresses, ip) + default: + return nil, nil, fmt.Errorf("evaluating identifier type: %s for %s", ident.Type, ident.Value) + } + } + + return dnsNames, ipAddresses, nil +} diff --git a/identifier/identifier_test.go b/identifier/identifier_test.go new file mode 100644 index 00000000000..b247466a900 --- /dev/null +++ b/identifier/identifier_test.go @@ -0,0 +1,263 @@ +package identifier + +import ( + "crypto/x509" + "crypto/x509/pkix" + "net" + "net/netip" + "reflect" + "slices" + "testing" +) + +func TestNewIP(t *testing.T) { + cases := []struct { + name string + ip netip.Addr + want ACMEIdentifier + }{ + { + name: "IPv4 address", + ip: netip.MustParseAddr("9.9.9.9"), + want: ACMEIdentifier{Type: TypeIP, Value: "9.9.9.9"}, + }, + { + name: "IPv6 address", + ip: netip.MustParseAddr("fe80::cafe"), + want: ACMEIdentifier{Type: TypeIP, Value: "fe80::cafe"}, + }, + { + name: "IPv6 address with scope zone", + ip: netip.MustParseAddr("fe80::cafe%lo"), + want: ACMEIdentifier{Type: TypeIP, Value: "fe80::cafe"}, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := NewIP(tc.ip) + if got != tc.want { + t.Errorf("NewIP(%#v) = %#v, but want %#v", tc.ip, got, tc.want) + } + }) + } +} + +// TestFromX509 tests FromCert and FromCSR, which are fromX509's public +// wrappers. 
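Before the tests, a quick sketch of how the constructors and helpers above compose (values are illustrative):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/letsencrypt/boulder/identifier"
)

func main() {
	// FromString guesses the type: parseable IPs become "ip" identifiers,
	// everything else is treated as a DNS name.
	idents := identifier.ACMEIdentifiers{
		identifier.FromString("FE80::CAFE"),
		identifier.NewDNS("Example.COM"),
		identifier.NewIP(netip.MustParseAddr("192.0.2.1")),
	}

	// Normalize lowercases, sorts (DNS before IP), and de-duplicates.
	for _, id := range identifier.Normalize(idents) {
		fmt.Println(id.Type, id.Value)
	}
	// Output:
	// dns example.com
	// ip 192.0.2.1
	// ip fe80::cafe
}
```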
+func TestFromX509(t *testing.T) { + cases := []struct { + name string + subject pkix.Name + dnsNames []string + ipAddresses []net.IP + want ACMEIdentifiers + }{ + { + name: "no explicit CN", + dnsNames: []string{"a.com"}, + want: ACMEIdentifiers{NewDNS("a.com")}, + }, + { + name: "explicit uppercase CN", + subject: pkix.Name{CommonName: "A.com"}, + dnsNames: []string{"a.com"}, + want: ACMEIdentifiers{NewDNS("a.com")}, + }, + { + name: "no explicit CN, uppercase SAN", + dnsNames: []string{"A.com"}, + want: ACMEIdentifiers{NewDNS("a.com")}, + }, + { + name: "duplicate SANs", + dnsNames: []string{"b.com", "b.com", "a.com", "a.com"}, + want: ACMEIdentifiers{NewDNS("a.com"), NewDNS("b.com")}, + }, + { + name: "explicit CN not found in SANs", + subject: pkix.Name{CommonName: "a.com"}, + dnsNames: []string{"b.com"}, + want: ACMEIdentifiers{NewDNS("a.com"), NewDNS("b.com")}, + }, + { + name: "mix of DNSNames and IPAddresses", + dnsNames: []string{"a.com"}, + ipAddresses: []net.IP{{192, 168, 1, 1}}, + want: ACMEIdentifiers{NewDNS("a.com"), NewIP(netip.MustParseAddr("192.168.1.1"))}, + }, + } + for _, tc := range cases { + t.Run("cert/"+tc.name, func(t *testing.T) { + t.Parallel() + got := FromCert(&x509.Certificate{Subject: tc.subject, DNSNames: tc.dnsNames, IPAddresses: tc.ipAddresses}) + if !slices.Equal(got, tc.want) { + t.Errorf("FromCert() got %#v, but want %#v", got, tc.want) + } + }) + t.Run("csr/"+tc.name, func(t *testing.T) { + t.Parallel() + got := FromCSR(&x509.CertificateRequest{Subject: tc.subject, DNSNames: tc.dnsNames, IPAddresses: tc.ipAddresses}) + if !slices.Equal(got, tc.want) { + t.Errorf("FromCSR() got %#v, but want %#v", got, tc.want) + } + }) + } +} + +func TestNormalize(t *testing.T) { + cases := []struct { + name string + idents ACMEIdentifiers + want ACMEIdentifiers + }{ + { + name: "convert to lowercase", + idents: ACMEIdentifiers{ + {Type: TypeDNS, Value: "AlPha.example.coM"}, + {Type: TypeIP, Value: "fe80::CAFE"}, + }, + want: ACMEIdentifiers{ + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + }, + }, + { + name: "sort", + idents: ACMEIdentifiers{ + {Type: TypeDNS, Value: "foobar.com"}, + {Type: TypeDNS, Value: "bar.com"}, + {Type: TypeDNS, Value: "baz.com"}, + {Type: TypeDNS, Value: "a.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + {Type: TypeIP, Value: "2001:db8::1dea"}, + {Type: TypeIP, Value: "192.168.1.1"}, + }, + want: ACMEIdentifiers{ + {Type: TypeDNS, Value: "a.com"}, + {Type: TypeDNS, Value: "bar.com"}, + {Type: TypeDNS, Value: "baz.com"}, + {Type: TypeDNS, Value: "foobar.com"}, + {Type: TypeIP, Value: "192.168.1.1"}, + {Type: TypeIP, Value: "2001:db8::1dea"}, + {Type: TypeIP, Value: "fe80::cafe"}, + }, + }, + { + name: "de-duplicate", + idents: ACMEIdentifiers{ + {Type: TypeDNS, Value: "AlPha.example.coM"}, + {Type: TypeIP, Value: "fe80::CAFE"}, + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + NewIP(netip.MustParseAddr("fe80:0000:0000:0000:0000:0000:0000:cafe")), + }, + want: ACMEIdentifiers{ + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + }, + }, + { + name: "DNS before IP", + idents: ACMEIdentifiers{ + {Type: TypeIP, Value: "fe80::cafe"}, + {Type: TypeDNS, Value: "alpha.example.com"}, + }, + want: ACMEIdentifiers{ + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + }, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got := Normalize(tc.idents) + if 
!slices.Equal(got, tc.want) { + t.Errorf("Got %#v, but want %#v", got, tc.want) + } + }) + } +} + +func TestToValues(t *testing.T) { + cases := []struct { + name string + idents ACMEIdentifiers + wantErr string + wantDnsNames []string + wantIpAddresses []net.IP + }{ + { + name: "DNS names and IP addresses", + // These are deliberately out of alphabetical and type order, to + // ensure ToValues doesn't do normalization, which ought to be done + // explicitly. + idents: ACMEIdentifiers{ + {Type: TypeDNS, Value: "beta.example.com"}, + {Type: TypeIP, Value: "fe80::cafe"}, + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeIP, Value: "127.0.0.1"}, + }, + wantErr: "", + wantDnsNames: []string{"beta.example.com", "alpha.example.com"}, + wantIpAddresses: []net.IP{net.ParseIP("fe80::cafe"), net.ParseIP("127.0.0.1")}, + }, + { + name: "DNS names only", + idents: ACMEIdentifiers{ + {Type: TypeDNS, Value: "alpha.example.com"}, + {Type: TypeDNS, Value: "beta.example.com"}, + }, + wantErr: "", + wantDnsNames: []string{"alpha.example.com", "beta.example.com"}, + wantIpAddresses: nil, + }, + { + name: "IP addresses only", + idents: ACMEIdentifiers{ + {Type: TypeIP, Value: "127.0.0.1"}, + {Type: TypeIP, Value: "fe80::cafe"}, + }, + wantErr: "", + wantDnsNames: nil, + wantIpAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("fe80::cafe")}, + }, + { + name: "invalid IP address", + idents: ACMEIdentifiers{ + {Type: TypeIP, Value: "fe80::c0ffee"}, + }, + wantErr: "parsing IP address: fe80::c0ffee", + wantDnsNames: nil, + wantIpAddresses: nil, + }, + { + name: "invalid identifier type", + idents: ACMEIdentifiers{ + {Type: "fnord", Value: "panic.example.com"}, + }, + wantErr: "evaluating identifier type: fnord for panic.example.com", + wantDnsNames: nil, + wantIpAddresses: nil, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + gotDnsNames, gotIpAddresses, gotErr := tc.idents.ToValues() + if !slices.Equal(gotDnsNames, tc.wantDnsNames) { + t.Errorf("Got DNS names %#v, but want %#v", gotDnsNames, tc.wantDnsNames) + } + if !reflect.DeepEqual(gotIpAddresses, tc.wantIpAddresses) { + t.Errorf("Got IP addresses %#v, but want %#v", gotIpAddresses, tc.wantIpAddresses) + } + if tc.wantErr != "" && (gotErr.Error() != tc.wantErr) { + t.Errorf("Got error %#v, but want %#v", gotErr.Error(), tc.wantErr) + } + if tc.wantErr == "" && gotErr != nil { + t.Errorf("Got error %#v, but didn't want one", gotErr.Error()) + } + }) + } +} diff --git a/issuance/cert.go b/issuance/cert.go new file mode 100644 index 00000000000..9be35237c9a --- /dev/null +++ b/issuance/cert.go @@ -0,0 +1,428 @@ +package issuance + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/json" + "errors" + "fmt" + "math/big" + "net" + "sync" + "time" + + ct "github.com/google/certificate-transparency-go" + cttls "github.com/google/certificate-transparency-go/tls" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/jmhodges/clock" + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/linter" + "github.com/letsencrypt/boulder/precert" +) + +// ProfileConfig describes the certificate issuance constraints for all issuers. +type ProfileConfig struct { + // OmitCommonName causes the CN field to be excluded from the resulting + // certificate, regardless of its inclusion in the IssuanceRequest. 
+ OmitCommonName bool + // OmitKeyEncipherment causes the keyEncipherment bit to be omitted from the + // Key Usage field of all certificates (instead of only from ECDSA certs). + OmitKeyEncipherment bool + // OmitClientAuth causes the id-kp-clientAuth OID (TLS Client Authentication) + // to be omitted from the EKU extension. + OmitClientAuth bool + // OmitSKID causes the Subject Key Identifier extension to be omitted. + OmitSKID bool + + MaxValidityPeriod config.Duration + MaxValidityBackdate config.Duration + + // LintConfig is a path to a zlint config file, which can be used to control + // the behavior of zlint's "customizable lints". + LintConfig string + // IgnoredLints is a list of lint names that we know will fail for this + // profile, and which we know it is safe to ignore. + IgnoredLints []string +} + +// Profile is the validated structure created by reading in a ProfileConfig +type Profile struct { + omitCommonName bool + omitKeyEncipherment bool + omitClientAuth bool + omitSKID bool + + maxBackdate time.Duration + maxValidity time.Duration + + lints lint.Registry +} + +// NewProfile converts the profile config into a usable profile. +func NewProfile(profileConfig ProfileConfig) (*Profile, error) { + // The Baseline Requirements, Section 7.1.2.7, says that the notBefore time + // must be "within 48 hours of the time of signing". We can be even stricter. + if profileConfig.MaxValidityBackdate.Duration >= 24*time.Hour { + return nil, fmt.Errorf("backdate %q is too large", profileConfig.MaxValidityBackdate.Duration) + } + + // Our CP/CPS, Section 7.1, says that our Subscriber Certificates have a + // validity period of "up to 100 days". + if profileConfig.MaxValidityPeriod.Duration >= 100*24*time.Hour { + return nil, fmt.Errorf("validity period %q is too large", profileConfig.MaxValidityPeriod.Duration) + } + + lints, err := linter.NewRegistry(profileConfig.IgnoredLints) + cmd.FailOnError(err, "Failed to create zlint registry") + if profileConfig.LintConfig != "" { + lintconfig, err := lint.NewConfigFromFile(profileConfig.LintConfig) + cmd.FailOnError(err, "Failed to load zlint config file") + lints.SetConfiguration(lintconfig) + } + + sp := &Profile{ + omitCommonName: profileConfig.OmitCommonName, + omitKeyEncipherment: profileConfig.OmitKeyEncipherment, + omitClientAuth: profileConfig.OmitClientAuth, + omitSKID: profileConfig.OmitSKID, + maxBackdate: profileConfig.MaxValidityBackdate.Duration, + maxValidity: profileConfig.MaxValidityPeriod.Duration, + lints: lints, + } + + return sp, nil +} + +// GenerateValidity returns a notBefore/notAfter pair bracketing the input time, +// based on the profile's configured backdate and validity. +func (p *Profile) GenerateValidity(now time.Time) (time.Time, time.Time) { + // Don't use the full maxBackdate, to ensure that the actual backdate remains + // acceptable throughout the rest of the issuance process. + backdate := time.Duration(float64(p.maxBackdate.Nanoseconds()) * 0.9) + notBefore := now.Add(-1 * backdate).Truncate(time.Second) + // Subtract one second, because certificate validity periods are *inclusive* + // of their final second (Baseline Requirements, Section 1.6.1). + notAfter := notBefore.Add(p.maxValidity).Add(-1 * time.Second).Truncate(time.Second) + return notBefore, notAfter +} + +// requestValid verifies the passed IssuanceRequest against the profile. If the +// request doesn't match the signing profile an error is returned. 
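To make the GenerateValidity arithmetic above concrete, the sketch below re-derives the values used by the test at the bottom of this diff: with a one-hour backdate cap, only 90% (54 minutes) is actually used, and notAfter lands one second short of a full validity period because the final second is inclusive.

```go
package main

import (
	"fmt"
	"time"
)

// validity mirrors the GenerateValidity logic above, for illustration only.
func validity(now time.Time, maxBackdate, maxValidity time.Duration) (time.Time, time.Time) {
	backdate := time.Duration(float64(maxBackdate.Nanoseconds()) * 0.9)
	notBefore := now.Add(-1 * backdate).Truncate(time.Second)
	notAfter := notBefore.Add(maxValidity).Add(-1 * time.Second).Truncate(time.Second)
	return notBefore, notAfter
}

func main() {
	now := time.Date(2015, time.June, 4, 11, 4, 38, 0, time.UTC)
	nb, na := validity(now, time.Hour, 7*24*time.Hour)
	fmt.Println(nb) // 2015-06-04 10:10:38 +0000 UTC (54m before "now")
	fmt.Println(na) // 2015-06-11 10:10:37 +0000 UTC (inclusive final second)
}
```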
+func (i *Issuer) requestValid(clk clock.Clock, prof *Profile, req *IssuanceRequest) error {
+	switch req.PublicKey.PublicKey.(type) {
+	case *rsa.PublicKey, *ecdsa.PublicKey:
+	default:
+		return errors.New("unsupported public key type")
+	}
+
+	if len(req.precertDER) == 0 && !i.IsActive() {
+		return errors.New("inactive issuer cannot issue precert")
+	}
+
+	if len(req.SubjectKeyId) != 0 && len(req.SubjectKeyId) != 20 {
+		return errors.New("unexpected subject key ID length")
+	}
+
+	if req.IncludeCTPoison && req.sctList != nil {
+		return errors.New("cannot include both ct poison and sct list extensions")
+	}
+
+	// The validity period is calculated inclusive of the whole second represented
+	// by the notAfter timestamp.
+	validity := req.NotAfter.Add(time.Second).Sub(req.NotBefore)
+	if validity <= 0 {
+		return errors.New("NotAfter must be after NotBefore")
+	}
+	if validity > prof.maxValidity {
+		return fmt.Errorf("validity period is more than the maximum allowed period (%s>%s)", validity, prof.maxValidity)
+	}
+	backdatedBy := clk.Now().Sub(req.NotBefore)
+	if backdatedBy > prof.maxBackdate {
+		return fmt.Errorf("NotBefore is backdated more than the maximum allowed period (%s>%s)", backdatedBy, prof.maxBackdate)
+	}
+	if backdatedBy < 0 {
+		return errors.New("NotBefore is in the future")
+	}
+
+	// We use 19 here because a 20-byte serial could produce >20 octets when
+	// encoded in ASN.1. That happens when the first byte is >= 0x80. See
+	// https://letsencrypt.org/docs/a-warm-welcome-to-asn1-and-der/#integer-encoding
+	if len(req.Serial) > 19 || len(req.Serial) < 9 {
+		return errors.New("serial must be between 9 and 19 bytes")
+	}
+
+	return nil
+}
+
+// Baseline Requirements, Section 7.1.6.1: domain-validated
+var domainValidatedOID = func() x509.OID {
+	x509OID, err := x509.OIDFromInts([]uint64{2, 23, 140, 1, 2, 1})
+	if err != nil {
+		// This should never happen, as the OID is hardcoded.
+		panic(fmt.Errorf("failed to create OID using ints %v: %s", x509OID, err))
+	}
+	return x509OID
+}()
+
+func (i *Issuer) generateTemplate() *x509.Certificate {
+	template := &x509.Certificate{
+		SignatureAlgorithm:    i.sigAlg,
+		IssuingCertificateURL: []string{i.issuerURL},
+		BasicConstraintsValid: true,
+		// Baseline Requirements, Section 7.1.6.1: domain-validated
+		Policies: []x509.OID{domainValidatedOID},
+	}
+
+	return template
+}
+
+var ctPoisonExt = pkix.Extension{
+	// OID for CT poison, RFC 6962 (was never assigned a proper id-pe- name)
+	Id:       asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3},
+	Value:    asn1.NullBytes,
+	Critical: true,
+}
+
+// OID for SCT list, RFC 6962 (was never assigned a proper id-pe- name)
+var sctListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+
+func generateSCTListExt(scts []ct.SignedCertificateTimestamp) (pkix.Extension, error) {
+	list := ctx509.SignedCertificateTimestampList{}
+	for _, sct := range scts {
+		sctBytes, err := cttls.Marshal(sct)
+		if err != nil {
+			return pkix.Extension{}, err
+		}
+		list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes})
+	}
+	listBytes, err := cttls.Marshal(list)
+	if err != nil {
+		return pkix.Extension{}, err
+	}
+	extBytes, err := asn1.Marshal(listBytes)
+	if err != nil {
+		return pkix.Extension{}, err
+	}
+	return pkix.Extension{
+		Id:    sctListOID,
+		Value: extBytes,
+	}, nil
+}
+
+// MarshalablePublicKey is a wrapper for crypto.PublicKey with a custom JSON
+// marshaller that encodes the public key as a DER-encoded SubjectPublicKeyInfo.
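Returning to the serial-length check in requestValid above: DER prepends a 0x00 octet whenever an INTEGER's first content byte has its high bit set (>= 0x80), which is why a 20-byte serial can encode to 21 content octets. A quick demonstration:

```go
package main

import (
	"encoding/asn1"
	"fmt"
	"math/big"
)

func main() {
	// A 20-byte serial whose first byte has the high bit set.
	raw := make([]byte, 20)
	raw[0] = 0x80

	der, _ := asn1.Marshal(new(big.Int).SetBytes(raw))
	// Tag (1 byte) + length (1 byte) + a 0x00 pad + 20 value bytes = 23,
	// i.e. 21 content octets rather than 20.
	fmt.Println(len(der), der[:4]) // 23 [2 21 0 128]
}
```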
+type MarshalablePublicKey struct { + crypto.PublicKey +} + +func (pk MarshalablePublicKey) MarshalJSON() ([]byte, error) { + keyDER, err := x509.MarshalPKIXPublicKey(pk.PublicKey) + if err != nil { + return nil, err + } + return json.Marshal(keyDER) +} + +type HexMarshalableBytes []byte + +func (h HexMarshalableBytes) MarshalJSON() ([]byte, error) { + return json.Marshal(fmt.Sprintf("%x", h)) +} + +// IssuanceRequest describes a certificate issuance request +// +// It can be marshaled as JSON for logging purposes, though note that sctList and precertDER +// will be omitted from the marshaled output because they are unexported. +type IssuanceRequest struct { + // PublicKey is of type MarshalablePublicKey so we can log an IssuanceRequest as a JSON object. + PublicKey MarshalablePublicKey + SubjectKeyId HexMarshalableBytes + + Serial HexMarshalableBytes + + NotBefore time.Time + NotAfter time.Time + + CommonName string + DNSNames []string + IPAddresses []net.IP + + IncludeCTPoison bool + + // sctList is a list of SCTs to include in a final certificate. + // If it is non-empty, PrecertDER must also be non-empty. + sctList []ct.SignedCertificateTimestamp + // precertDER is the encoded bytes of the precertificate that a + // final certificate is expected to correspond to. If it is non-empty, + // SCTList must also be non-empty. + precertDER []byte +} + +// An issuanceToken represents an assertion that Issuer.Lint has generated +// a linting certificate for a given input and run the linter over it with no +// errors. The token may be redeemed (at most once) to sign a certificate or +// precertificate with the same Issuer's private key, containing the same +// contents that were linted. +type issuanceToken struct { + mu sync.Mutex + template *x509.Certificate + pubKey MarshalablePublicKey + // A pointer to the issuer that created this token. This token may only + // be redeemed by the same issuer. + issuer *Issuer +} + +// Prepare combines the given profile and request with the Issuer's information +// to create a template certificate. It then generates a linting certificate +// from that template and runs the linter over it. If successful, returns both +// the linting certificate (which can be stored) and an issuanceToken. The +// issuanceToken can be used to sign a matching certificate with this Issuer's +// private key. 
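Given the custom marshalers above, an IssuanceRequest can be logged as JSON directly: the public key serializes as base64 DER (Go's default encoding for []byte), the SKID and serial as hex strings, and the unexported sctList and precertDER fields are omitted. A sketch with illustrative values:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"time"

	"github.com/letsencrypt/boulder/issuance"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	req := issuance.IssuanceRequest{
		PublicKey:    issuance.MarshalablePublicKey{PublicKey: key.Public()},
		SubjectKeyId: issuance.HexMarshalableBytes{0xde, 0xad, 0xbe, 0xef},
		Serial:       issuance.HexMarshalableBytes{0x01, 0x02, 0x03},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(90 * 24 * time.Hour),
		DNSNames:     []string{"example.com"},
	}
	out, _ := json.Marshal(req)
	// SubjectKeyId serializes as "deadbeef" and Serial as "010203".
	fmt.Println(string(out))
}
```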
+func (i *Issuer) Prepare(prof *Profile, req *IssuanceRequest) ([]byte, *issuanceToken, error) { + // check request is valid according to the issuance profile + err := i.requestValid(i.clk, prof, req) + if err != nil { + return nil, nil, err + } + + // generate template from the issuer's data + template := i.generateTemplate() + + ekus := []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + } + if prof.omitClientAuth { + ekus = []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + } + } + template.ExtKeyUsage = ekus + + // populate template from the issuance request + template.NotBefore, template.NotAfter = req.NotBefore, req.NotAfter + template.SerialNumber = big.NewInt(0).SetBytes(req.Serial) + if req.CommonName != "" && !prof.omitCommonName { + template.Subject.CommonName = req.CommonName + } + template.DNSNames = req.DNSNames + template.IPAddresses = req.IPAddresses + + switch req.PublicKey.PublicKey.(type) { + case *rsa.PublicKey: + if prof.omitKeyEncipherment { + template.KeyUsage = x509.KeyUsageDigitalSignature + } else { + template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment + } + case *ecdsa.PublicKey: + template.KeyUsage = x509.KeyUsageDigitalSignature + } + + if !prof.omitSKID { + template.SubjectKeyId = req.SubjectKeyId + } + + if req.IncludeCTPoison { + template.ExtraExtensions = append(template.ExtraExtensions, ctPoisonExt) + } else if len(req.sctList) > 0 { + if len(req.precertDER) == 0 { + return nil, nil, errors.New("inconsistent request contains sctList but no precertDER") + } + sctListExt, err := generateSCTListExt(req.sctList) + if err != nil { + return nil, nil, err + } + template.ExtraExtensions = append(template.ExtraExtensions, sctListExt) + } else { + return nil, nil, errors.New("invalid request contains neither sctList nor precertDER") + } + + // Pick a CRL shard based on the serial number modulo the number of shards. + // This gives us random distribution that is nonetheless consistent between + // precert and cert. + shardZeroBased := big.NewInt(0).Mod(template.SerialNumber, big.NewInt(int64(i.crlShards))) + shard := int(shardZeroBased.Int64()) + 1 + template.CRLDistributionPoints = []string{i.crlURL(shard)} + + // check that the tbsCertificate is properly formed by signing it + // with a throwaway key and then linting it using zlint + lintCertBytes, err := i.Linter.Check(template, req.PublicKey.PublicKey, prof.lints) + if err != nil { + return nil, nil, fmt.Errorf("tbsCertificate linting failed: %w", err) + } + + if len(req.precertDER) > 0 { + err = precert.Correspond(req.precertDER, lintCertBytes) + if err != nil { + return nil, nil, fmt.Errorf("precert does not correspond to linted final cert: %w", err) + } + } + + token := &issuanceToken{sync.Mutex{}, template, req.PublicKey, i} + return lintCertBytes, token, nil +} + +// Issue performs a real issuance using an issuanceToken resulting from a +// previous call to Prepare(). Call this at most once per token. Calls after +// the first will receive an error. 
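+// (Redeeming a token clears its template under the token's mutex, which is +// why second and subsequent calls fail.)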
+func (i *Issuer) Issue(token *issuanceToken) ([]byte, error) { + if token == nil { + return nil, errors.New("nil issuanceToken") + } + token.mu.Lock() + defer token.mu.Unlock() + if token.template == nil { + return nil, errors.New("issuance token already redeemed") + } + template := token.template + token.template = nil + + if token.issuer != i { + return nil, errors.New("tried to redeem issuance token with the wrong issuer") + } + + return x509.CreateCertificate(rand.Reader, template, i.Cert.Certificate, token.pubKey.PublicKey, i.Signer) +} + +// containsCTPoison returns true if the provided set of extensions includes +// an entry whose OID and value both match the expected values for the CT +// Poison extension. +func containsCTPoison(extensions []pkix.Extension) bool { + for _, ext := range extensions { + if ext.Id.Equal(ctPoisonExt.Id) && bytes.Equal(ext.Value, asn1.NullBytes) { + return true + } + } + return false +} + +// RequestFromPrecert constructs a final certificate IssuanceRequest matching +// the provided precertificate. It returns an error if the precertificate doesn't +// contain the CT poison extension. +func RequestFromPrecert(precert *x509.Certificate, scts []ct.SignedCertificateTimestamp) (*IssuanceRequest, error) { + if !containsCTPoison(precert.Extensions) { + return nil, errors.New("provided certificate doesn't contain the CT poison extension") + } + return &IssuanceRequest{ + PublicKey: MarshalablePublicKey{precert.PublicKey}, + SubjectKeyId: precert.SubjectKeyId, + Serial: precert.SerialNumber.Bytes(), + NotBefore: precert.NotBefore, + NotAfter: precert.NotAfter, + CommonName: precert.Subject.CommonName, + DNSNames: precert.DNSNames, + IPAddresses: precert.IPAddresses, + sctList: scts, + precertDER: precert.Raw, + }, nil +} diff --git a/issuance/cert_test.go b/issuance/cert_test.go new file mode 100644 index 00000000000..4e986290d39 --- /dev/null +++ b/issuance/cert_test.go @@ -0,0 +1,960 @@ +package issuance + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "net" + "reflect" + "strings" + "testing" + "time" + + ct "github.com/google/certificate-transparency-go" + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/ctpolicy/loglist" + "github.com/letsencrypt/boulder/linter" + "github.com/letsencrypt/boulder/test" +) + +var ( + goodSKID = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9} +) + +func defaultProfile() *Profile { + p, _ := NewProfile(defaultProfileConfig()) + return p +} + +func TestGenerateValidity(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC)) + + tests := []struct { + name string + backdate time.Duration + validity time.Duration + notBefore time.Time + notAfter time.Time + }{ + { + name: "normal usage", + backdate: time.Hour, // 90% of one hour is 54 minutes + validity: 7 * 24 * time.Hour, + notBefore: time.Date(2015, time.June, 04, 10, 10, 38, 0, time.UTC), + notAfter: time.Date(2015, time.June, 11, 10, 10, 37, 0, time.UTC), + }, + { + name: "zero backdate", + backdate: 0, + validity: 7 * 24 * time.Hour, + notBefore: time.Date(2015, time.June, 04, 11, 04, 38, 0, time.UTC), + notAfter: time.Date(2015, time.June, 11, 11, 04, 37, 0, time.UTC), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + p := Profile{maxBackdate: tc.backdate, maxValidity: tc.validity} + notBefore, notAfter := 
p.GenerateValidity(fc.Now()) + test.AssertEquals(t, notBefore, tc.notBefore) + test.AssertEquals(t, notAfter, tc.notAfter) + }) + } +} + +func TestCRLURL(t *testing.T) { + issuer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, clock.NewFake()) + if err != nil { + t.Fatalf("newIssuer: %s", err) + } + url := issuer.crlURL(4928) + want := "http://crl-url.example.org/4928.crl" + if url != want { + t.Errorf("crlURL(4928)=%s, want %s", url, want) + } +} + +func TestRequestValid(t *testing.T) { + fc := clock.NewFake() + fc.Add(time.Hour * 24) + + tests := []struct { + name string + issuer *Issuer + profile *Profile + request *IssuanceRequest + expectedError string + }{ + { + name: "unsupported key type", + issuer: &Issuer{}, + profile: &Profile{}, + request: &IssuanceRequest{PublicKey: MarshalablePublicKey{&dsa.PublicKey{}}}, + expectedError: "unsupported public key type", + }, + { + name: "inactive (rsa)", + issuer: &Issuer{}, + profile: &Profile{}, + request: &IssuanceRequest{PublicKey: MarshalablePublicKey{&rsa.PublicKey{}}}, + expectedError: "inactive issuer cannot issue precert", + }, + { + name: "inactive (ecdsa)", + issuer: &Issuer{}, + profile: &Profile{}, + request: &IssuanceRequest{PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}}, + expectedError: "inactive issuer cannot issue precert", + }, + { + name: "skid too short", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{}, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: []byte{0, 1, 2, 3, 4}, + }, + expectedError: "unexpected subject key ID length", + }, + { + name: "both sct list and ct poison provided", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{}, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + IncludeCTPoison: true, + sctList: []ct.SignedCertificateTimestamp{}, + }, + expectedError: "cannot include both ct poison and sct list extensions", + }, + { + name: "negative validity", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{}, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now().Add(time.Hour), + NotAfter: fc.Now(), + }, + expectedError: "NotAfter must be after NotBefore", + }, + { + name: "validity larger than max", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Minute, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + }, + expectedError: "validity period is more than the maximum allowed period (1h0m0s>1m0s)", + }, + { + name: "validity larger than max due to inclusivity", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour), + }, + expectedError: "validity period is more than the maximum allowed period (1h0m1s>1h0m0s)", + }, + { + name: "validity backdated more than max", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + maxBackdate: time.Hour, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + 
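// NotBefore here is two hours in the past, exceeding the one-hour maxBackdate. +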
NotBefore: fc.Now().Add(-time.Hour * 2), + NotAfter: fc.Now().Add(-time.Hour), + }, + expectedError: "NotBefore is backdated more than the maximum allowed period (2h0m0s>1h0m0s)", + }, + { + name: "validity is forward dated", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + maxBackdate: time.Hour, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now().Add(time.Hour), + NotAfter: fc.Now().Add(time.Hour * 2), + }, + expectedError: "NotBefore is in the future", + }, + { + name: "serial too short", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour), + Serial: []byte{0, 1, 2, 3, 4, 5, 6, 7}, + }, + expectedError: "serial must be between 9 and 19 bytes", + }, + { + name: "serial too long", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour), + Serial: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + expectedError: "serial must be between 9 and 19 bytes", + }, + { + name: "good with poison", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour), + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + IncludeCTPoison: true, + }, + }, + { + name: "good with scts", + issuer: &Issuer{ + profiles: []string{"modern"}, + }, + profile: &Profile{ + maxValidity: time.Hour * 2, + }, + request: &IssuanceRequest{ + PublicKey: MarshalablePublicKey{&ecdsa.PublicKey{}}, + SubjectKeyId: goodSKID, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour), + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + sctList: []ct.SignedCertificateTimestamp{}, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := tc.issuer.requestValid(fc, tc.profile, tc.request) + if err != nil { + if tc.expectedError == "" { + t.Errorf("failed with unexpected error: %s", err) + } else if tc.expectedError != err.Error() { + t.Errorf("failed with unexpected error, wanted: %q, got: %q", tc.expectedError, err.Error()) + } + return + } else if tc.expectedError != "" { + t.Errorf("didn't fail, expected %q", tc.expectedError) + } + }) + } +} + +func TestGenerateTemplate(t *testing.T) { + issuer := &Issuer{ + issuerURL: "http://issuer", + crlURLBase: "http://crl/", + sigAlg: x509.SHA256WithRSA, + } + + actual := issuer.generateTemplate() + + expected := &x509.Certificate{ + BasicConstraintsValid: true, + SignatureAlgorithm: x509.SHA256WithRSA, + IssuingCertificateURL: []string{"http://issuer"}, + Policies: []x509.OID{domainValidatedOID}, + // This field is computed based on the serial, so is not included in the template. 
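+ // (Prepare sets it at issuance time from the serial-derived CRL shard.)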
+ CRLDistributionPoints: nil, + } + + test.AssertDeepEquals(t, actual, expected) +} + +func TestIssue(t *testing.T) { + for _, tc := range []struct { + name string + generateFunc func() (crypto.Signer, error) + ku x509.KeyUsage + }{ + { + name: "RSA", + generateFunc: func() (crypto.Signer, error) { + return rsa.GenerateKey(rand.Reader, 2048) + }, + ku: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + }, + { + name: "ECDSA", + generateFunc: func() (crypto.Signer, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + }, + ku: x509.KeyUsageDigitalSignature, + }, + } { + t.Run(tc.name, func(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + pk, err := tc.generateFunc() + test.AssertNotError(t, err, "failed to generate test key") + lintCertBytes, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("128.101.101.101"), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertNotError(t, err, "Prepare failed") + _, err = x509.ParseCertificate(lintCertBytes) + test.AssertNotError(t, err, "failed to parse certificate") + certBytes, err := signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err := x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse certificate") + err = cert.CheckSignatureFrom(issuerCert.Certificate) + test.AssertNotError(t, err, "signature validation failed") + test.AssertDeepEquals(t, cert.DNSNames, []string{"example.com"}) + // net.ParseIP always returns a 16-byte address; IPv4 addresses are + // returned in IPv4-mapped IPv6 form. But RFC 5280, Sec. 4.2.1.6 + // requires that IPv4 addresses be encoded as 4 bytes. + // + // The issuance pipeline calls x509.marshalSANs, which reduces IPv4 + // addresses back to 4 bytes. Adding .To4() both allows this test to + // succeed, and covers this requirement. 
+ test.AssertDeepEquals(t, cert.IPAddresses, []net.IP{net.ParseIP("128.101.101.101").To4(), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")}) + test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) + test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) + test.AssertEquals(t, len(cert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison + test.AssertEquals(t, cert.KeyUsage, tc.ku) + if len(cert.CRLDistributionPoints) != 1 || !strings.HasPrefix(cert.CRLDistributionPoints[0], "http://crl-url.example.org/") { + t.Errorf("want CRLDistributionPoints=[http://crl-url.example.org/x.crl], got %v", cert.CRLDistributionPoints) + } + }) + } +} + +func TestIssueDNSNamesOnly(t *testing.T) { + fc := clock.NewFake() + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + if err != nil { + t.Fatalf("newIssuer: %s", err) + } + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey: %s", err) + } + _, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + if err != nil { + t.Fatalf("signer.Prepare: %s", err) + } + certBytes, err := signer.Issue(issuanceToken) + if err != nil { + t.Fatalf("signer.Issue: %s", err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatalf("x509.ParseCertificate: %s", err) + } + if !reflect.DeepEqual(cert.DNSNames, []string{"example.com"}) { + t.Errorf("got DNSNames %s, wanted example.com", cert.DNSNames) + } + // BRs 7.1.2.7.12 requires iPAddress, if present, to contain an entry. + if cert.IPAddresses != nil { + t.Errorf("got IPAddresses %s, wanted nil", cert.IPAddresses) + } +} + +func TestIssueIPAddressesOnly(t *testing.T) { + fc := clock.NewFake() + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + if err != nil { + t.Fatalf("newIssuer: %s", err) + } + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey: %s", err) + } + _, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + IPAddresses: []net.IP{net.ParseIP("128.101.101.101"), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + if err != nil { + t.Fatalf("signer.Prepare: %s", err) + } + certBytes, err := signer.Issue(issuanceToken) + if err != nil { + t.Fatalf("signer.Issue: %s", err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatalf("x509.ParseCertificate: %s", err) + } + // BRs 7.1.2.7.12 requires dNSName, if present, to contain an entry. 
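+ // (x509.ParseCertificate leaves DNSNames nil when the SAN extension carries + // no dNSName entries, so nil, rather than an empty slice, is expected here.)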
+ if cert.DNSNames != nil { + t.Errorf("got DNSNames %s, wanted nil", cert.DNSNames) + } + if !reflect.DeepEqual(cert.IPAddresses, []net.IP{net.ParseIP("128.101.101.101").To4(), net.ParseIP("3fff:aaa:a:c0ff:ee:a:bad:deed")}) { + t.Errorf("got IPAddresses %s, wanted 128.101.101.101 (4-byte) & 3fff:aaa:a:c0ff:ee:a:bad:deed (16-byte)", cert.IPAddresses) + } +} + +func TestIssueWithCRLDP(t *testing.T) { + fc := clock.NewFake() + issuerConfig := defaultIssuerConfig() + issuerConfig.CRLURLBase = "http://crls.example.net/" + issuerConfig.CRLShards = 999 + signer, err := newIssuer(issuerConfig, issuerCert, issuerSigner, fc) + if err != nil { + t.Fatalf("newIssuer: %s", err) + } + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey: %s", err) + } + profile := defaultProfile() + _, issuanceToken, err := signer.Prepare(profile, &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + if err != nil { + t.Fatalf("signer.Prepare: %s", err) + } + certBytes, err := signer.Issue(issuanceToken) + if err != nil { + t.Fatalf("signer.Issue: %s", err) + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + t.Fatalf("x509.ParseCertificate: %s", err) + } + // Because CRL shard is calculated deterministically from serial, we know which shard will be chosen. + expectedCRLDP := []string{"http://crls.example.net/919.crl"} + if !reflect.DeepEqual(cert.CRLDistributionPoints, expectedCRLDP) { + t.Errorf("CRLDP=%+v, want %+v", cert.CRLDistributionPoints, expectedCRLDP) + } +} + +func TestIssueCommonName(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + prof := defaultProfileConfig() + prof.IgnoredLints = append(prof.IgnoredLints, "w_subject_common_name_included") + cnProfile, err := NewProfile(prof) + test.AssertNotError(t, err, "NewProfile failed") + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + ir := &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com", "www.example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + } + + // In the default profile, the common name is allowed if requested. + ir.CommonName = "example.com" + _, issuanceToken, err := signer.Prepare(cnProfile, ir) + test.AssertNotError(t, err, "Prepare failed") + certBytes, err := signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err := x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse certificate") + test.AssertEquals(t, cert.Subject.CommonName, "example.com") + + // But not including the common name should be acceptable as well. 
+ ir.CommonName = "" + _, issuanceToken, err = signer.Prepare(cnProfile, ir) + test.AssertNotError(t, err, "Prepare failed") + certBytes, err = signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err = x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse certificate") + test.AssertEquals(t, cert.Subject.CommonName, "") + + // And the common name should be omitted if the profile is so configured. + ir.CommonName = "example.com" + cnProfile.omitCommonName = true + _, issuanceToken, err = signer.Prepare(cnProfile, ir) + test.AssertNotError(t, err, "Prepare failed") + certBytes, err = signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err = x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse certificate") + test.AssertEquals(t, cert.Subject.CommonName, "") +} + +func TestIssueOmissions(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + pc := defaultProfileConfig() + pc.OmitCommonName = true + pc.OmitKeyEncipherment = true + pc.OmitClientAuth = true + pc.OmitSKID = true + pc.IgnoredLints = []string{ + // Reduce the lint ignores to just the minimal (SCT-related) set. + "w_ct_sct_policy_count_unsatisfied", + "e_scts_from_same_operator", + // Ignore the warning about *not* including the SubjectKeyIdentifier extension: + // zlint has both lints (one enforcing RFC5280, the other the BRs). + "w_ext_subject_key_identifier_missing_sub_cert", + } + prof, err := NewProfile(pc) + test.AssertNotError(t, err, "building test profile") + + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + + pk, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "failed to generate test key") + _, issuanceToken, err := signer.Prepare(prof, &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + CommonName: "example.com", + IncludeCTPoison: true, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + }) + test.AssertNotError(t, err, "Prepare failed") + certBytes, err := signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err := x509.ParseCertificate(certBytes) + test.AssertNotError(t, err, "failed to parse certificate") + + test.AssertEquals(t, cert.Subject.CommonName, "") + test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature) + test.AssertDeepEquals(t, cert.ExtKeyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}) + test.AssertEquals(t, len(cert.SubjectKeyId), 0) +} + +func TestIssueCTPoison(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + _, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + IncludeCTPoison: true, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + }) + test.AssertNotError(t, err, "Prepare failed") + certBytes, err := signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + cert, err := x509.ParseCertificate(certBytes) 
+ test.AssertNotError(t, err, "failed to parse certificate") + err = cert.CheckSignatureFrom(issuerCert.Certificate) + test.AssertNotError(t, err, "signature validation failed") + test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) + test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) + test.AssertEquals(t, len(cert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison + test.AssertDeepEquals(t, cert.Extensions[9], ctPoisonExt) +} + +func mustDecodeB64(b string) []byte { + out, err := base64.StdEncoding.DecodeString(b) + if err != nil { + panic(err) + } + return out +} + +func TestIssueSCTList(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + err := loglist.InitLintList("../test/ct-test-srv/log_list.json", false) + test.AssertNotError(t, err, "failed to load log list") + + pc := defaultProfileConfig() + pc.IgnoredLints = []string{ + // Only ignore the SKID lint, i.e., don't ignore the "missing SCT" lints. + "w_ext_subject_key_identifier_not_recommended_subscriber", + } + enforceSCTsProfile, err := NewProfile(pc) + test.AssertNotError(t, err, "NewProfile failed") + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + _, issuanceToken, err := signer.Prepare(enforceSCTsProfile, &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertNotError(t, err, "Prepare failed") + precertBytes, err := signer.Issue(issuanceToken) + test.AssertNotError(t, err, "Issue failed") + precert, err := x509.ParseCertificate(precertBytes) + test.AssertNotError(t, err, "failed to parse certificate") + + sctList := []ct.SignedCertificateTimestamp{ + { + SCTVersion: ct.V1, + LogID: ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))}, + }, + { + SCTVersion: ct.V1, + LogID: ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4="))}, + }, + } + + request2, err := RequestFromPrecert(precert, sctList) + test.AssertNotError(t, err, "generating request from precert") + + _, issuanceToken2, err := signer.Prepare(enforceSCTsProfile, request2) + test.AssertNotError(t, err, "preparing final cert issuance") + + finalCertBytes, err := signer.Issue(issuanceToken2) + test.AssertNotError(t, err, "Issue failed") + + finalCert, err := x509.ParseCertificate(finalCertBytes) + test.AssertNotError(t, err, "failed to parse certificate") + + err = finalCert.CheckSignatureFrom(issuerCert.Certificate) + test.AssertNotError(t, err, "signature validation failed") + test.AssertByteEquals(t, finalCert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) + test.AssertDeepEquals(t, finalCert.PublicKey, pk.Public()) + test.AssertEquals(t, len(finalCert.Extensions), 10) // Constraints, KU, EKU, SKID, AKID, AIA, CRLDP, SAN, Policies, Poison + test.AssertDeepEquals(t, finalCert.Extensions[9], pkix.Extension{ + Id: sctListOID, + Value: []byte{ + 4, 100, 0, 98, 0, 47, 0, 56, 152, 140, 148, 208, 53, 152, 195, 147, 45, + 223, 233, 35, 186, 186, 242, 122, 66, 14, 185, 108, 65, 225, 90, 168, 12, + 26, 176, 252, 4, 189, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 47, + 
0, 82, 212, 232, 202, 113, 132, 200, 201, 36, 92, 51, 16, 122, 47, 11, + 151, 158, 40, 51, 5, 135, 35, 66, 34, 120, 49, 10, 179, 93, 191, 77, 222, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, + }) +} + +func TestIssueBadLint(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + pc := defaultProfileConfig() + pc.IgnoredLints = []string{} + noSkipLintsProfile, err := NewProfile(pc) + test.AssertNotError(t, err, "NewProfile failed") + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + _, _, err = signer.Prepare(noSkipLintsProfile, &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example-com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertError(t, err, "Prepare didn't fail") + test.AssertErrorIs(t, err, linter.ErrLinting) + test.AssertContains(t, err.Error(), "tbsCertificate linting failed: failed lint(s)") +} + +func TestIssuanceToken(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + + _, err = signer.Issue(&issuanceToken{}) + test.AssertError(t, err, "expected issuance with a zero token to fail") + + _, err = signer.Issue(nil) + test.AssertError(t, err, "expected issuance with a nil token to fail") + + pk, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "failed to generate test key") + _, issuanceToken, err := signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertNotError(t, err, "expected Prepare to succeed") + _, err = signer.Issue(issuanceToken) + test.AssertNotError(t, err, "expected first issuance to succeed") + + _, err = signer.Issue(issuanceToken) + test.AssertError(t, err, "expected second issuance with the same issuance token to fail") + test.AssertContains(t, err.Error(), "issuance token already redeemed") + + _, issuanceToken, err = signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertNotError(t, err, "expected Prepare to succeed") + + signer2, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + + _, err = signer2.Issue(issuanceToken) + test.AssertError(t, err, "expected redeeming an issuance token with the wrong issuer to fail") + test.AssertContains(t, err.Error(), "wrong issuer") +} + +func TestInvalidProfile(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + + err := loglist.InitLintList("../test/ct-test-srv/log_list.json", false) + test.AssertNotError(t, err, "failed to load log list") + + signer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer 
failed") + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + _, _, err = signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + precertDER: []byte{6, 6, 6}, + }) + test.AssertError(t, err, "Invalid IssuanceRequest") + + _, _, err = signer.Prepare(defaultProfile(), &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + sctList: []ct.SignedCertificateTimestamp{ + { + SCTVersion: ct.V1, + LogID: ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))}, + }, + }, + precertDER: []byte{}, + }) + test.AssertError(t, err, "Invalid IssuanceRequest") +} + +// Generate a precert from one profile and a final cert from another, and verify +// that the final cert errors out when linted because the lint cert doesn't +// corresponding with the precert. +func TestMismatchedProfiles(t *testing.T) { + fc := clock.NewFake() + fc.Set(time.Now()) + err := loglist.InitLintList("../test/ct-test-srv/log_list.json", false) + test.AssertNotError(t, err, "failed to load log list") + + issuer1, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + + pc := defaultProfileConfig() + pc.IgnoredLints = append(pc.IgnoredLints, "w_subject_common_name_included") + cnProfile, err := NewProfile(pc) + test.AssertNotError(t, err, "NewProfile failed") + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "failed to generate test key") + _, issuanceToken, err := issuer1.Prepare(cnProfile, &IssuanceRequest{ + PublicKey: MarshalablePublicKey{pk.Public()}, + SubjectKeyId: goodSKID, + Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, + CommonName: "example.com", + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(time.Hour - time.Second), + IncludeCTPoison: true, + }) + test.AssertNotError(t, err, "making IssuanceRequest") + + precertDER, err := issuer1.Issue(issuanceToken) + test.AssertNotError(t, err, "signing precert") + + // Create a new profile that differs slightly (no common name) + pc = defaultProfileConfig() + pc.OmitCommonName = false + test.AssertNotError(t, err, "building test lint registry") + noCNProfile, err := NewProfile(pc) + test.AssertNotError(t, err, "NewProfile failed") + + issuer2, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, fc) + test.AssertNotError(t, err, "NewIssuer failed") + + sctList := []ct.SignedCertificateTimestamp{ + { + SCTVersion: ct.V1, + LogID: ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("OJiMlNA1mMOTLd/pI7q68npCDrlsQeFaqAwasPwEvQM="))}, + }, + { + SCTVersion: ct.V1, + LogID: ct.LogID{KeyID: *(*[32]byte)(mustDecodeB64("UtToynGEyMkkXDMQei8Ll54oMwWHI0IieDEKs12/Td4="))}, + }, + } + + precert, err := x509.ParseCertificate(precertDER) + test.AssertNotError(t, err, "parsing precert") + + request2, err := RequestFromPrecert(precert, sctList) + test.AssertNotError(t, err, "RequestFromPrecert") + request2.CommonName = "" + + _, _, err = issuer2.Prepare(noCNProfile, request2) + test.AssertError(t, err, "preparing 
final cert issuance") + test.AssertContains(t, err.Error(), "precert does not correspond to linted final cert") +} + +func TestNewProfile(t *testing.T) { + for _, tc := range []struct { + name string + config ProfileConfig + wantErr string + }{ + { + name: "happy path", + config: ProfileConfig{ + MaxValidityBackdate: config.Duration{Duration: 1 * time.Hour}, + MaxValidityPeriod: config.Duration{Duration: 90 * 24 * time.Hour}, + }, + }, + { + name: "large backdate", + config: ProfileConfig{ + MaxValidityBackdate: config.Duration{Duration: 24 * time.Hour}, + MaxValidityPeriod: config.Duration{Duration: 90 * 24 * time.Hour}, + }, + wantErr: "backdate \"24h0m0s\" is too large", + }, + { + name: "large validity", + config: ProfileConfig{ + MaxValidityBackdate: config.Duration{Duration: 1 * time.Hour}, + MaxValidityPeriod: config.Duration{Duration: 397 * 24 * time.Hour}, + }, + wantErr: "validity period \"9528h0m0s\" is too large", + }, + } { + t.Run(tc.name, func(t *testing.T) { + gotProfile, gotErr := NewProfile(tc.config) + if tc.wantErr != "" { + if gotErr == nil { + t.Errorf("NewProfile(%#v) = %#v, but want err %q", tc.config, gotProfile, tc.wantErr) + } + if !strings.Contains(gotErr.Error(), tc.wantErr) { + t.Errorf("NewProfile(%#v) = %q, but want %q", tc.config, gotErr, tc.wantErr) + } + } else { + if gotErr != nil { + t.Errorf("NewProfile(%#v) = %q, but want no error", tc.config, gotErr) + } + } + }) + } +} diff --git a/issuance/crl.go b/issuance/crl.go new file mode 100644 index 00000000000..f33af188393 --- /dev/null +++ b/issuance/crl.go @@ -0,0 +1,127 @@ +package issuance + +import ( + "crypto/rand" + "crypto/x509" + "fmt" + "math/big" + "time" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/crl/idp" + "github.com/letsencrypt/boulder/linter" +) + +type CRLProfileConfig struct { + ValidityInterval config.Duration + MaxBackdate config.Duration + + // LintConfig is a path to a zlint config file, which can be used to control + // the behavior of zlint's "customizable lints". + LintConfig string + // IgnoredLints is a list of lint names that we know will fail for this + // profile, and which we know it is safe to ignore. 
+ IgnoredLints []string +} + +type CRLProfile struct { + validityInterval time.Duration + maxBackdate time.Duration + + lints lint.Registry +} + +func NewCRLProfile(config CRLProfileConfig) (*CRLProfile, error) { + lifetime := config.ValidityInterval.Duration + if lifetime >= 10*24*time.Hour { + return nil, fmt.Errorf("crl lifetime cannot be more than 10 days, got %q", lifetime) + } else if lifetime <= 0*time.Hour { + return nil, fmt.Errorf("crl lifetime must be positive, got %q", lifetime) + } + + if config.MaxBackdate.Duration < 0 { + return nil, fmt.Errorf("crl max backdate must be non-negative, got %q", config.MaxBackdate) + } + + reg, err := linter.NewRegistry(config.IgnoredLints) + if err != nil { + return nil, fmt.Errorf("creating lint registry: %w", err) + } + if config.LintConfig != "" { + lintconfig, err := lint.NewConfigFromFile(config.LintConfig) + if err != nil { + return nil, fmt.Errorf("loading zlint config file: %w", err) + } + reg.SetConfiguration(lintconfig) + } + + return &CRLProfile{ + validityInterval: config.ValidityInterval.Duration, + maxBackdate: config.MaxBackdate.Duration, + lints: reg, + }, nil +} + +type CRLRequest struct { + Number *big.Int + Shard int64 + + ThisUpdate time.Time + + Entries []x509.RevocationListEntry +} + +// crlURL combines the CRL URL base with a shard, and adds a suffix. +func (i *Issuer) crlURL(shard int) string { + return fmt.Sprintf("%s%d.crl", i.crlURLBase, shard) +} + +func (i *Issuer) IssueCRL(prof *CRLProfile, req *CRLRequest) ([]byte, error) { + backdatedBy := i.clk.Now().Sub(req.ThisUpdate) + if backdatedBy > prof.maxBackdate { + return nil, fmt.Errorf("ThisUpdate is too far in the past (%s>%s)", backdatedBy, prof.maxBackdate) + } + if backdatedBy < 0 { + return nil, fmt.Errorf("ThisUpdate is in the future (%s>%s)", req.ThisUpdate, i.clk.Now()) + } + + template := &x509.RevocationList{ + RevokedCertificateEntries: req.Entries, + Number: req.Number, + ThisUpdate: req.ThisUpdate, + NextUpdate: req.ThisUpdate.Add(-time.Second).Add(prof.validityInterval), + } + + if i.crlURLBase == "" { + return nil, fmt.Errorf("CRL must contain an issuingDistributionPoint") + } + + // Concat the base with the shard directly, since we require that the base + // end with a single trailing slash. 
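+ // For example, base "http://crl-url.example.org/" with shard 100 yields + // "http://crl-url.example.org/100.crl", as exercised in TestIssueCRL.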
+ idp, err := idp.MakeUserCertsExt([]string{ + i.crlURL(int(req.Shard)), + }) + if err != nil { + return nil, fmt.Errorf("creating IDP extension: %w", err) + } + template.ExtraExtensions = append(template.ExtraExtensions, idp) + + err = i.Linter.CheckCRL(template, prof.lints) + if err != nil { + return nil, err + } + + crlBytes, err := x509.CreateRevocationList( + rand.Reader, + template, + i.Cert.Certificate, + i.Signer, + ) + if err != nil { + return nil, err + } + + return crlBytes, nil +} diff --git a/issuance/crl_test.go b/issuance/crl_test.go new file mode 100644 index 00000000000..df30bd1af7c --- /dev/null +++ b/issuance/crl_test.go @@ -0,0 +1,249 @@ +package issuance + +import ( + "crypto/x509" + "errors" + "math/big" + "testing" + "time" + + "github.com/jmhodges/clock" + "github.com/zmap/zlint/v3/lint" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/crl/idp" + "github.com/letsencrypt/boulder/test" +) + +func TestNewCRLProfile(t *testing.T) { + t.Parallel() + tests := []struct { + name string + config CRLProfileConfig + expected *CRLProfile + expectedErr string + }{ + { + name: "validity too long", + config: CRLProfileConfig{ValidityInterval: config.Duration{Duration: 30 * 24 * time.Hour}}, + expected: nil, + expectedErr: "lifetime cannot be more than 10 days", + }, + { + name: "validity too short", + config: CRLProfileConfig{ValidityInterval: config.Duration{Duration: 0}}, + expected: nil, + expectedErr: "lifetime must be positive", + }, + { + name: "negative backdate", + config: CRLProfileConfig{ + ValidityInterval: config.Duration{Duration: 7 * 24 * time.Hour}, + MaxBackdate: config.Duration{Duration: -time.Hour}, + }, + expected: nil, + expectedErr: "backdate must be non-negative", + }, + { + name: "happy path", + config: CRLProfileConfig{ + ValidityInterval: config.Duration{Duration: 7 * 24 * time.Hour}, + MaxBackdate: config.Duration{Duration: time.Hour}, + }, + expected: &CRLProfile{ + validityInterval: 7 * 24 * time.Hour, + maxBackdate: time.Hour, + }, + expectedErr: "", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + actual, err := NewCRLProfile(tc.config) + if err != nil { + if tc.expectedErr == "" { + t.Errorf("NewCRLProfile expected success but got %q", err) + return + } + test.AssertContains(t, err.Error(), tc.expectedErr) + } else { + if tc.expectedErr != "" { + t.Errorf("NewCRLProfile succeeded but expected error %q", tc.expectedErr) + return + } + test.AssertEquals(t, actual.validityInterval, tc.expected.validityInterval) + test.AssertEquals(t, actual.maxBackdate, tc.expected.maxBackdate) + test.AssertNotNil(t, actual.lints, "lint registry should be populated") + } + }) + } +} + +func TestIssueCRL(t *testing.T) { + clk := clock.NewFake() + clk.Set(time.Now()) + + issuer, err := newIssuer(defaultIssuerConfig(), issuerCert, issuerSigner, clk) + test.AssertNotError(t, err, "creating test issuer") + + defaultProfile := CRLProfile{ + validityInterval: 7 * 24 * time.Hour, + maxBackdate: 1 * time.Hour, + lints: lint.GlobalRegistry(), + } + + defaultRequest := CRLRequest{ + Number: big.NewInt(123), + Shard: 100, + ThisUpdate: clk.Now().Add(-time.Second), + Entries: []x509.RevocationListEntry{ + { + SerialNumber: big.NewInt(987), + RevocationTime: clk.Now().Add(-24 * time.Hour), + ReasonCode: 1, + }, + }, + } + + req := defaultRequest + req.ThisUpdate = clk.Now().Add(-24 * time.Hour) + _, err = 
issuer.IssueCRL(&defaultProfile, &req) + test.AssertError(t, err, "too old crl issuance should fail") + test.AssertContains(t, err.Error(), "ThisUpdate is too far in the past") + + req = defaultRequest + req.ThisUpdate = clk.Now().Add(time.Second) + _, err = issuer.IssueCRL(&defaultProfile, &req) + test.AssertError(t, err, "future crl issuance should fail") + test.AssertContains(t, err.Error(), "ThisUpdate is in the future") + + req = defaultRequest + req.Entries = append(req.Entries, x509.RevocationListEntry{ + SerialNumber: big.NewInt(876), + RevocationTime: clk.Now().Add(-24 * time.Hour), + ReasonCode: 6, + }) + _, err = issuer.IssueCRL(&defaultProfile, &req) + test.AssertError(t, err, "invalid reason code should result in lint failure") + test.AssertContains(t, err.Error(), "Reason code not included in BR") + + req = defaultRequest + res, err := issuer.IssueCRL(&defaultProfile, &req) + test.AssertNotError(t, err, "crl issuance should have succeeded") + parsedRes, err := x509.ParseRevocationList(res) + test.AssertNotError(t, err, "parsing test crl") + test.AssertEquals(t, parsedRes.Issuer.CommonName, issuer.Cert.Subject.CommonName) + test.AssertDeepEquals(t, parsedRes.Number, big.NewInt(123)) + expectUpdate := req.ThisUpdate.Add(-time.Second).Add(defaultProfile.validityInterval).Truncate(time.Second).UTC() + test.AssertEquals(t, parsedRes.NextUpdate, expectUpdate) + test.AssertEquals(t, len(parsedRes.Extensions), 3) + found, err := revokedCertificatesFieldExists(res) + test.AssertNotError(t, err, "Should have been able to parse CRL") + test.Assert(t, found, "Expected the revokedCertificates field to exist") + + idps, err := idp.GetIDPURIs(parsedRes.Extensions) + test.AssertNotError(t, err, "getting IDP URIs from test CRL") + test.AssertEquals(t, len(idps), 1) + test.AssertEquals(t, idps[0], "http://crl-url.example.org/100.crl") + + req = defaultRequest + crlURLBase := issuer.crlURLBase + issuer.crlURLBase = "" + _, err = issuer.IssueCRL(&defaultProfile, &req) + test.AssertError(t, err, "crl issuance with no IDP should fail") + test.AssertContains(t, err.Error(), "must contain an issuingDistributionPoint") + issuer.crlURLBase = crlURLBase + + // A CRL with no entries must not have the revokedCertificates field + req = defaultRequest + req.Entries = []x509.RevocationListEntry{} + res, err = issuer.IssueCRL(&defaultProfile, &req) + test.AssertNotError(t, err, "issuing crl with no entries") + parsedRes, err = x509.ParseRevocationList(res) + test.AssertNotError(t, err, "parsing test crl") + test.AssertEquals(t, parsedRes.Issuer.CommonName, issuer.Cert.Subject.CommonName) + test.AssertDeepEquals(t, parsedRes.Number, big.NewInt(123)) + test.AssertEquals(t, len(parsedRes.RevokedCertificateEntries), 0) + found, err = revokedCertificatesFieldExists(res) + test.AssertNotError(t, err, "Should have been able to parse CRL") + test.Assert(t, !found, "Violation of RFC 5280 Section 5.1.2.6") +} + +// revokedCertificatesFieldExists is a modified version of +// x509.ParseRevocationList that takes a given sequence of bytes representing a +// CRL and parses away layers until the optional `revokedCertificates` field of +// a TBSCertList is found. It returns a boolean indicating whether the field was +// found or an error if there was an issue processing a CRL. +// +// https://datatracker.ietf.org/doc/html/rfc5280#section-5.1.2.6 +// +// When there are no revoked certificates, the revoked certificates list +// MUST be absent. 
+// +// https://datatracker.ietf.org/doc/html/rfc5280#appendix-A.1 page 118 +// +// CertificateList ::= SEQUENCE { +// tbsCertList TBSCertList +// .. +// } +// +// TBSCertList ::= SEQUENCE { +// .. +// revokedCertificates SEQUENCE OF SEQUENCE { +// .. +// } OPTIONAL, +// } +func revokedCertificatesFieldExists(der []byte) (bool, error) { + input := cryptobyte.String(der) + + // Extract the CertificateList + if !input.ReadASN1(&input, cryptobyte_asn1.SEQUENCE) { + return false, errors.New("malformed crl") + } + + var tbs cryptobyte.String + // Extract the TBSCertList from the CertificateList + if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { + return false, errors.New("malformed tbs crl") + } + + // Skip optional version + tbs.SkipOptionalASN1(cryptobyte_asn1.INTEGER) + + // Skip the signature + tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) + + // Skip the issuer + tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) + + // SkipOptionalASN1 is identical to SkipASN1 except that it also does a + // peek. We'll handle the non-optional thisUpdate with these double peeks + // because there's no harm doing so. + skipTime := func(s *cryptobyte.String) { + switch { + case s.PeekASN1Tag(cryptobyte_asn1.UTCTime): + s.SkipOptionalASN1(cryptobyte_asn1.UTCTime) + case s.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime): + s.SkipOptionalASN1(cryptobyte_asn1.GeneralizedTime) + } + } + + // Skip thisUpdate + skipTime(&tbs) + + // Skip optional nextUpdate + skipTime(&tbs) + + // Finally, the field which we care about: revokedCertificates. This will + // not trigger on the next field `crlExtensions` because that has + // context-specific tag [0] and EXPLICIT encoding, not `SEQUENCE` and is + // therefore a safe place to end this venture. + if tbs.PeekASN1Tag(cryptobyte_asn1.SEQUENCE) { + return true, nil + } + + return false, nil +} diff --git a/issuance/issuance.go b/issuance/issuance.go deleted file mode 100644 index ba7fe63c2e6..00000000000 --- a/issuance/issuance.go +++ /dev/null @@ -1,738 +0,0 @@ -package issuance - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math/big" - "strconv" - "strings" - "time" - - ct "github.com/google/certificate-transparency-go" - cttls "github.com/google/certificate-transparency-go/tls" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/linter" - "github.com/letsencrypt/boulder/policyasn1" - "github.com/letsencrypt/boulder/privatekey" - "github.com/letsencrypt/pkcs11key/v4" - "golang.org/x/crypto/ocsp" -) - -// ProfileConfig describes the certificate issuance constraints for all issuers. -type ProfileConfig struct { - AllowMustStaple bool - AllowCTPoison bool - AllowSCTList bool - AllowCommonName bool - - Policies []PolicyInformation - MaxValidityPeriod cmd.ConfigDuration - MaxValidityBackdate cmd.ConfigDuration -} - -// PolicyInformation describes a policy -type PolicyInformation struct { - OID string - Qualifiers []PolicyQualifier -} - -// PolicyQualifier describes a policy qualifier -type PolicyQualifier struct { - Type string - Value string -} - -// IssuerConfig describes the constraints on and URLs used by a single issuer. 
-type IssuerConfig struct { - UseForRSALeaves bool - UseForECDSALeaves bool - - IssuerURL string - OCSPURL string - CRLURL string - - Location IssuerLoc -} - -// IssuerLoc describes the on-disk location and parameters that an issuer -// should use to retrieve its certificate and private key. -// Only one of File, ConfigFile, or PKCS11 should be set. -type IssuerLoc struct { - // A file from which a private key will be read and parsed. - File string - // A file from which a pkcs11key.Config will be read and parsed, if File is not set. - ConfigFile string - // An in-memory pkcs11key.Config, which will be used if ConfigFile is not set. - PKCS11 *pkcs11key.Config - // A file from which a certificate will be read and parsed. - CertFile string - // Number of sessions to open with the HSM. For maximum performance, - // this should be equal to the number of cores in the HSM. Defaults to 1. - NumSessions int -} - -// LoadIssuer loads a signer (private key) and certificate from the locations specified. -func LoadIssuer(location IssuerLoc) (*Certificate, crypto.Signer, error) { - issuerCert, err := LoadCertificate(location.CertFile) - if err != nil { - return nil, nil, err - } - - signer, err := loadSigner(location, issuerCert) - if err != nil { - return nil, nil, err - } - - if !core.KeyDigestEquals(signer.Public(), issuerCert.PublicKey) { - return nil, nil, fmt.Errorf("Issuer key did not match issuer cert %s", location.CertFile) - } - return issuerCert, signer, err -} - -func LoadCertificate(path string) (*Certificate, error) { - cert, err := core.LoadCert(path) - if err != nil { - return nil, err - } - return NewCertificate(cert) -} - -func loadSigner(location IssuerLoc, cert *Certificate) (crypto.Signer, error) { - if location.File != "" { - signer, _, err := privatekey.Load(location.File) - if err != nil { - return nil, err - } - return signer, nil - } - - var pkcs11Config *pkcs11key.Config - if location.ConfigFile != "" { - contents, err := ioutil.ReadFile(location.ConfigFile) - if err != nil { - return nil, err - } - pkcs11Config = new(pkcs11key.Config) - err = json.Unmarshal(contents, pkcs11Config) - if err != nil { - return nil, err - } - } else { - pkcs11Config = location.PKCS11 - } - - if pkcs11Config.Module == "" || - pkcs11Config.TokenLabel == "" || - pkcs11Config.PIN == "" { - return nil, fmt.Errorf("Missing a field in pkcs11Config %#v", pkcs11Config) - } - - numSessions := location.NumSessions - if numSessions <= 0 { - numSessions = 1 - } - - return pkcs11key.NewPool(numSessions, pkcs11Config.Module, - pkcs11Config.TokenLabel, pkcs11Config.PIN, cert.PublicKey) -} - -// Profile is the validated structure created by reading in ProfileConfigs and IssuerConfigs -type Profile struct { - useForRSALeaves bool - useForECDSALeaves bool - - allowMustStaple bool - allowCTPoison bool - allowSCTList bool - allowCommonName bool - - sigAlg x509.SignatureAlgorithm - ocspURL string - crlURL string - issuerURL string - policies *pkix.Extension - - maxBackdate time.Duration - maxValidity time.Duration -} - -func parseOID(oidStr string) (asn1.ObjectIdentifier, error) { - var oid asn1.ObjectIdentifier - for _, a := range strings.Split(oidStr, ".") { - i, err := strconv.Atoi(a) - if err != nil { - return nil, err - } - if i <= 0 { - return nil, errors.New("OID components must be >= 1") - } - oid = append(oid, i) - } - return oid, nil -} - -var stringToQualifierType = map[string]asn1.ObjectIdentifier{ - "id-qt-cps": policyasn1.CPSQualifierOID, -} - -// NewProfile synthesizes the profile config and issuer 
config into a single -// object, and checks various aspects for correctness. -func NewProfile(profileConfig ProfileConfig, issuerConfig IssuerConfig) (*Profile, error) { - if issuerConfig.IssuerURL == "" { - return nil, errors.New("Issuer URL is required") - } - if issuerConfig.OCSPURL == "" { - return nil, errors.New("OCSP URL is required") - } - sp := &Profile{ - useForRSALeaves: issuerConfig.UseForRSALeaves, - useForECDSALeaves: issuerConfig.UseForECDSALeaves, - allowMustStaple: profileConfig.AllowMustStaple, - allowCTPoison: profileConfig.AllowCTPoison, - allowSCTList: profileConfig.AllowSCTList, - allowCommonName: profileConfig.AllowCommonName, - issuerURL: issuerConfig.IssuerURL, - crlURL: issuerConfig.CRLURL, - ocspURL: issuerConfig.OCSPURL, - maxBackdate: profileConfig.MaxValidityBackdate.Duration, - maxValidity: profileConfig.MaxValidityPeriod.Duration, - } - if len(profileConfig.Policies) > 0 { - var policies []policyasn1.PolicyInformation - for _, policyConfig := range profileConfig.Policies { - id, err := parseOID(policyConfig.OID) - if err != nil { - return nil, fmt.Errorf("failed parsing policy OID %q: %s", policyConfig.OID, err) - } - pi := policyasn1.PolicyInformation{Policy: id} - for _, qualifierConfig := range policyConfig.Qualifiers { - qt, ok := stringToQualifierType[qualifierConfig.Type] - if !ok { - return nil, fmt.Errorf("unknown qualifier type: %s", qualifierConfig.Type) - } - pq := policyasn1.PolicyQualifier{ - OID: qt, - Value: qualifierConfig.Value, - } - pi.Qualifiers = append(pi.Qualifiers, pq) - } - policies = append(policies, pi) - } - policyExtBytes, err := asn1.Marshal(policies) - if err != nil { - return nil, err - } - sp.policies = &pkix.Extension{ - Id: asn1.ObjectIdentifier{2, 5, 29, 32}, - Value: policyExtBytes, - } - } - return sp, nil -} - -// requestValid verifies the passed IssuanceRequest against the profile. If the -// request doesn't match the signing profile an error is returned. -func (p *Profile) requestValid(clk clock.Clock, req *IssuanceRequest) error { - switch req.PublicKey.(type) { - case *rsa.PublicKey: - if !p.useForRSALeaves { - return errors.New("cannot sign RSA public keys") - } - case *ecdsa.PublicKey: - if !p.useForECDSALeaves { - return errors.New("cannot sign ECDSA public keys") - } - default: - return errors.New("unsupported public key type") - } - - if !p.allowMustStaple && req.IncludeMustStaple { - return errors.New("must-staple extension cannot be included") - } - - if !p.allowCTPoison && req.IncludeCTPoison { - return errors.New("ct poison extension cannot be included") - } - - if !p.allowSCTList && req.SCTList != nil { - return errors.New("sct list extension cannot be included") - } - - if req.IncludeCTPoison && req.SCTList != nil { - return errors.New("cannot include both ct poison and sct list extensions") - } - - if !p.allowCommonName && req.CommonName != "" { - return errors.New("common name cannot be included") - } - - // The validity period is calculated inclusive of the whole second represented - // by the notAfter timestamp. 
- validity := req.NotAfter.Add(time.Second).Sub(req.NotBefore) - if validity <= 0 { - return errors.New("NotAfter must be after NotBefore") - } - if validity > p.maxValidity { - return fmt.Errorf("validity period is more than the maximum allowed period (%s>%s)", validity, p.maxValidity) - } - backdatedBy := clk.Now().Sub(req.NotBefore) - if backdatedBy > p.maxBackdate { - return fmt.Errorf("NotBefore is backdated more than the maximum allowed period (%s>%s)", backdatedBy, p.maxBackdate) - } - if backdatedBy < 0 { - return errors.New("NotBefore is in the future") - } - - // We use 19 here because a 20-byte serial could produce >20 octets when - // encoded in ASN.1. That happens when the first byte is >0x80. See - // https://letsencrypt.org/docs/a-warm-welcome-to-asn1-and-der/#integer-encoding - if len(req.Serial) > 19 || len(req.Serial) < 9 { - return errors.New("serial must be between 9 and 19 bytes") - } - - return nil -} - -var defaultEKU = []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, -} - -func (p *Profile) generateTemplate(clk clock.Clock) *x509.Certificate { - template := &x509.Certificate{ - SignatureAlgorithm: p.sigAlg, - ExtKeyUsage: defaultEKU, - OCSPServer: []string{p.ocspURL}, - IssuingCertificateURL: []string{p.issuerURL}, - BasicConstraintsValid: true, - } - - if p.crlURL != "" { - template.CRLDistributionPoints = []string{p.crlURL} - } - - if p.policies != nil { - template.ExtraExtensions = []pkix.Extension{*p.policies} - } - - return template -} - -// IssuerID is a statistically-unique small ID computed from a hash over the -// entirety of the issuer certificate. -// DEPRECATED: This identifier is being phased out in favor of IssuerNameID. -// It exists in the database in certificateStatus rows for certs issued prior -// to approximately November 2021, but is not being written for new rows. -type IssuerID int64 - -// IssuerNameID is a statistically-unique small ID which can be computed from -// both CA and end-entity certs to link them together into a validation chain. -// It is computed as a truncated hash over the issuer Subject Name bytes, or -// over the end-entity's Issuer Name bytes, which are required to be equal. -// TODO(#5152): Rename this "IssuerID" when we've fully deprecated the old-style -// IssuerIDs and replaced them with NameIDs. -type IssuerNameID int64 - -// Certificate embeds an *x509.Certificate and represents the added semantics -// that this certificate can be used for issuance. -type Certificate struct { - *x509.Certificate - id IssuerID - nameID IssuerNameID - nameHash [20]byte - keyHash [20]byte -} - -// NewCertificate wraps an in-memory cert in an issuance.Certificate, marking it -// as an issuer cert. It may fail if the certificate does not contain the -// attributes expected of an issuer certificate. -func NewCertificate(ic *x509.Certificate) (*Certificate, error) { - res := Certificate{Certificate: ic} - - // Compute ic.ID() - h := sha256.Sum256(ic.Raw) - res.id = IssuerID(big.NewInt(0).SetBytes(h[:4]).Int64()) - - // Compute ic.NameID() - res.nameID = truncatedHash(ic.RawSubject) - - // Compute ic.NameHash() - res.nameHash = sha1.Sum(ic.RawSubject) - - // Compute ic.KeyHash() - // The issuerKeyHash in OCSP requests is constructed over the DER encoding of - // the public key per RFC6960 (defined in RFC4055 for RSA and RFC5480 for - // ECDSA). 
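// (For reference, the structure unpacked below is, per RFC 5280:
//
//	SubjectPublicKeyInfo ::= SEQUENCE {
//	    algorithm        AlgorithmIdentifier,
//	    subjectPublicKey BIT STRING }
//
// RFC 6960's issuerKeyHash is the SHA-1 of the contents of the subjectPublicKey
// BIT STRING alone, excluding both the algorithm field and the BIT STRING's
// tag, length, and unused-bits count.)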
We can't use MarshalPKIXPublicKey for this since it encodes keys - // using the SPKI structure itself, and we just want the contents of the - // subjectPublicKey for the hash, so we need to extract it ourselves. - var spki struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - _, err := asn1.Unmarshal(ic.RawSubjectPublicKeyInfo, &spki) - if err != nil { - return nil, err - } - res.keyHash = sha1.Sum(spki.PublicKey.RightAlign()) - - return &res, nil -} - -// ID returns the IssuerID (a truncated hash over the raw bytes of the whole -// cert) of this issuer certificate. -// DEPRECATED: Use .NameID() instead. -func (ic *Certificate) ID() IssuerID { - return ic.id -} - -// NameID returns the IssuerNameID (a truncated hash over the raw bytes of the -// Subject Distinguished Name) of this issuer certificate. Useful for storing as -// a lookup key in contexts that don't expect hash collisions. -func (ic *Certificate) NameID() IssuerNameID { - return ic.nameID -} - -// NameHash returns the SHA1 hash over the issuer certificate's Subject -// Distinguished Name. This is one of the values used to uniquely identify the -// issuer cert in an RFC6960 + RFC5019 OCSP request. -func (ic *Certificate) NameHash() [20]byte { - return ic.nameHash -} - -// KeyHash returns the SHA1 hash over the issuer certificate's Subject Public -// Key Info. This is one of the values used to uniquely identify the issuer cert -// in an RFC6960 + RFC5019 OCSP request. -func (ic *Certificate) KeyHash() [20]byte { - return ic.keyHash -} - -// GetIssuerNameID returns the IssuerNameID (a truncated hash over the raw bytes -// of the Issuer Distinguished Name) of the given end-entity certificate. -// Useful for performing lookups in contexts that don't expect hash collisions. -func GetIssuerNameID(ee *x509.Certificate) IssuerNameID { - return truncatedHash(ee.RawIssuer) -} - -// GetOCSPIssuerNameID returns the IssuerNameID (a truncated hash over the raw -// bytes of the Responder Distinguished Name) of the given OCSP Response. -// As per the OCSP spec, it is technically possible for this field to not be -// populated: the OCSP Response can instead contain a SHA-1 hash of the Issuer -// Public Key as the Responder ID. The Go stdlib always uses the DN, though. -func GetOCSPIssuerNameID(resp *ocsp.Response) IssuerNameID { - return truncatedHash(resp.RawResponderName) -} - -// truncatedHash computes a truncated SHA1 hash across arbitrary bytes. Uses -// SHA1 because that is the algorithm most commonly used in OCSP requests. -// PURPOSEFULLY NOT EXPORTED. Exists only to ensure that the implementations of -// Certificate.NameID() and GetIssuerNameID() never diverge. Use those instead. -func truncatedHash(name []byte) IssuerNameID { - h := crypto.SHA1.New() - h.Write(name) - s := h.Sum(nil) - return IssuerNameID(big.NewInt(0).SetBytes(s[:7]).Int64()) -} - -// Issuer is capable of issuing new certificates -// TODO(#5086): make Cert and Signer private when they're no longer needed by ca.internalIssuer -type Issuer struct { - Cert *Certificate - Signer crypto.Signer - Profile *Profile - Linter *linter.Linter - Clk clock.Clock -} - -// NewIssuer constructs an Issuer on the heap, verifying that the profile -// is well-formed. 
-func NewIssuer(cert *Certificate, signer crypto.Signer, profile *Profile, linter *linter.Linter, clk clock.Clock) (*Issuer, error) { - switch k := cert.PublicKey.(type) { - case *rsa.PublicKey: - profile.sigAlg = x509.SHA256WithRSA - case *ecdsa.PublicKey: - switch k.Curve { - case elliptic.P256(): - profile.sigAlg = x509.ECDSAWithSHA256 - case elliptic.P384(): - profile.sigAlg = x509.ECDSAWithSHA384 - default: - return nil, fmt.Errorf("unsupported ECDSA curve: %s", k.Curve.Params().Name) - } - default: - return nil, errors.New("unsupported issuer key type") - } - - if profile.useForRSALeaves || profile.useForECDSALeaves { - if cert.KeyUsage&x509.KeyUsageCertSign == 0 { - return nil, errors.New("end-entity signing cert does not have keyUsage certSign") - } - } - // TODO(#5086): Only do this check for ocsp-issuing issuers. - if cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return nil, errors.New("end-entity ocsp signing cert does not have keyUsage digitalSignature") - } - - i := &Issuer{ - Cert: cert, - Signer: signer, - Profile: profile, - Linter: linter, - Clk: clk, - } - return i, nil -} - -// Algs provides the list of leaf certificate public key algorithms for which -// this issuer is willing to issue. This is not necessarily the same as the -// public key algorithm or signature algorithm in this issuer's own cert. -func (i *Issuer) Algs() []x509.PublicKeyAlgorithm { - var algs []x509.PublicKeyAlgorithm - if i.Profile.useForRSALeaves { - algs = append(algs, x509.RSA) - } - if i.Profile.useForECDSALeaves { - algs = append(algs, x509.ECDSA) - } - return algs -} - -// Name provides the Common Name specified in the issuer's certificate. -func (i *Issuer) Name() string { - return i.Cert.Subject.CommonName -} - -// ID provides a stable ID for an issuer's certificate. This is used for -// identifying which issuer issued a certificate in the certificateStatus table. 
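// (Why the move from IssuerID to IssuerNameID, per the deprecation notes above:
// an IssuerID is a hash over the whole issuer certificate, so it can only be
// computed with that certificate in hand, whereas a NameID can equally be
// recomputed from an end-entity certificate's RawIssuer bytes, letting
// certificateStatus rows be linked back to their issuer without loading every
// historical issuer cert.)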
-func (i *Issuer) ID() IssuerID { - return i.Cert.ID() -} - -var ctPoisonExt = pkix.Extension{ - // OID for CT poison, RFC 6962 (was never assigned a proper id-pe- name) - Id: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}, - Value: asn1.NullBytes, - Critical: true, -} - -// OID for SCT list, RFC 6962 (was never assigned a proper id-pe- name) -var sctListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} - -func generateSCTListExt(scts []ct.SignedCertificateTimestamp) (pkix.Extension, error) { - list := ctx509.SignedCertificateTimestampList{} - for _, sct := range scts { - sctBytes, err := cttls.Marshal(sct) - if err != nil { - return pkix.Extension{}, err - } - list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes}) - } - listBytes, err := cttls.Marshal(list) - if err != nil { - return pkix.Extension{}, err - } - extBytes, err := asn1.Marshal(listBytes) - if err != nil { - return pkix.Extension{}, err - } - return pkix.Extension{ - Id: sctListOID, - Value: extBytes, - }, nil -} - -var mustStapleExt = pkix.Extension{ - // RFC 7633: id-pe-tlsfeature OBJECT IDENTIFIER ::= { id-pe 24 } - Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}, - // ASN.1 encoding of: - // SEQUENCE - // INTEGER 5 - // where "5" is the status_request feature (RFC 6066) - Value: []byte{0x30, 0x03, 0x02, 0x01, 0x05}, -} - -func generateSKID(pk crypto.PublicKey) ([]byte, error) { - pkBytes, err := x509.MarshalPKIXPublicKey(pk) - if err != nil { - return nil, err - } - var pkixPublicKey struct { - Algo pkix.AlgorithmIdentifier - BitString asn1.BitString - } - if _, err := asn1.Unmarshal(pkBytes, &pkixPublicKey); err != nil { - return nil, err - } - skid := sha1.Sum(pkixPublicKey.BitString.Bytes) - return skid[:], nil -} - -// IssuanceRequest describes a certificate issuance request -type IssuanceRequest struct { - PublicKey crypto.PublicKey - - Serial []byte - - NotBefore time.Time - NotAfter time.Time - - CommonName string - DNSNames []string - - IncludeMustStaple bool - IncludeCTPoison bool - SCTList []ct.SignedCertificateTimestamp -} - -// Issue generates a certificate from the provided issuance request and -// signs it. Before signing the certificate with the issuer's private -// key, it is signed using a throwaway key so that it can be linted using -// zlint. If the linting fails, an error is returned and the certificate -// is not signed using the issuer's key. 
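// An aside on the mustStapleExt value above (an easily verified sketch):
// RFC 7633 defines TLSFeature ::= SEQUENCE OF INTEGER, and encoding/asn1
// yields exactly those five bytes for the single status_request (5) feature:
//
//	der, _ := asn1.Marshal([]int{5})
//	// der == []byte{0x30, 0x03, 0x02, 0x01, 0x05}
//	// 0x30 0x03: SEQUENCE, 3 content bytes; 0x02 0x01 0x05: INTEGER 5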
-func (i *Issuer) Issue(req *IssuanceRequest) ([]byte, error) { - // check request is valid according to the issuance profile - err := i.Profile.requestValid(i.Clk, req) - if err != nil { - return nil, err - } - - // generate template from the issuance profile - template := i.Profile.generateTemplate(i.Clk) - - // populate template from the issuance request - template.NotBefore, template.NotAfter = req.NotBefore, req.NotAfter - template.SerialNumber = big.NewInt(0).SetBytes(req.Serial) - if req.CommonName != "" { - template.Subject.CommonName = req.CommonName - } - template.DNSNames = req.DNSNames - template.AuthorityKeyId = i.Cert.SubjectKeyId - skid, err := generateSKID(req.PublicKey) - if err != nil { - return nil, err - } - template.SubjectKeyId = skid - switch req.PublicKey.(type) { - case *rsa.PublicKey: - template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment - case *ecdsa.PublicKey: - template.KeyUsage = x509.KeyUsageDigitalSignature - } - - if req.IncludeCTPoison { - template.ExtraExtensions = append(template.ExtraExtensions, ctPoisonExt) - } else if req.SCTList != nil { - sctListExt, err := generateSCTListExt(req.SCTList) - if err != nil { - return nil, err - } - template.ExtraExtensions = append(template.ExtraExtensions, sctListExt) - } - - if req.IncludeMustStaple { - template.ExtraExtensions = append(template.ExtraExtensions, mustStapleExt) - } - - // check that the tbsCertificate is properly formed by signing it - // with a throwaway key and then linting it using zlint - err = i.Linter.Check(template, req.PublicKey) - if err != nil { - return nil, fmt.Errorf("tbsCertificate linting failed: %w", err) - } - - return x509.CreateCertificate(rand.Reader, template, i.Cert.Certificate, req.PublicKey, i.Signer) -} - -func ContainsMustStaple(extensions []pkix.Extension) bool { - for _, ext := range extensions { - if ext.Id.Equal(mustStapleExt.Id) && bytes.Equal(ext.Value, mustStapleExt.Value) { - return true - } - } - return false -} - -func containsCTPoison(extensions []pkix.Extension) bool { - for _, ext := range extensions { - if ext.Id.Equal(ctPoisonExt.Id) && bytes.Equal(ext.Value, asn1.NullBytes) { - return true - } - } - return false -} - -// RequestFromPrecert constructs a final certificate IssuanceRequest matching -// the provided precertificate. It returns an error if the precertificate doesn't -// contain the CT poison extension. -func RequestFromPrecert(precert *x509.Certificate, scts []ct.SignedCertificateTimestamp) (*IssuanceRequest, error) { - if !containsCTPoison(precert.Extensions) { - return nil, errors.New("provided certificate doesn't contain the CT poison extension") - } - return &IssuanceRequest{ - PublicKey: precert.PublicKey, - Serial: precert.SerialNumber.Bytes(), - NotBefore: precert.NotBefore, - NotAfter: precert.NotAfter, - CommonName: precert.Subject.CommonName, - DNSNames: precert.DNSNames, - IncludeMustStaple: ContainsMustStaple(precert.Extensions), - SCTList: scts, - }, nil -} - -// LoadChain takes a list of filenames containing pem-formatted certificates, -// and returns a chain representing all of those certificates in order. It -// ensures that the resulting chain is valid. The final file is expected to be -// a root certificate, which the chain will be verified against, but which will -// not be included in the resulting chain. 
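// A usage sketch (hypothetical file paths, not taken from any repo config):
//
//	chain, err := LoadChain([]string{
//		"/path/to/intermediate.pem", // becomes chain[0]
//		"/path/to/root.pem",         // verified against, then dropped
//	})
//
// On success chain holds only the intermediate; the root is used solely to
// verify the final signature and is not returned.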
-func LoadChain(certFiles []string) ([]*Certificate, error) { - if len(certFiles) < 2 { - return nil, errors.New( - "each chain must have at least two certificates: an intermediate and a root") - } - - // Pre-load all the certificates to make validation easier. - certs := make([]*Certificate, len(certFiles)) - var err error - for i := 0; i < len(certFiles); i++ { - certs[i], err = LoadCertificate(certFiles[i]) - if err != nil { - return nil, fmt.Errorf("failed to load certificate %q: %w", certFiles[i], err) - } - } - - // Iterate over all certs except for the last, checking that their signature - // comes from the next cert in the list. - chain := make([]*Certificate, len(certFiles)-1) - for i := 0; i < len(certs)-1; i++ { - err = certs[i].CheckSignatureFrom(certs[i+1].Certificate) - if err != nil { - return nil, fmt.Errorf("failed to verify chain: %w", err) - } - chain[i] = certs[i] - } - - // Verify that the last cert is self-signed. - err = certs[len(certs)-1].CheckSignatureFrom(certs[len(certs)-1].Certificate) - if err != nil { - return nil, fmt.Errorf( - "final cert in chain must be a self-signed (used only for validation): %w", err) - } - - return chain, nil -} diff --git a/issuance/issuance_test.go b/issuance/issuance_test.go deleted file mode 100644 index 79061a6cb2d..00000000000 --- a/issuance/issuance_test.go +++ /dev/null @@ -1,772 +0,0 @@ -package issuance - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "io/ioutil" - "math/big" - "os" - "testing" - "time" - - ct "github.com/google/certificate-transparency-go" - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/linter" - "github.com/letsencrypt/boulder/policyasn1" - "github.com/letsencrypt/boulder/test" -) - -func defaultProfileConfig() ProfileConfig { - return ProfileConfig{ - AllowCommonName: true, - AllowCTPoison: true, - AllowSCTList: true, - AllowMustStaple: true, - Policies: []PolicyInformation{ - {OID: "1.2.3"}, - }, - MaxValidityPeriod: cmd.ConfigDuration{Duration: time.Hour}, - MaxValidityBackdate: cmd.ConfigDuration{Duration: time.Hour}, - } -} - -func defaultIssuerConfig() IssuerConfig { - return IssuerConfig{ - UseForECDSALeaves: true, - UseForRSALeaves: true, - IssuerURL: "http://issuer-url", - OCSPURL: "http://ocsp-url", - } -} - -func defaultProfile() *Profile { - p, _ := NewProfile(defaultProfileConfig(), defaultIssuerConfig()) - return p -} - -var issuerCert *Certificate -var issuerSigner *ecdsa.PrivateKey - -func TestMain(m *testing.M) { - tk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - cmd.FailOnError(err, "failed to generate test key") - issuerSigner = tk - template := &x509.Certificate{ - SerialNumber: big.NewInt(123), - BasicConstraintsValid: true, - IsCA: true, - Subject: pkix.Name{ - CommonName: "big ca", - }, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, - } - issuer, err := x509.CreateCertificate(rand.Reader, template, template, tk.Public(), tk) - cmd.FailOnError(err, "failed to generate test issuer") - cert, err := x509.ParseCertificate(issuer) - cmd.FailOnError(err, "failed to parse test issuer") - issuerCert = &Certificate{Certificate: cert} - os.Exit(m.Run()) -} - -func TestNewProfilePolicies(t *testing.T) { - config := defaultProfileConfig() - config.Policies = append(config.Policies, PolicyInformation{ - OID: "1.2.3.4", - Qualifiers: 
[]PolicyQualifier{ - { - Type: "id-qt-cps", - Value: "cps-url", - }, - }, - }) - profile, err := NewProfile(config, defaultIssuerConfig()) - test.AssertNotError(t, err, "NewProfile failed") - test.AssertDeepEquals(t, *profile, Profile{ - useForRSALeaves: true, - useForECDSALeaves: true, - allowMustStaple: true, - allowCTPoison: true, - allowSCTList: true, - allowCommonName: true, - issuerURL: "http://issuer-url", - ocspURL: "http://ocsp-url", - policies: &pkix.Extension{ - Id: asn1.ObjectIdentifier{2, 5, 29, 32}, - Value: []byte{48, 36, 48, 4, 6, 2, 42, 3, 48, 28, 6, 3, 42, 3, 4, 48, 21, 48, 19, 6, 8, 43, 6, 1, 5, 5, 7, 2, 1, 22, 7, 99, 112, 115, 45, 117, 114, 108}, - }, - maxBackdate: time.Hour, - maxValidity: time.Hour, - }) - var policies []policyasn1.PolicyInformation - _, err = asn1.Unmarshal(profile.policies.Value, &policies) - test.AssertNotError(t, err, "failed to parse policies extension") - test.AssertEquals(t, len(policies), 2) - test.AssertDeepEquals(t, policies[0], policyasn1.PolicyInformation{ - Policy: asn1.ObjectIdentifier{1, 2, 3}, - }) - test.AssertDeepEquals(t, policies[1], policyasn1.PolicyInformation{ - Policy: asn1.ObjectIdentifier{1, 2, 3, 4}, - Qualifiers: []policyasn1.PolicyQualifier{{ - OID: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}, - Value: "cps-url", - }}, - }) -} - -func TestNewProfileNoIssuerURL(t *testing.T) { - _, err := NewProfile(ProfileConfig{}, IssuerConfig{}) - test.AssertError(t, err, "NewProfile didn't fail with no issuer URL") - test.AssertEquals(t, err.Error(), "Issuer URL is required") -} - -func TestNewProfileNoOCSPURL(t *testing.T) { - _, err := NewProfile(ProfileConfig{}, IssuerConfig{IssuerURL: "issuer-url"}) - test.AssertError(t, err, "NewProfile didn't fail with no OCSP URL") - test.AssertEquals(t, err.Error(), "OCSP URL is required") -} - -func TestNewProfileInvalidOID(t *testing.T) { - _, err := NewProfile(ProfileConfig{ - Policies: []PolicyInformation{{ - OID: "a.b.c", - }}, - }, defaultIssuerConfig()) - test.AssertError(t, err, "NewProfile didn't fail with unknown policy qualifier type") - test.AssertEquals(t, err.Error(), "failed parsing policy OID \"a.b.c\": strconv.Atoi: parsing \"a\": invalid syntax") -} - -func TestNewProfileUnknownQualifierType(t *testing.T) { - _, err := NewProfile(ProfileConfig{ - Policies: []PolicyInformation{{ - OID: "1.2.3", - Qualifiers: []PolicyQualifier{{ - Type: "asd", - Value: "bad", - }}, - }}, - }, defaultIssuerConfig()) - test.AssertError(t, err, "NewProfile didn't fail with unknown policy qualifier type") - test.AssertEquals(t, err.Error(), "unknown qualifier type: asd") -} - -func TestRequestValid(t *testing.T) { - fc := clock.NewFake() - fc.Add(time.Hour * 24) - tests := []struct { - name string - profile *Profile - request *IssuanceRequest - expectedError string - }{ - { - name: "unsupported key type", - profile: &Profile{}, - request: &IssuanceRequest{PublicKey: &dsa.PublicKey{}}, - expectedError: "unsupported public key type", - }, - { - name: "cannot sign rsa", - profile: &Profile{}, - request: &IssuanceRequest{PublicKey: &rsa.PublicKey{}}, - expectedError: "cannot sign RSA public keys", - }, - { - name: "cannot sign ecdsa", - profile: &Profile{}, - request: &IssuanceRequest{PublicKey: &ecdsa.PublicKey{}}, - expectedError: "cannot sign ECDSA public keys", - }, - { - name: "must staple not allowed", - profile: &Profile{ - useForECDSALeaves: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - IncludeMustStaple: true, - }, - expectedError: "must-staple extension cannot 
be included", - }, - { - name: "ct poison not allowed", - profile: &Profile{ - useForECDSALeaves: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - IncludeCTPoison: true, - }, - expectedError: "ct poison extension cannot be included", - }, - { - name: "sct list not allowed", - profile: &Profile{ - useForECDSALeaves: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - SCTList: []ct.SignedCertificateTimestamp{}, - }, - expectedError: "sct list extension cannot be included", - }, - { - name: "sct list and ct poison not allowed", - profile: &Profile{ - useForECDSALeaves: true, - allowCTPoison: true, - allowSCTList: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - IncludeCTPoison: true, - SCTList: []ct.SignedCertificateTimestamp{}, - }, - expectedError: "cannot include both ct poison and sct list extensions", - }, - { - name: "common name not allowed", - profile: &Profile{ - useForECDSALeaves: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - CommonName: "cn", - }, - expectedError: "common name cannot be included", - }, - { - name: "negative validity", - profile: &Profile{ - useForECDSALeaves: true, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now().Add(time.Hour), - NotAfter: fc.Now(), - }, - expectedError: "NotAfter must be after NotBefore", - }, - { - name: "validity larger than max", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Minute, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }, - expectedError: "validity period is more than the maximum allowed period (1h0m0s>1m0s)", - }, - { - name: "validity larger than max due to inclusivity", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour), - }, - expectedError: "validity period is more than the maximum allowed period (1h0m1s>1h0m0s)", - }, - { - name: "validity backdated more than max", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour * 2, - maxBackdate: time.Hour, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now().Add(-time.Hour * 2), - NotAfter: fc.Now().Add(-time.Hour), - }, - expectedError: "NotBefore is backdated more than the maximum allowed period (2h0m0s>1h0m0s)", - }, - { - name: "validity is forward dated", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour * 2, - maxBackdate: time.Hour, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now().Add(time.Hour), - NotAfter: fc.Now().Add(time.Hour * 2), - }, - expectedError: "NotBefore is in the future", - }, - { - name: "serial too short", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour * 2, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour), - Serial: []byte{0, 1, 2, 3, 4, 5, 6, 7}, - }, - expectedError: "serial must be between 9 and 19 bytes", - }, - { - name: "serial too long", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour * 2, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour), - Serial: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, - }, - 
expectedError: "serial must be between 9 and 19 bytes", - }, - { - name: "good", - profile: &Profile{ - useForECDSALeaves: true, - maxValidity: time.Hour * 2, - }, - request: &IssuanceRequest{ - PublicKey: &ecdsa.PublicKey{}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - err := tc.profile.requestValid(fc, tc.request) - if err != nil { - if tc.expectedError == "" { - t.Errorf("failed with unexpected error: %s", err) - } else if tc.expectedError != err.Error() { - t.Errorf("failed with unexpected error, wanted: %q, got: %q", tc.expectedError, err.Error()) - } - return - } else if tc.expectedError != "" { - t.Errorf("didn't fail, expected %q", tc.expectedError) - } - }) - } -} - -func TestGenerateTemplate(t *testing.T) { - tests := []struct { - name string - profile *Profile - expectedTemplate *x509.Certificate - }{ - { - name: "crl url", - profile: &Profile{ - crlURL: "crl-url", - sigAlg: x509.SHA256WithRSA, - }, - expectedTemplate: &x509.Certificate{ - BasicConstraintsValid: true, - SignatureAlgorithm: x509.SHA256WithRSA, - ExtKeyUsage: defaultEKU, - IssuingCertificateURL: []string{""}, - OCSPServer: []string{""}, - CRLDistributionPoints: []string{"crl-url"}, - }, - }, - { - name: "include policies", - profile: &Profile{ - sigAlg: x509.SHA256WithRSA, - policies: &pkix.Extension{ - Id: asn1.ObjectIdentifier{1, 2, 3}, - Value: []byte{4, 5, 6}, - }, - }, - expectedTemplate: &x509.Certificate{ - BasicConstraintsValid: true, - SignatureAlgorithm: x509.SHA256WithRSA, - ExtKeyUsage: defaultEKU, - IssuingCertificateURL: []string{""}, - OCSPServer: []string{""}, - ExtraExtensions: []pkix.Extension{ - { - Id: asn1.ObjectIdentifier{1, 2, 3}, - Value: []byte{4, 5, 6}, - }, - }, - }, - }, - } - fc := clock.NewFake() - fc.Set(time.Time{}.Add(time.Hour)) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - template := tc.profile.generateTemplate(fc) - test.AssertDeepEquals(t, *template, *tc.expectedTemplate) - }) - } -} - -func TestNewIssuer(t *testing.T) { - _, err := NewIssuer( - issuerCert, - issuerSigner, - defaultProfile(), - &linter.Linter{}, - clock.NewFake(), - ) - test.AssertNotError(t, err, "NewIssuer failed") -} - -func TestNewIssuerUnsupportedKeyType(t *testing.T) { - _, err := NewIssuer( - &Certificate{ - Certificate: &x509.Certificate{ - PublicKey: &ed25519.PublicKey{}, - }, - }, - &ed25519.PrivateKey{}, - defaultProfile(), - &linter.Linter{}, - clock.NewFake(), - ) - test.AssertError(t, err, "NewIssuer didn't fail") - test.AssertEquals(t, err.Error(), "unsupported issuer key type") -} - -func TestNewIssuerNoCertSign(t *testing.T) { - _, err := NewIssuer( - &Certificate{ - Certificate: &x509.Certificate{ - PublicKey: &ecdsa.PublicKey{ - Curve: elliptic.P256(), - }, - KeyUsage: 0, - }, - }, - issuerSigner, - defaultProfile(), - &linter.Linter{}, - clock.NewFake(), - ) - test.AssertError(t, err, "NewIssuer didn't fail") - test.AssertEquals(t, err.Error(), "end-entity signing cert does not have keyUsage certSign") -} - -func TestNewIssuerNoDigitalSignature(t *testing.T) { - _, err := NewIssuer( - &Certificate{ - Certificate: &x509.Certificate{ - PublicKey: &ecdsa.PublicKey{ - Curve: elliptic.P256(), - }, - KeyUsage: x509.KeyUsageCertSign, - }, - }, - issuerSigner, - defaultProfile(), - &linter.Linter{}, - clock.NewFake(), - ) - test.AssertError(t, err, "NewIssuer didn't fail") - test.AssertEquals(t, err.Error(), "end-entity ocsp signing 
cert does not have keyUsage digitalSignature") -} - -func TestNewIssuerOCSPOnly(t *testing.T) { - p := defaultProfile() - p.useForRSALeaves = false - p.useForECDSALeaves = false - _, err := NewIssuer( - &Certificate{ - Certificate: &x509.Certificate{ - PublicKey: &ecdsa.PublicKey{ - Curve: elliptic.P256(), - }, - KeyUsage: x509.KeyUsageDigitalSignature, - }, - }, - issuerSigner, - p, - &linter.Linter{}, - clock.NewFake(), - ) - test.AssertNotError(t, err, "NewIssuer failed") -} - -func TestIssue(t *testing.T) { - for _, tc := range []struct { - name string - generateFunc func() (crypto.Signer, error) - ku x509.KeyUsage - }{ - { - name: "RSA", - generateFunc: func() (crypto.Signer, error) { - return rsa.GenerateKey(rand.Reader, 2048) - }, - ku: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, - }, - { - name: "ECDSA", - generateFunc: func() (crypto.Signer, error) { - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - }, - ku: x509.KeyUsageDigitalSignature, - }, - } { - t.Run(tc.name, func(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New( - issuerCert.Certificate, - issuerSigner, - []string{"w_ct_sct_policy_count_unsatisfied", "n_subject_common_name_included"}, - ) - test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := tc.generateFunc() - test.AssertNotError(t, err, "failed to generate test key") - certBytes, err := signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - CommonName: "example.com", - DNSNames: []string{"example.com"}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertNotError(t, err, "Issue failed") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse certificate") - err = cert.CheckSignatureFrom(issuerCert.Certificate) - test.AssertNotError(t, err, "signature validation failed") - test.AssertDeepEquals(t, cert.DNSNames, []string{"example.com"}) - test.AssertEquals(t, cert.Subject.CommonName, "example.com") - test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) - test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) - test.AssertEquals(t, len(cert.Extensions), 8) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies - test.AssertEquals(t, cert.KeyUsage, tc.ku) - }) - } -} - -func TestIssueRSA(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New( - issuerCert.Certificate, - issuerSigner, - []string{"w_ct_sct_policy_count_unsatisfied"}, - ) - test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "failed to generate test key") - certBytes, err := signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DNSNames: []string{"example.com"}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertNotError(t, err, "Issue failed") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse certificate") - err = cert.CheckSignatureFrom(issuerCert.Certificate) - test.AssertNotError(t, err, "signature validation failed") - test.AssertByteEquals(t, 
cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) - test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) - test.AssertEquals(t, len(cert.Extensions), 8) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies - test.AssertEquals(t, cert.KeyUsage, x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment) -} - -func TestIssueCTPoison(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New( - issuerCert.Certificate, - issuerSigner, - []string{"w_ct_sct_policy_count_unsatisfied"}, - ) - test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - certBytes, err := signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DNSNames: []string{"example.com"}, - IncludeCTPoison: true, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertNotError(t, err, "Issue failed") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse certificate") - err = cert.CheckSignatureFrom(issuerCert.Certificate) - test.AssertNotError(t, err, "signature validation failed") - test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) - test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) - test.AssertEquals(t, len(cert.Extensions), 9) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies, CT Poison - test.AssertDeepEquals(t, cert.Extensions[8], ctPoisonExt) -} - -func TestIssueSCTList(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New( - issuerCert.Certificate, - issuerSigner, - []string{"w_ct_sct_policy_count_unsatisfied"}, - ) - test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - certBytes, err := signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DNSNames: []string{"example.com"}, - SCTList: []ct.SignedCertificateTimestamp{ - {}, - }, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertNotError(t, err, "Issue failed") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse certificate") - err = cert.CheckSignatureFrom(issuerCert.Certificate) - test.AssertNotError(t, err, "signature validation failed") - test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) - test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) - test.AssertEquals(t, len(cert.Extensions), 9) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies, SCT list - test.AssertDeepEquals(t, cert.Extensions[8], pkix.Extension{ - Id: sctListOID, - Value: []byte{4, 51, 0, 49, 0, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }) -} - -func TestIssueMustStaple(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New( - issuerCert.Certificate, - issuerSigner, - []string{"w_ct_sct_policy_count_unsatisfied"}, - ) - 
test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - certBytes, err := signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DNSNames: []string{"example.com"}, - IncludeMustStaple: true, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertNotError(t, err, "Issue failed") - cert, err := x509.ParseCertificate(certBytes) - test.AssertNotError(t, err, "failed to parse certificate") - err = cert.CheckSignatureFrom(issuerCert.Certificate) - test.AssertNotError(t, err, "signature validation failed") - test.AssertByteEquals(t, cert.SerialNumber.Bytes(), []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) - test.AssertDeepEquals(t, cert.PublicKey, pk.Public()) - test.AssertEquals(t, len(cert.Extensions), 9) // Constraints, KU, EKU, SKID, AKID, AIA, SAN, Policies, Must-Staple - test.AssertDeepEquals(t, cert.Extensions[8], mustStapleExt) -} - -func TestIssueBadLint(t *testing.T) { - fc := clock.NewFake() - fc.Set(time.Now()) - linter, err := linter.New(issuerCert.Certificate, issuerSigner, []string{}) - test.AssertNotError(t, err, "failed to create linter") - signer, err := NewIssuer(issuerCert, issuerSigner, defaultProfile(), linter, fc) - test.AssertNotError(t, err, "NewIssuer failed") - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "failed to generate test key") - _, err = signer.Issue(&IssuanceRequest{ - PublicKey: pk.Public(), - Serial: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DNSNames: []string{"example.com"}, - NotBefore: fc.Now(), - NotAfter: fc.Now().Add(time.Hour - time.Second), - }) - test.AssertError(t, err, "Issue didn't fail") - test.AssertEquals(t, err.Error(), "tbsCertificate linting failed: failed lints: w_ct_sct_policy_count_unsatisfied") -} - -func TestLoadChain_Valid(t *testing.T) { - chain, err := LoadChain([]string{ - "../test/test-ca-cross.pem", - "../test/test-root2.pem", - }) - test.AssertNotError(t, err, "Should load valid chain") - - expectedIssuer, err := core.LoadCert("../test/test-ca-cross.pem") - test.AssertNotError(t, err, "Failed to load test issuer") - - chainIssuer := chain[0] - test.AssertNotNil(t, chainIssuer, "Failed to decode chain PEM") - - test.AssertByteEquals(t, chainIssuer.Raw, expectedIssuer.Raw) -} - -func TestLoadChain_TooShort(t *testing.T) { - _, err := LoadChain([]string{"/path/to/one/cert.pem"}) - test.AssertError(t, err, "Should reject too-short chain") -} - -func TestLoadChain_Unloadable(t *testing.T) { - _, err := LoadChain([]string{ - "does-not-exist.pem", - "../test/test-root2.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") - - _, err = LoadChain([]string{ - "../test/test-ca-cross.pem", - "does-not-exist.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") - - invalidPEMFile, _ := ioutil.TempFile("", "invalid.pem") - err = ioutil.WriteFile(invalidPEMFile.Name(), []byte(""), 0640) - test.AssertNotError(t, err, "Error writing invalid PEM tmp file") - _, err = LoadChain([]string{ - invalidPEMFile.Name(), - "../test/test-root2.pem", - }) - test.AssertError(t, err, "Should reject unloadable chain") -} - -func TestLoadChain_InvalidSig(t *testing.T) { - _, err := LoadChain([]string{ - "../test/test-root2.pem", - "../test/test-ca-cross.pem", - }) 
- test.AssertError(t, err, "Should reject invalid signature") -} diff --git a/issuance/issuer.go b/issuance/issuer.go new file mode 100644 index 00000000000..9356dc9a78b --- /dev/null +++ b/issuance/issuer.go @@ -0,0 +1,373 @@ +package issuance + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "math/big" + "os" + "slices" + "strings" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/linter" + "github.com/letsencrypt/boulder/privatekey" + "github.com/letsencrypt/pkcs11key/v4" +) + +// ----- Name ID ----- + +// NameID is a statistically-unique small ID which can be computed from +// both CA and end-entity certs to link them together into a validation chain. +// It is computed as a truncated hash over the issuer Subject Name bytes, or +// over the end-entity's Issuer Name bytes, which are required to be equal. +type NameID int64 + +// SubjectNameID returns the NameID (a truncated hash over the raw bytes of a +// Distinguished Name) of this issuer certificate's Subject. Useful for storing +// as a lookup key in contexts that don't expect hash collisions. +func SubjectNameID(ic *Certificate) NameID { + return truncatedHash(ic.RawSubject) +} + +// IssuerNameID returns the IssuerNameID (a truncated hash over the raw bytes +// of the Issuer Distinguished Name) of the given end-entity certificate. +// Useful for performing lookups in contexts that don't expect hash collisions. +func IssuerNameID(ee *x509.Certificate) NameID { + return truncatedHash(ee.RawIssuer) +} + +// truncatedHash computes a truncated SHA1 hash across arbitrary bytes. Uses +// SHA1 because that is the algorithm most commonly used in OCSP requests. +// PURPOSEFULLY NOT EXPORTED. Exists only to ensure that the implementations of +// SubjectNameID() and IssuerNameID() never diverge. Use those instead. +func truncatedHash(name []byte) NameID { + h := crypto.SHA1.New() + h.Write(name) + s := h.Sum(nil) + return NameID(big.NewInt(0).SetBytes(s[:7]).Int64()) +} + +// ----- Issuer Certificates ----- + +// Certificate embeds an *x509.Certificate and represents the added semantics +// that this certificate is a CA certificate. +type Certificate struct { + *x509.Certificate + // nameID is stored here simply for the sake of precomputation. + nameID NameID +} + +// NameID is equivalent to SubjectNameID(ic), but faster because it is +// precomputed. +func (ic *Certificate) NameID() NameID { + return ic.nameID +} + +// NewCertificate wraps an in-memory cert in an issuance.Certificate, marking it +// as an issuer cert. It may fail if the certificate does not contain the +// attributes expected of an issuer certificate. +func NewCertificate(ic *x509.Certificate) (*Certificate, error) { + if !ic.IsCA { + return nil, errors.New("certificate is not a CA certificate") + } + + res := Certificate{ic, 0} + res.nameID = SubjectNameID(&res) + return &res, nil +} + +func LoadCertificate(path string) (*Certificate, error) { + cert, err := core.LoadCert(path) + if err != nil { + return nil, fmt.Errorf("loading issuer certificate: %w", err) + } + return NewCertificate(cert) +} + +// LoadChain takes a list of filenames containing pem-formatted certificates, +// and returns a chain representing all of those certificates in order. It +// ensures that the resulting chain is valid. 
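// (Stepping back to the NameID helpers above, a sketch with hypothetical paths
// of how they tie a chain together: an end-entity certificate's RawIssuer bytes
// must equal its issuer's RawSubject bytes, so the two truncated hashes agree.
//
//	ic, _ := LoadCertificate("intermediate.pem")
//	ee, _ := core.LoadCert("leaf.pem")
//	linked := IssuerNameID(ee) == ic.NameID() // true when ee names ic's Subject
//
// Truncating the SHA-1 to seven bytes also guarantees the result fits a NameID:
// seven bytes is at most 2^56-1, well below the int64 maximum of 2^63-1, so the
// conversion never overflows or goes negative.)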
The final file is expected to be +// a root certificate, which the chain will be verified against, but which will +// not be included in the resulting chain. +func LoadChain(certFiles []string) ([]*Certificate, error) { + if len(certFiles) < 2 { + return nil, errors.New( + "each chain must have at least two certificates: an intermediate and a root") + } + + // Pre-load all the certificates to make validation easier. + certs := make([]*Certificate, len(certFiles)) + var err error + for i := range len(certFiles) { + certs[i], err = LoadCertificate(certFiles[i]) + if err != nil { + return nil, fmt.Errorf("failed to load certificate %q: %w", certFiles[i], err) + } + } + + // Iterate over all certs except for the last, checking that their signature + // comes from the next cert in the list. + chain := make([]*Certificate, len(certFiles)-1) + for i := range len(certs) - 1 { + err = certs[i].CheckSignatureFrom(certs[i+1].Certificate) + if err != nil { + return nil, fmt.Errorf("failed to verify signature from %q to %q (%q to %q): %w", + certs[i+1].Subject, certs[i].Subject, certFiles[i+1], certFiles[i], err) + } + chain[i] = certs[i] + } + + // Verify that the last cert is self-signed. + lastCert := certs[len(certs)-1] + err = lastCert.CheckSignatureFrom(lastCert.Certificate) + if err != nil { + return nil, fmt.Errorf( + "final cert in chain (%q; %q) must be self-signed (used only for validation): %w", + lastCert.Subject, certFiles[len(certFiles)-1], err) + } + + return chain, nil +} + +// ----- Issuers with Signers ----- + +// IssuerConfig describes the constraints on and URLs used by a single issuer. +type IssuerConfig struct { + // Deprecated: Populate IssuerConfig.Profiles to ensure "Active" + Active bool + + // Profiles is the list of profiles for which this issuer is willing to issue. + // The names listed here must match the names of configured profiles (see + // cmd/ca/main.go's Config.Issuance.CertProfiles and issuance/cert.go's + // ProfileConfig). If Profiles is not empty then the issuer can be used + // to sign precertificates and final certificates. All issuers, regardless + // if this field is empty or not, can be used to sign CRLs. All issuers + // with a profile(s) of a given key type (RSA or ECDSA) are part of a pool + // and each precertificate will be issued randomly from a selected pool. + // The selection of which pool depends on the precertificate's key algorithm. + Profiles []string `validate:"dive,alphanum,min=1,max=32"` + + IssuerURL string `validate:"required,url"` + CRLURLBase string `validate:"required,url,startswith=http://,endswith=/"` + + // Number of CRL shards. Must be positive, but can be 1 for no sharding. + CRLShards int `validate:"required,min=1"` + + Location IssuerLoc +} + +// IssuerLoc describes the on-disk location and parameters that an issuer +// should use to retrieve its certificate and private key. +// Only one of File, ConfigFile, or PKCS11 should be set. +type IssuerLoc struct { + // A file from which a private key will be read and parsed. + File string `validate:"required_without_all=ConfigFile PKCS11"` + // A file from which a pkcs11key.Config will be read and parsed, if File is not set. + ConfigFile string `validate:"required_without_all=PKCS11 File"` + // An in-memory pkcs11key.Config, which will be used if ConfigFile is not set. + PKCS11 *pkcs11key.Config `validate:"required_without_all=ConfigFile File"` + // A file from which a certificate will be read and parsed. 
+ CertFile string `validate:"required"` + // Number of sessions to open with the HSM. For maximum performance, + // this should be equal to the number of cores in the HSM. Defaults to 1. + NumSessions int +} + +// Issuer is capable of issuing new certificates. +type Issuer struct { + // TODO(#7159): make Cert, Signer, and Linter private when all signing ops + // are handled through this package (e.g. the CA doesn't need direct access + // while signing CRLs anymore). + Cert *Certificate + Signer crypto.Signer + Linter *linter.Linter + + keyAlg x509.PublicKeyAlgorithm + sigAlg x509.SignatureAlgorithm + + // Used to set the Authority Information Access caIssuers URL in issued + // certificates. + issuerURL string + // Used to set the Issuing Distribution Point extension in issued CRLs + // and the CRL Distribution Point extension in issued certs. + crlURLBase string + + crlShards int + + // profiles is a list of the names of profiles that this issuer is willing to + // issue for. + profiles []string + + clk clock.Clock +} + +// newIssuer constructs a new Issuer from the in-memory certificate and signer. +// It exists as a helper for LoadIssuer to make testing simpler. +func newIssuer(config IssuerConfig, cert *Certificate, signer crypto.Signer, clk clock.Clock) (*Issuer, error) { + var keyAlg x509.PublicKeyAlgorithm + var sigAlg x509.SignatureAlgorithm + switch k := cert.PublicKey.(type) { + case *rsa.PublicKey: + keyAlg = x509.RSA + sigAlg = x509.SHA256WithRSA + case *ecdsa.PublicKey: + keyAlg = x509.ECDSA + switch k.Curve { + case elliptic.P256(): + sigAlg = x509.ECDSAWithSHA256 + case elliptic.P384(): + sigAlg = x509.ECDSAWithSHA384 + default: + return nil, fmt.Errorf("unsupported ECDSA curve: %q", k.Curve.Params().Name) + } + default: + return nil, errors.New("unsupported issuer key type") + } + + if config.IssuerURL == "" { + return nil, errors.New("issuer URL is required") + } + if config.CRLURLBase == "" { + return nil, errors.New("crlURLBase is required") + } + if !strings.HasPrefix(config.CRLURLBase, "http://") { + return nil, fmt.Errorf("crlURLBase must use HTTP scheme, got %q", config.CRLURLBase) + } + if !strings.HasSuffix(config.CRLURLBase, "/") { + return nil, fmt.Errorf("crlURLBase must end with exactly one forward slash, got %q", config.CRLURLBase) + } + if config.CRLShards <= 0 { + return nil, errors.New("number of CRL shards is required") + } + + // We require that all of our issuers be capable of both issuing certs and + // providing revocation information. + if cert.KeyUsage&x509.KeyUsageCertSign == 0 { + return nil, errors.New("end-entity signing cert does not have keyUsage certSign") + } + if cert.KeyUsage&x509.KeyUsageCRLSign == 0 { + return nil, errors.New("end-entity signing cert does not have keyUsage crlSign") + } + if cert.KeyUsage&x509.KeyUsageDigitalSignature == 0 { + return nil, errors.New("end-entity signing cert does not have keyUsage digitalSignature") + } + + lintSigner, err := linter.New(cert.Certificate, signer) + if err != nil { + return nil, fmt.Errorf("creating fake lint signer: %w", err) + } + + i := &Issuer{ + Cert: cert, + Signer: signer, + Linter: lintSigner, + keyAlg: keyAlg, + sigAlg: sigAlg, + issuerURL: config.IssuerURL, + crlURLBase: config.CRLURLBase, + crlShards: config.CRLShards, + profiles: config.Profiles, + clk: clk, + } + return i, nil +} + +// KeyType returns either x509.RSA or x509.ECDSA, depending on whether the +// issuer has an RSA or ECDSA keypair. 
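// (A sketch of that routing, assuming a hypothetical issuers slice: a CA
// holding several issuers can bucket them by KeyType and draw from the bucket
// that matches the precertificate key's algorithm, giving the per-key-type
// pools described in IssuerConfig.Profiles above.
//
//	pools := map[x509.PublicKeyAlgorithm][]*Issuer{}
//	for _, iss := range issuers {
//		pools[iss.KeyType()] = append(pools[iss.KeyType()], iss)
//	}
//	candidates := pools[x509.ECDSA] // e.g. for an ECDSA leaf key
//
// )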
This is useful for determining which +// issuance requests should be routed to this issuer. +func (i *Issuer) KeyType() x509.PublicKeyAlgorithm { + return i.keyAlg +} + +// IsActive is true if the issuer is willing to issue precertificates, and false +// if the issuer is only willing to issue final certificates and CRLs. +func (i *Issuer) IsActive() bool { + return len(i.profiles) > 0 +} + +// Name provides the Common Name specified in the issuer's certificate. +func (i *Issuer) Name() string { + return i.Cert.Subject.CommonName +} + +// NameID provides the NameID of the issuer's certificate. +func (i *Issuer) NameID() NameID { + return i.Cert.NameID() +} + +// Profiles returns the set of profiles that this issuer can issue for. +func (i *Issuer) Profiles() []string { + return slices.Clone(i.profiles) +} + +// LoadIssuer constructs a new Issuer, loading its certificate from disk and its +// private key material from the indicated location. It also verifies that the +// issuer metadata (such as AIA URLs) is well-formed. +func LoadIssuer(config IssuerConfig, clk clock.Clock) (*Issuer, error) { + issuerCert, err := LoadCertificate(config.Location.CertFile) + if err != nil { + return nil, err + } + + signer, err := loadSigner(config.Location, issuerCert.PublicKey) + if err != nil { + return nil, err + } + + if !core.KeyDigestEquals(signer.Public(), issuerCert.PublicKey) { + return nil, fmt.Errorf("issuer key did not match issuer cert %q", config.Location.CertFile) + } + + return newIssuer(config, issuerCert, signer, clk) +} + +func loadSigner(location IssuerLoc, pubkey crypto.PublicKey) (crypto.Signer, error) { + if location.File == "" && location.ConfigFile == "" && location.PKCS11 == nil { + return nil, errors.New("must supply File, ConfigFile, or PKCS11") + } + + if location.File != "" { + signer, _, err := privatekey.Load(location.File) + if err != nil { + return nil, err + } + return signer, nil + } + + var pkcs11Config *pkcs11key.Config + if location.ConfigFile != "" { + contents, err := os.ReadFile(location.ConfigFile) + if err != nil { + return nil, err + } + pkcs11Config = new(pkcs11key.Config) + err = json.Unmarshal(contents, pkcs11Config) + if err != nil { + return nil, err + } + } else { + pkcs11Config = location.PKCS11 + } + + if pkcs11Config.Module == "" || + pkcs11Config.TokenLabel == "" || + pkcs11Config.PIN == "" { + return nil, fmt.Errorf("missing a field in pkcs11Config %#v", pkcs11Config) + } + + numSessions := location.NumSessions + if numSessions <= 0 { + numSessions = 1 + } + + return pkcs11key.NewPool(numSessions, pkcs11Config.Module, + pkcs11Config.TokenLabel, pkcs11Config.PIN, pubkey) +} diff --git a/issuance/issuer_test.go b/issuance/issuer_test.go new file mode 100644 index 00000000000..faf4c23e5a3 --- /dev/null +++ b/issuance/issuer_test.go @@ -0,0 +1,270 @@ +package issuance + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "os" + "strings" + "testing" + "time" + + "github.com/jmhodges/clock" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/test" +) + +func defaultProfileConfig() ProfileConfig { + return ProfileConfig{ + MaxValidityPeriod: config.Duration{Duration: time.Hour}, + MaxValidityBackdate: config.Duration{Duration: time.Hour}, + IgnoredLints: []string{ + // Ignore the two SCT lints because these tests don't get SCTs. 
+ "w_ct_sct_policy_count_unsatisfied", + "e_scts_from_same_operator", + // Ignore the warning about including the SubjectKeyIdentifier extension: + // we include it on purpose, but plan to remove it soon. + "w_ext_subject_key_identifier_not_recommended_subscriber", + }, + } +} + +func defaultIssuerConfig() IssuerConfig { + return IssuerConfig{ + IssuerURL: "http://issuer-url.example.org", + CRLURLBase: "http://crl-url.example.org/", + CRLShards: 10, + Profiles: []string{"modern"}, + } +} + +var issuerCert *Certificate +var issuerSigner *ecdsa.PrivateKey + +func TestMain(m *testing.M) { + tk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + cmd.FailOnError(err, "failed to generate test key") + issuerSigner = tk + template := &x509.Certificate{ + SerialNumber: big.NewInt(123), + BasicConstraintsValid: true, + IsCA: true, + Subject: pkix.Name{ + CommonName: "big ca", + }, + KeyUsage: x509.KeyUsageCRLSign | x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, + } + issuer, err := x509.CreateCertificate(rand.Reader, template, template, tk.Public(), tk) + cmd.FailOnError(err, "failed to generate test issuer") + cert, err := x509.ParseCertificate(issuer) + cmd.FailOnError(err, "failed to parse test issuer") + issuerCert = &Certificate{Certificate: cert} + os.Exit(m.Run()) +} + +func TestLoadCertificate(t *testing.T) { + t.Parallel() + tests := []struct { + name string + path string + wantErr string + }{ + {"invalid cert file", "../test/hierarchy/int-e1.crl.pem", "loading issuer certificate"}, + {"non-CA cert file", "../test/hierarchy/ee-e1.cert.pem", "not a CA certificate"}, + {"happy path", "../test/hierarchy/int-e1.cert.pem", ""}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + _, err := LoadCertificate(tc.path) + if err != nil { + if tc.wantErr != "" { + test.AssertContains(t, err.Error(), tc.wantErr) + } else { + t.Errorf("expected no error but got %v", err) + } + } else { + if tc.wantErr != "" { + t.Errorf("expected error %q but got none", tc.wantErr) + } + } + }) + } +} + +func TestLoadSigner(t *testing.T) { + t.Parallel() + + // We're using this for its pubkey. This definitely doesn't match the private + // key loaded in any of the tests below, but that's okay because it still gets + // us through all the logic in loadSigner. + fakeKey, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + test.AssertNotError(t, err, "generating test key") + + tests := []struct { + name string + loc IssuerLoc + wantErr string + }{ + {"empty IssuerLoc", IssuerLoc{}, "must supply"}, + {"invalid key file", IssuerLoc{File: "../test/hierarchy/int-e1.crl.pem"}, "unable to parse"}, + {"ECDSA key file", IssuerLoc{File: "../test/hierarchy/int-e1.key.pem"}, ""}, + {"RSA key file", IssuerLoc{File: "../test/hierarchy/int-r3.key.pem"}, ""}, + {"invalid config file", IssuerLoc{ConfigFile: "../test/ident-policy.yaml"}, "invalid character"}, + // Note that we don't have a test for "valid config file" because it would + // always fail -- in CI, the softhsm hasn't been initialized, so there's no + // key to look up; locally even if the softhsm has been initialized, the + // keys in it don't match the fakeKey we generated above. 
+ } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + _, err := loadSigner(tc.loc, fakeKey.Public()) + if err != nil { + if tc.wantErr != "" { + test.AssertContains(t, err.Error(), tc.wantErr) + } else { + t.Errorf("expected no error but got %v", err) + } + } else { + if tc.wantErr != "" { + t.Errorf("expected error %q but got none", tc.wantErr) + } + } + }) + } +} + +func TestLoadIssuer(t *testing.T) { + _, err := newIssuer( + defaultIssuerConfig(), + issuerCert, + issuerSigner, + clock.NewFake(), + ) + test.AssertNotError(t, err, "newIssuer failed") +} + +func TestNewIssuerUnsupportedKeyType(t *testing.T) { + _, err := newIssuer( + defaultIssuerConfig(), + &Certificate{ + Certificate: &x509.Certificate{ + PublicKey: &ed25519.PublicKey{}, + }, + }, + &ed25519.PrivateKey{}, + clock.NewFake(), + ) + test.AssertError(t, err, "newIssuer didn't fail") + test.AssertEquals(t, err.Error(), "unsupported issuer key type") +} + +func TestNewIssuerKeyUsage(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ku x509.KeyUsage + wantErr string + }{ + {"missing certSign", x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, "does not have keyUsage certSign"}, + {"missing crlSign", x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, "does not have keyUsage crlSign"}, + {"missing digitalSignature", x509.KeyUsageCertSign | x509.KeyUsageCRLSign, "does not have keyUsage digitalSignature"}, + {"all three", x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, ""}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + _, err := newIssuer( + defaultIssuerConfig(), + &Certificate{ + Certificate: &x509.Certificate{ + SerialNumber: big.NewInt(123), + PublicKey: &ecdsa.PublicKey{ + Curve: elliptic.P256(), + }, + KeyUsage: tc.ku, + }, + }, + issuerSigner, + clock.NewFake(), + ) + if err != nil { + if tc.wantErr != "" { + test.AssertContains(t, err.Error(), tc.wantErr) + } else { + t.Errorf("expected no error but got %v", err) + } + } else { + if tc.wantErr != "" { + t.Errorf("expected error %q but got none", tc.wantErr) + } + } + }) + } +} + +func TestLoadChain_Valid(t *testing.T) { + chain, err := LoadChain([]string{ + "../test/hierarchy/int-e1.cert.pem", + "../test/hierarchy/root-x2.cert.pem", + }) + test.AssertNotError(t, err, "Should load valid chain") + + expectedIssuer, err := core.LoadCert("../test/hierarchy/int-e1.cert.pem") + test.AssertNotError(t, err, "Failed to load test issuer") + + chainIssuer := chain[0] + test.AssertNotNil(t, chainIssuer, "Failed to decode chain PEM") + + test.AssertByteEquals(t, chainIssuer.Raw, expectedIssuer.Raw) +} + +func TestLoadChain_TooShort(t *testing.T) { + _, err := LoadChain([]string{"/path/to/one/cert.pem"}) + test.AssertError(t, err, "Should reject too-short chain") +} + +func TestLoadChain_Unloadable(t *testing.T) { + _, err := LoadChain([]string{ + "does-not-exist.pem", + "../test/hierarchy/root-x2.cert.pem", + }) + test.AssertError(t, err, "Should reject unloadable chain") + + _, err = LoadChain([]string{ + "../test/hierarchy/int-e1.cert.pem", + "does-not-exist.pem", + }) + test.AssertError(t, err, "Should reject unloadable chain") + + invalidPEMFile, _ := os.CreateTemp("", "invalid.pem") + err = os.WriteFile(invalidPEMFile.Name(), []byte(""), 0640) + test.AssertNotError(t, err, "Error writing invalid PEM tmp file") + _, err = LoadChain([]string{ + invalidPEMFile.Name(), + "../test/hierarchy/root-x2.cert.pem", + }) + test.AssertError(t, err, "Should 
reject unloadable chain") +} + +func TestLoadChain_InvalidSig(t *testing.T) { + _, err := LoadChain([]string{ + "../test/hierarchy/int-e1.cert.pem", + "../test/hierarchy/root-x1.cert.pem", + }) + test.AssertError(t, err, "Should reject invalid signature") + test.Assert(t, strings.Contains(err.Error(), "root-x1.cert.pem"), + fmt.Sprintf("Expected error to mention filename, got: %s", err)) + test.Assert(t, strings.Contains(err.Error(), "signature from \"CN=(TEST) Ineffable Ice X1"), + fmt.Sprintf("Expected error to mention subject, got: %s", err)) +} diff --git a/link.sh b/link.sh deleted file mode 100755 index 77344d224cf..00000000000 --- a/link.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# -# Symlink the various boulder subcommands into place. -# -BINDIR="$PWD/bin" -for n in `"${BINDIR}/boulder" --list` ; do - ln -sf boulder "${BINDIR}/$n" -done diff --git a/linter/linter.go b/linter/linter.go index 1bf04fef2b3..522dd5ee5a6 100644 --- a/linter/linter.go +++ b/linter/linter.go @@ -1,6 +1,7 @@ package linter import ( + "bytes" "crypto" "crypto/ecdsa" "crypto/rand" @@ -13,34 +14,66 @@ import ( "github.com/zmap/zlint/v3" "github.com/zmap/zlint/v3/lint" - _ "github.com/letsencrypt/boulder/linter/lints/all" - _ "github.com/letsencrypt/boulder/linter/lints/intermediate" - _ "github.com/letsencrypt/boulder/linter/lints/root" - _ "github.com/letsencrypt/boulder/linter/lints/subscriber" + "github.com/letsencrypt/boulder/core" + + _ "github.com/letsencrypt/boulder/linter/lints/cabf_br" + _ "github.com/letsencrypt/boulder/linter/lints/chrome" + _ "github.com/letsencrypt/boulder/linter/lints/cpcps" + _ "github.com/letsencrypt/boulder/linter/lints/rfc" ) +var ErrLinting = fmt.Errorf("failed lint(s)") + // Check accomplishes the entire process of linting: it generates a throwaway -// signing key, uses that to create a throwaway cert, and runs a default set -// of lints (everything except for the ETSI and EV lints) against it. This is -// the primary public interface of this package, but it can be inefficient; -// creating a new signer and a new lint registry are expensive operations which -// performance-sensitive clients may want to cache. -func Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) error { - linter, err := New(realIssuer, realSigner, skipLints) +// signing key, uses that to create a linting cert, and runs a default set of +// lints (everything except for the ETSI and EV lints) against it. If the +// subjectPubKey and realSigner indicate that this is a self-signed cert, the +// cert will have its pubkey replaced to also be self-signed. This is the +// primary public interface of this package, but it can be inefficient; creating +// a new signer and a new lint registry are expensive operations which +// performance-sensitive clients may want to cache via linter.New(). +func Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) ([]byte, error) { + linter, err := New(realIssuer, realSigner) + if err != nil { + return nil, err + } + + reg, err := NewRegistry(skipLints) + if err != nil { + return nil, err + } + + lintCertBytes, err := linter.Check(tbs, subjectPubKey, reg) + if err != nil { + return nil, err + } + + return lintCertBytes, nil +} + +// CheckCRL is like Check, but for CRLs. 
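+// Errors caused by failed lints wrap ErrLinting, so callers can distinguish
+// them from signing or parsing failures via errors.Is(err, ErrLinting).
+//
+// A minimal usage sketch (tbsCRL, issuerCert, and issuerSigner are
+// illustrative placeholders, not values defined by this package):
+//
+//	err := CheckCRL(tbsCRL, issuerCert, issuerSigner, nil)
+//	if errors.Is(err, ErrLinting) {
+//		// the CRL template failed one or more lints
+//	}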
+func CheckCRL(tbs *x509.RevocationList, realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) error { + linter, err := New(realIssuer, realSigner) if err != nil { return err } - return linter.Check(tbs, subjectPubKey) + + reg, err := NewRegistry(skipLints) + if err != nil { + return err + } + + return linter.CheckCRL(tbs, reg) } // Linter is capable of linting a to-be-signed (TBS) certificate. It does so by // signing that certificate with a throwaway private key and a fake issuer whose // public key matches the throwaway private key, and then running the resulting -// throwaway certificate through a registry of zlint lints. +// certificate through a registry of zlint lints. type Linter struct { - issuer *x509.Certificate - signer crypto.Signer - registry lint.Registry + issuer *x509.Certificate + signer crypto.Signer + realPubKey crypto.PublicKey } // New constructs a Linter. It uses the provided real certificate and signer @@ -48,7 +81,7 @@ type Linter struct { // be used to sign the lint certificate. It uses the provided list of lint names // to skip to filter the zlint global registry to only those lints which should // be run. -func New(realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []string) (*Linter, error) { +func New(realIssuer *x509.Certificate, realSigner crypto.Signer) (*Linter, error) { lintSigner, err := makeSigner(realSigner) if err != nil { return nil, err @@ -57,22 +90,50 @@ func New(realIssuer *x509.Certificate, realSigner crypto.Signer, skipLints []str if err != nil { return nil, err } - reg, err := makeRegistry(skipLints) + return &Linter{lintIssuer, lintSigner, realSigner.Public()}, nil +} + +// Check signs the given TBS certificate using the Linter's fake issuer cert and +// private key, then runs the resulting certificate through all lints in reg. +// If the subjectPubKey is identical to the public key of the real signer +// used to create this linter, then the throwaway cert will have its pubkey +// replaced with the linter's pubkey so that it appears self-signed. It returns +// an error if any lint fails. On success it also returns the DER bytes of the +// linting certificate. +func (l Linter) Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, reg lint.Registry) ([]byte, error) { + lintPubKey := subjectPubKey + selfSigned, err := core.PublicKeysEqual(subjectPubKey, l.realPubKey) + if err != nil { + return nil, err + } + if selfSigned { + lintPubKey = l.signer.Public() + } + + lintCertBytes, cert, err := makeLintCert(tbs, lintPubKey, l.issuer, l.signer) if err != nil { return nil, err } - return &Linter{lintIssuer, lintSigner, reg}, nil + + lintRes := zlint.LintCertificateEx(cert, reg) + err = ProcessResultSet(lintRes) + if err != nil { + return nil, err + } + + return lintCertBytes, nil } -// Check signs the given TBS certificate using the Linter's fake issuer cert and -// private key, then runs the resulting certificate through all non-filtered -// lints. It returns an error if any lint fails. -func (l Linter) Check(tbs *x509.Certificate, subjectPubKey crypto.PublicKey) error { - cert, err := makeLintCert(tbs, subjectPubKey, l.issuer, l.signer) +// CheckCRL signs the given RevocationList template using the Linter's fake +// issuer cert and private key, then runs the resulting CRL through all CRL +// lints in the registry. It returns an error if any check fails. 
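+//
+// Since creating a new Linter and Registry is expensive, callers linting many
+// CRLs can construct both once and reuse them; a sketch (error handling
+// elided; issuerCert and issuerSigner are illustrative placeholders):
+//
+//	l, _ := New(issuerCert, issuerSigner)
+//	reg, _ := NewRegistry(nil)
+//	for _, tbs := range tbsCRLs {
+//		_ = l.CheckCRL(tbs, reg)
+//	}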
+func (l Linter) CheckCRL(tbs *x509.RevocationList, reg lint.Registry) error { + crl, err := makeLintCRL(tbs, l.issuer, l.signer) if err != nil { return err } - return check(cert, l.registry) + lintRes := zlint.LintRevocationListEx(crl, reg) + return ProcessResultSet(lintRes) } func makeSigner(realSigner crypto.Signer) (crypto.Signer, error) { @@ -97,10 +158,17 @@ func makeSigner(realSigner crypto.Signer) (crypto.Signer, error) { func makeIssuer(realIssuer *x509.Certificate, lintSigner crypto.Signer) (*x509.Certificate, error) { lintIssuerTBS := &x509.Certificate{ - // This is the full list of attributes that x509.CreateCertificate() says it - // carries over from the template. Constructing this TBS certificate in - // this way ensures that the resulting lint issuer is as identical to the - // real issuer as we can get, without sharing a public key. + // This is nearly the full list of attributes that + // x509.CreateCertificate() says it carries over from the template. + // Constructing this TBS certificate in this way ensures that the + // resulting lint issuer is as identical to the real issuer as we can + // get, without sharing a public key. + // + // We do not copy the SignatureAlgorithm field while constructing the + // lintIssuer because the lintIssuer is self-signed. Depending on the + // realIssuer, which could be either an intermediate or cross-signed + // intermediate, the SignatureAlgorithm of that certificate may differ + // from the root certificate that had signed it. AuthorityKeyId: realIssuer.AuthorityKeyId, BasicConstraintsValid: realIssuer.BasicConstraintsValid, CRLDistributionPoints: realIssuer.CRLDistributionPoints, @@ -126,9 +194,8 @@ func makeIssuer(realIssuer *x509.Certificate, lintSigner crypto.Signer) (*x509.C PermittedEmailAddresses: realIssuer.PermittedEmailAddresses, PermittedIPRanges: realIssuer.PermittedIPRanges, PermittedURIDomains: realIssuer.PermittedURIDomains, - PolicyIdentifiers: realIssuer.PolicyIdentifiers, + Policies: realIssuer.Policies, SerialNumber: realIssuer.SerialNumber, - SignatureAlgorithm: realIssuer.SignatureAlgorithm, Subject: realIssuer.Subject, SubjectKeyId: realIssuer.SubjectKeyId, URIs: realIssuer.URIs, @@ -145,7 +212,9 @@ func makeIssuer(realIssuer *x509.Certificate, lintSigner crypto.Signer) (*x509.C return lintIssuer, nil } -func makeRegistry(skipLints []string) (lint.Registry, error) { +// NewRegistry returns a zlint Registry with irrelevant (ETSI, EV) lints +// excluded. This registry also includes all custom lints defined in Boulder. 
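+//
+// For example, to also skip a specific lint by name (shown here with a name
+// this repo already skips when issuing subscriber certificates):
+//
+//	reg, err := NewRegistry([]string{"w_ext_subject_key_identifier_not_recommended_subscriber"})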
+func NewRegistry(skipLints []string) (lint.Registry, error) { reg, err := lint.GlobalRegistry().Filter(lint.FilterOptions{ ExcludeNames: skipLints, ExcludeSources: []lint.LintSource{ @@ -162,28 +231,49 @@ func makeRegistry(skipLints []string) (lint.Registry, error) { return reg, nil } -func makeLintCert(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, issuer *x509.Certificate, signer crypto.Signer) (*zlintx509.Certificate, error) { +func makeLintCert(tbs *x509.Certificate, subjectPubKey crypto.PublicKey, issuer *x509.Certificate, signer crypto.Signer) ([]byte, *zlintx509.Certificate, error) { lintCertBytes, err := x509.CreateCertificate(rand.Reader, tbs, issuer, subjectPubKey, signer) if err != nil { - return nil, fmt.Errorf("failed to create lint certificate: %w", err) + return nil, nil, fmt.Errorf("failed to create lint certificate: %w", err) } lintCert, err := zlintx509.ParseCertificate(lintCertBytes) if err != nil { - return nil, fmt.Errorf("failed to parse lint certificate: %w", err) + return nil, nil, fmt.Errorf("failed to parse lint certificate: %w", err) + } + // RFC 5280, Sections 4.1.2.6 and 8 + // + // When the subject of the certificate is a CA, the subject + // field MUST be encoded in the same way as it is encoded in the + // issuer field (Section 4.1.2.4) in all certificates issued by + // the subject CA. + if !bytes.Equal(issuer.RawSubject, lintCert.RawIssuer) { + return nil, nil, fmt.Errorf("mismatch between lint issuer RawSubject and lintCert.RawIssuer DER bytes: \"%x\" != \"%x\"", issuer.RawSubject, lintCert.RawIssuer) } - return lintCert, nil + + return lintCertBytes, lintCert, nil } -func check(lintCert *zlintx509.Certificate, lints lint.Registry) error { - lintRes := zlint.LintCertificateEx(lintCert, lints) +func ProcessResultSet(lintRes *zlint.ResultSet) error { if lintRes.NoticesPresent || lintRes.WarningsPresent || lintRes.ErrorsPresent || lintRes.FatalsPresent { var failedLints []string for lintName, result := range lintRes.Results { if result.Status > lint.Pass { - failedLints = append(failedLints, lintName) + failedLints = append(failedLints, fmt.Sprintf("%s (%s)", lintName, result.Details)) } } - return fmt.Errorf("failed lints: %s", strings.Join(failedLints, ", ")) + return fmt.Errorf("%w: %s", ErrLinting, strings.Join(failedLints, ", ")) } return nil } + +func makeLintCRL(tbs *x509.RevocationList, issuer *x509.Certificate, signer crypto.Signer) (*zlintx509.RevocationList, error) { + lintCRLBytes, err := x509.CreateRevocationList(rand.Reader, tbs, issuer, signer) + if err != nil { + return nil, err + } + lintCRL, err := zlintx509.ParseRevocationList(lintCRLBytes) + if err != nil { + return nil, err + } + return lintCRL, nil +} diff --git a/linter/linter_test.go b/linter/linter_test.go index 5b2c06eb9b9..7f759629a51 100644 --- a/linter/linter_test.go +++ b/linter/linter_test.go @@ -6,13 +6,14 @@ import ( "crypto/elliptic" "crypto/rsa" "math/big" + "strings" "testing" "github.com/letsencrypt/boulder/test" ) func TestMakeSigner_RSA(t *testing.T) { - rsaMod, ok := big.NewInt(0).SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16) + rsaMod, ok := big.NewInt(0).SetString(strings.Repeat("ff", 128), 16) test.Assert(t, ok, "failed to set RSA mod") realSigner := &rsa.PrivateKey{ PublicKey: rsa.PublicKey{ diff --git a/linter/lints/all/w_validity_period_has_extra_second.go b/linter/lints/all/w_validity_period_has_extra_second.go deleted file mode 100644 index 9e62cff4aaa..00000000000 --- 
a/linter/lints/all/w_validity_period_has_extra_second.go +++ /dev/null @@ -1,43 +0,0 @@ -package subscriber - -import ( - "time" - - "github.com/zmap/zcrypto/x509" - "github.com/zmap/zlint/v3/lint" - - "github.com/letsencrypt/boulder/linter/lints" -) - -type certValidityNotRound struct{} - -func init() { - lint.RegisterLint(&lint.Lint{ - Name: "w_validity_period_has_extra_second", - Description: "Let's Encrypt Certificates have Validity Periods that are a round number of seconds", - Citation: "CPS: 7.1", - Source: lints.LetsEncryptCPSAll, - EffectiveDate: lints.CPSV33Date, - Lint: NewCertValidityNotRound, - }) -} - -func NewCertValidityNotRound() lint.LintInterface { - return &certValidityNotRound{} -} - -func (l *certValidityNotRound) CheckApplies(c *x509.Certificate) bool { - return true -} - -func (l *certValidityNotRound) Execute(c *x509.Certificate) *lint.LintResult { - // RFC 5280 4.1.2.5: "The validity period for a certificate is the period - // of time from notBefore through notAfter, inclusive." - certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore) - - if certValidity%60 == 0 { - return &lint.LintResult{Status: lint.Pass} - } - - return &lint.LintResult{Status: lint.Error} -} diff --git a/linter/lints/cabf_br/lint_crl_acceptable_reason_codes.go b/linter/lints/cabf_br/lint_crl_acceptable_reason_codes.go new file mode 100644 index 00000000000..13b63d2b4af --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_acceptable_reason_codes.go @@ -0,0 +1,69 @@ +package cabfbr + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type crlAcceptableReasonCodes struct{} + +/************************************************ +Baseline Requirements: 7.2.2.1: +The CRLReason indicated MUST NOT be unspecified (0). +The CRLReason MUST NOT be certificateHold (6). + +When the CRLReason code is not one of the following, then the reasonCode extension MUST NOT be provided: +- keyCompromise (RFC 5280 CRLReason #1); +- privilegeWithdrawn (RFC 5280 CRLReason #9); +- cessationOfOperation (RFC 5280 CRLReason #5); +- affiliationChanged (RFC 5280 CRLReason #3); or +- superseded (RFC 5280 CRLReason #4). +************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_acceptable_reason_codes", + Description: "CRL entry Reason Codes must be 1, 3, 4, 5, or 9", + Citation: "BRs: 7.2.2.1", + Source: lint.CABFBaselineRequirements, + // We use the Mozilla Root Store Policy v2.8.1 effective date here + // because, although this lint enforces requirements from the BRs, those + // same requirements were in the MRSP first. 
+ EffectiveDate: lints.MozillaPolicy281Date, + }, + Lint: NewCrlAcceptableReasonCodes, + }) +} + +func NewCrlAcceptableReasonCodes() lint.RevocationListLintInterface { + return &crlAcceptableReasonCodes{} +} + +func (l *crlAcceptableReasonCodes) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlAcceptableReasonCodes) Execute(c *x509.RevocationList) *lint.LintResult { + for _, rc := range c.RevokedCertificates { + if rc.ReasonCode == nil { + continue + } + switch *rc.ReasonCode { + case 1: // keyCompromise + case 3: // affiliationChanged + case 4: // superseded + case 5: // cessationOfOperation + case 9: // privilegeWithdrawn + continue + default: + return &lint.LintResult{ + Status: lint.Error, + Details: "CRLs MUST NOT include reasonCodes other than 1, 3, 4, 5, and 9", + } + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cabf_br/lint_crl_acceptable_reason_codes_test.go b/linter/lints/cabf_br/lint_crl_acceptable_reason_codes_test.go new file mode 100644 index 00000000000..1ab8f08ab4c --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_acceptable_reason_codes_test.go @@ -0,0 +1,87 @@ +package cabfbr + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlAcceptableReasonCodes(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + // crl_good.pem contains a revocation entry with no reason code extension. + name: "good", + want: lint.Pass, + }, + { + name: "reason_0", + want: lint.Error, + wantSubStr: "MUST NOT include reasonCodes other than", + }, + { + name: "reason_1", + want: lint.Pass, + }, + { + name: "reason_2", + want: lint.Error, + wantSubStr: "MUST NOT include reasonCodes other than", + }, + { + name: "reason_3", + want: lint.Pass, + }, + { + name: "reason_4", + want: lint.Pass, + }, + { + name: "reason_5", + want: lint.Pass, + }, + { + name: "reason_6", + want: lint.Error, + wantSubStr: "MUST NOT include reasonCodes other than", + }, + { + name: "reason_8", + want: lint.Error, + wantSubStr: "MUST NOT include reasonCodes other than", + }, + { + name: "reason_9", + want: lint.Pass, + }, + { + name: "reason_10", + want: lint.Error, + wantSubStr: "MUST NOT include reasonCodes other than", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlAcceptableReasonCodes() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/cabf_br/lint_crl_no_critical_reason_codes.go b/linter/lints/cabf_br/lint_crl_no_critical_reason_codes.go new file mode 100644 index 00000000000..c1950ab01d0 --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_no_critical_reason_codes.go @@ -0,0 +1,51 @@ +package cabfbr + +import ( + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" +) + +type crlCriticalReasonCodes struct{} + +/************************************************ +Baseline Requirements: 7.2.2.1: +If present, [the reasonCode] extension MUST NOT be marked critical. 
+************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_no_critical_reason_codes", + Description: "CRL entry reasonCode extension MUST NOT be marked critical", + Citation: "BRs: 7.2.2.1", + Source: lint.CABFBaselineRequirements, + EffectiveDate: util.CABFBRs_1_8_0_Date, + }, + Lint: NewCrlCriticalReasonCodes, + }) +} + +func NewCrlCriticalReasonCodes() lint.RevocationListLintInterface { + return &crlCriticalReasonCodes{} +} + +func (l *crlCriticalReasonCodes) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlCriticalReasonCodes) Execute(c *x509.RevocationList) *lint.LintResult { + reasonCodeOID := asn1.ObjectIdentifier{2, 5, 29, 21} // id-ce-reasonCode + for _, rc := range c.RevokedCertificates { + for _, ext := range rc.Extensions { + if ext.Id.Equal(reasonCodeOID) && ext.Critical { + return &lint.LintResult{ + Status: lint.Error, + Details: "CRL entry reasonCode extension MUST NOT be marked critical", + } + } + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cabf_br/lint_crl_no_critical_reason_codes_test.go b/linter/lints/cabf_br/lint_crl_no_critical_reason_codes_test.go new file mode 100644 index 00000000000..8dc6d95faf5 --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_no_critical_reason_codes_test.go @@ -0,0 +1,46 @@ +package cabfbr + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlCriticalReasonCodes(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "critical_reason", + want: lint.Error, + wantSubStr: "reasonCode extension MUST NOT be marked critical", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlCriticalReasonCodes() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/cabf_br/lint_crl_validity_period.go b/linter/lints/cabf_br/lint_crl_validity_period.go new file mode 100644 index 00000000000..853e8376f97 --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_validity_period.go @@ -0,0 +1,141 @@ +package cabfbr + +import ( + "fmt" + "time" + + "github.com/letsencrypt/boulder/linter/lints" + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + "golang.org/x/crypto/cryptobyte" + + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +type crlValidityPeriod struct{} + +/************************************************ +Baseline Requirements, Section 4.9.7: +* For the status of Subscriber Certificates [...] the value of the nextUpdate + field MUST NOT be more than ten days beyond the value of the thisUpdate field. +* For the status of Subordinate CA Certificates [...]. The value of the + nextUpdate field MUST NOT be more than twelve months beyond the value of the + thisUpdate field. 
+************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_validity_period", + Description: "Let's Encrypt CRLs must have an acceptable validity period", + Citation: "BRs: 4.9.7", + Source: lint.CABFBaselineRequirements, + EffectiveDate: util.CABFBRs_1_2_1_Date, + }, + Lint: NewCrlValidityPeriod, + }) +} + +func NewCrlValidityPeriod() lint.RevocationListLintInterface { + return &crlValidityPeriod{} +} + +func (l *crlValidityPeriod) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlValidityPeriod) Execute(c *x509.RevocationList) *lint.LintResult { + /* + Let's Encrypt issues two kinds of CRLs: + + 1) CRLs containing subscriber certificates, created by the crl-updater. + These assert the distributionPoint field and the onlyContainsUserCerts + boolean. + 2) CRLs containing subordinate CA certificates, created by the ceremony + tool. These assert the onlyContainsCACerts boolean. + + We use the presence of these booleans to determine which BR-mandated + lifetime to enforce. + */ + + // The issuingDistributionPoint extension is the only way to determine + // which type of CRL we're dealing with: it must be parsed and its + // internal fields inspected. + idpOID := asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint + idpe := lints.GetExtWithOID(c.Extensions, idpOID) + if idpe == nil { + return &lint.LintResult{ + Status: lint.Warn, + Details: "CRL missing IssuingDistributionPoint", + } + } + + // Step inside the outer issuingDistributionPoint sequence to get access to + // its constituent fields. + idpv := cryptobyte.String(idpe.Value) + if !idpv.ReadASN1(&idpv, cryptobyte_asn1.SEQUENCE) { + return &lint.LintResult{ + Status: lint.Warn, + Details: "Failed to read IssuingDistributionPoint distributionPoint", + } + } + + // Throw distributionPoint away. + distributionPointTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed() + _ = idpv.SkipOptionalASN1(distributionPointTag) + + // Parse IssuingDistributionPoint OPTIONAL BOOLEANS to eventually perform + // sanity checks. + idp := lints.NewIssuingDistributionPoint() + onlyContainsUserCertsTag := cryptobyte_asn1.Tag(1).ContextSpecific() + if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsUserCerts, onlyContainsUserCertsTag, false) { + return &lint.LintResult{ + Status: lint.Warn, + Details: "Failed to read IssuingDistributionPoint onlyContainsUserCerts", + } + } + + onlyContainsCACertsTag := cryptobyte_asn1.Tag(2).ContextSpecific() + if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsCACerts, onlyContainsCACertsTag, false) { + return &lint.LintResult{ + Status: lint.Warn, + Details: "Failed to read IssuingDistributionPoint onlyContainsCACerts", + } + } + + // Basic sanity check so that later on we can determine what type of CRL we + // issued based on the presence of one of these fields. If both fields exist + // then 1) it's a problem and 2) the real validity period is unknown. + if idp.OnlyContainsUserCerts && idp.OnlyContainsCACerts { + return &lint.LintResult{ + Status: lint.Error, + Details: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE", + } + } + + // Default to subscriber cert CRL. 
+ var BRValidity = 10 * 24 * time.Hour + var validityString = "10 days" + if idp.OnlyContainsCACerts { + BRValidity = 365 * lints.BRDay + validityString = "365 days" + } + + parsedValidity := c.NextUpdate.Sub(c.ThisUpdate) + if parsedValidity <= 0 { + return &lint.LintResult{ + Status: lint.Error, + Details: "CRL has NextUpdate at or before ThisUpdate", + } + } + + if parsedValidity > BRValidity { + return &lint.LintResult{ + Status: lint.Error, + Details: fmt.Sprintf("CRL has validity period greater than %s", validityString), + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cabf_br/lint_crl_validity_period_test.go b/linter/lints/cabf_br/lint_crl_validity_period_test.go new file mode 100644 index 00000000000..39e16ff8034 --- /dev/null +++ b/linter/lints/cabf_br/lint_crl_validity_period_test.go @@ -0,0 +1,83 @@ +package cabfbr + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlValidityPeriod(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", // CRL for subscriber certs + want: lint.Pass, + }, + { + name: "good_subordinate_ca", + want: lint.Pass, + }, + { + name: "idp_distributionPoint_and_onlyUser_and_onlyCA", // What type of CRL is it (besides horrible)?!!??! + want: lint.Error, + wantSubStr: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE", + }, + { + name: "negative_validity", + want: lint.Warn, + wantSubStr: "CRL missing IssuingDistributionPoint", + }, + { + name: "negative_validity_subscriber_cert", + want: lint.Error, + wantSubStr: "at or before", + }, + { + name: "negative_validity_subordinate_ca", + want: lint.Error, + wantSubStr: "at or before", + }, + { + name: "long_validity_subscriber_cert", // 10 days + 1 second + want: lint.Error, + wantSubStr: "CRL has validity period greater than 10 days", + }, + { + name: "long_validity_subordinate_ca", // 1 year + 1 second + want: lint.Error, + wantSubStr: "CRL has validity period greater than 365 days", + }, + { + // Technically this CRL is incorrect because Let's Encrypt does not + // (yet) issue CRLs containing both the distributionPoint and + // optional onlyContainsCACerts boolean, but we're still parsing the + // correct BR validity in this lint. 
+ name: "long_validity_distributionPoint_and_subordinate_ca", + want: lint.Pass, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlValidityPeriod() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/cabf_br/testdata/crl_critical_reason.pem b/linter/lints/cabf_br/testdata/crl_critical_reason.pem new file mode 100644 index 00000000000..91f0732e076 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_critical_reason.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBVjCB3gIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjAsMCoCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMA8wDQYDVR0VAQH/BAMKAQGgNjA0MB8GA1UdIwQYMBaAFAHa +u3rLJSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQD +AwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD +6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDq +KD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_good.pem b/linter/lints/cabf_br/testdata/crl_good.pem new file mode 100644 index 00000000000..8b383d0a07e --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_good.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_good_subordinate_ca.pem b/linter/lints/cabf_br/testdata/crl_good_subordinate_ca.pem new file mode 100644 index 00000000000..a476c16fdfd --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_good_subordinate_ca.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZDCB7AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgRzBFMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTAPBgNVHRwBAf8EBTAD +ggH/MAoGCCqGSM49BAMDA2cAMGQCMC8OQhSdNhq8nqHzrTowPIWHa7D9wX45Wczi +wTydR0bLRdiDSEZ9tHgxj6RHFFBrIgIwV5A+lykivTOBek/qVRdTStwtK9q25p5B +JWvbicaNns/LS9z3jDSfuJ1nzCN7n78z +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem b/linter/lints/cabf_br/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem new file mode 100644 index 00000000000..2513e3c7f89 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmzCCASICAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN 
+MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoH0wezAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwRQYDVR0cAQH/BDsw +OaAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/4IB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw +/cF+OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rc +LSvatuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_long_validity.pem b/linter/lints/cabf_br/testdata/crl_long_validity.pem new file mode 100644 index 00000000000..cb745bfa71a --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_long_validity.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE2MTY0MzM5WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_long_validity_distributionPoint_and_subordinate_ca.pem b/linter/lints/cabf_br/testdata/crl_long_validity_distributionPoint_and_subordinate_ca.pem new file mode 100644 index 00000000000..50b194c9c78 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_long_validity_distributionPoint_and_subordinate_ca.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIIB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_long_validity_subordinate_ca.pem b/linter/lints/cabf_br/testdata/crl_long_validity_subordinate_ca.pem new file mode 100644 index 00000000000..b4210863215 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_long_validity_subordinate_ca.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZDCB7AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjMwNzE2MTY0MzM5WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgRzBFMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAPBgNVHRwBAf8EBTAD +ggH/MAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzYbdPAi7AEEr53OIulDND4 +vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wnxjuxc+jdK7iEyJxbbegQ +0eYs1lDH112u5l4UkOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_long_validity_subscriber_cert.pem b/linter/lints/cabf_br/testdata/crl_long_validity_subscriber_cert.pem new file mode 100644 index 00000000000..0a0b36112f3 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_long_validity_subscriber_cert.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg 
+RTEXDTIyMDcwNjE2NDMzOFoXDTIyMDcxNjE2NDMzOVowKTAnAggDrlHbURVaPBcN +MjIwNzA2MTU0MzM4WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFv9LJt+yGA8wQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/zAKBggqhkjOPQQDAwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziL +pQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMic +W23oENHmLNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_negative_validity.pem b/linter/lints/cabf_br/testdata/crl_negative_validity.pem new file mode 100644 index 00000000000..fc16812d6f4 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_negative_validity.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzA2MTY0MzM3WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_negative_validity_subordinate_ca.pem b/linter/lints/cabf_br/testdata/crl_negative_validity_subordinate_ca.pem new file mode 100644 index 00000000000..e13ef6bfb25 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_negative_validity_subordinate_ca.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZDCB7AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzA2MTY0MzM3WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgRzBFMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAPBgNVHRwBAf8EBTAD +ggH/MAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzYbdPAi7AEEr53OIulDND4 +vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wnxjuxc+jdK7iEyJxbbegQ +0eYs1lDH112u5l4UkOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_negative_validity_subscriber_cert.pem b/linter/lints/cabf_br/testdata/crl_negative_validity_subscriber_cert.pem new file mode 100644 index 00000000000..d41cedf2916 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_negative_validity_subscriber_cert.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMDcwNjE2NDMzOFoXDTIyMDcwNjE2NDMzN1owKTAnAggDrlHbURVaPBcN +MjIwNzA2MTU0MzM4WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFv9LJt+yGA8wQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/zAKBggqhkjOPQQDAwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziL +pQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMic +W23oENHmLNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_0.pem b/linter/lints/cabf_br/testdata/crl_reason_0.pem new file mode 100644 index 00000000000..308fd94d90a --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_0.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y 
+MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQCgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_1.pem b/linter/lints/cabf_br/testdata/crl_reason_1.pem new file mode 100644 index 00000000000..0331fa9a881 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_1.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_10.pem b/linter/lints/cabf_br/testdata/crl_reason_10.pem new file mode 100644 index 00000000000..86c79191681 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_10.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQqgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_2.pem b/linter/lints/cabf_br/testdata/crl_reason_2.pem new file mode 100644 index 00000000000..bbeaaee00f5 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_2.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQKgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_3.pem b/linter/lints/cabf_br/testdata/crl_reason_3.pem new file mode 100644 index 00000000000..66d2fae7d4c --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_3.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQOgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_4.pem b/linter/lints/cabf_br/testdata/crl_reason_4.pem new file mode 100644 index 00000000000..62e2d14565e --- 
/dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_4.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQSgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_5.pem b/linter/lints/cabf_br/testdata/crl_reason_5.pem new file mode 100644 index 00000000000..879783e1b3d --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_5.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQWgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_6.pem b/linter/lints/cabf_br/testdata/crl_reason_6.pem new file mode 100644 index 00000000000..cc91f53f379 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_6.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQagNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_8.pem b/linter/lints/cabf_br/testdata/crl_reason_8.pem new file mode 100644 index 00000000000..4d1ff3e8d04 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_8.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQigNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn +ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/cabf_br/testdata/crl_reason_9.pem b/linter/lints/cabf_br/testdata/crl_reason_9.pem new file mode 100644 index 00000000000..ae24a3d5f69 --- /dev/null +++ b/linter/lints/cabf_br/testdata/crl_reason_9.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQmgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNn 
+ADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQ +sHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E +4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/chrome/e_scts_from_same_operator.go b/linter/lints/chrome/e_scts_from_same_operator.go new file mode 100644 index 00000000000..2b39baa43f6 --- /dev/null +++ b/linter/lints/chrome/e_scts_from_same_operator.go @@ -0,0 +1,99 @@ +package chrome + +import ( + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zcrypto/x509/ct" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + + "github.com/letsencrypt/boulder/ctpolicy/loglist" + "github.com/letsencrypt/boulder/linter/lints" +) + +type sctsFromSameOperator struct { + logList loglist.List +} + +func init() { + lint.RegisterCertificateLint(&lint.CertificateLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_scts_from_same_operator", + Description: "Let's Encrypt Subscriber Certificates have two SCTs from logs run by different operators", + Citation: "Chrome CT Policy", + Source: lints.ChromeCTPolicy, + EffectiveDate: time.Date(2022, time.April, 15, 0, 0, 0, 0, time.UTC), + }, + Lint: NewSCTsFromSameOperator, + }) +} + +func NewSCTsFromSameOperator() lint.CertificateLintInterface { + return &sctsFromSameOperator{logList: loglist.GetLintList()} +} + +func (l *sctsFromSameOperator) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && !util.IsExtInCert(c, util.CtPoisonOID) +} + +func (l *sctsFromSameOperator) Execute(c *x509.Certificate) *lint.LintResult { + if len(l.logList) == 0 { + return &lint.LintResult{ + Status: lint.NE, + Details: "Failed to load log list, unable to check Certificate SCTs.", + } + } + + if len(c.SignedCertificateTimestampList) < 2 { + return &lint.LintResult{ + Status: lint.Error, + Details: "Certificate had too few embedded SCTs; browser policy requires 2.", + } + } + + logIDs := make(map[ct.SHA256Hash]struct{}) + for _, sct := range c.SignedCertificateTimestampList { + logIDs[sct.LogID] = struct{}{} + } + + if len(logIDs) < 2 { + return &lint.LintResult{ + Status: lint.Error, + Details: "Certificate SCTs from too few distinct logs; browser policy requires 2.", + } + } + + rfc6962Compliant := false + operatorNames := make(map[string]struct{}) + for logID := range logIDs { + log, err := l.logList.GetByID(logID.Base64String()) + if err != nil { + // This certificate *may* have more than 2 SCTs, so missing one now isn't + // a problem. 
+ continue + } + if !log.Tiled { + rfc6962Compliant = true + } + operatorNames[log.Operator] = struct{}{} + } + + if !rfc6962Compliant { + return &lint.LintResult{ + Status: lint.Error, + Details: "At least one certificate SCT must be from an RFC6962-compliant log.", + } + } + + if len(operatorNames) < 2 { + return &lint.LintResult{ + Status: lint.Error, + Details: "Certificate SCTs from too few distinct log operators; browser policy requires 2.", + } + } + + return &lint.LintResult{ + Status: lint.Pass, + } +} diff --git a/linter/lints/common.go b/linter/lints/common.go index 4fef6c4c519..4efe482869d 100644 --- a/linter/lints/common.go +++ b/linter/lints/common.go @@ -1,9 +1,15 @@ package lints import ( + "bytes" + "net/url" "time" + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509/pkix" "github.com/zmap/zlint/v3/lint" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" ) const ( @@ -14,12 +20,115 @@ const ( BRDay time.Duration = 86400 * time.Second // Declare our own Sources for use in zlint registry filtering. - LetsEncryptCPSAll lint.LintSource = "LECPSAll" - LetsEncryptCPSIntermediate lint.LintSource = "LECPSIntermediate" - LetsEncryptCPSRoot lint.LintSource = "LECPSRoot" - LetsEncryptCPSSubscriber lint.LintSource = "LECPSSubscriber" + LetsEncryptCPS lint.LintSource = "LECPS" + ChromeCTPolicy lint.LintSource = "ChromeCT" ) var ( - CPSV33Date = time.Date(2021, time.June, 8, 0, 0, 0, 0, time.UTC) + CPSV33Date = time.Date(2021, time.June, 8, 0, 0, 0, 0, time.UTC) + MozillaPolicy281Date = time.Date(2023, time.February, 15, 0, 0, 0, 0, time.UTC) ) + +// IssuingDistributionPoint stores the IA5STRING value(s) of the optional +// distributionPoint, and the (implied OPTIONAL) BOOLEAN values of +// onlyContainsUserCerts and onlyContainsCACerts. +// +// RFC 5280 +// * Section 5.2.5 +// IssuingDistributionPoint ::= SEQUENCE { +// distributionPoint [0] DistributionPointName OPTIONAL, +// onlyContainsUserCerts [1] BOOLEAN DEFAULT FALSE, +// onlyContainsCACerts [2] BOOLEAN DEFAULT FALSE, +// ... +// } +// +// * Section 4.2.1.13 +// DistributionPointName ::= CHOICE { +// fullName [0] GeneralNames, +// ... } +// +// * Appendix A.1, Page 128 +// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName +// GeneralName ::= CHOICE { +// ... +// uniformResourceIdentifier [6] IA5String, +// ... } +// +// Because this struct is used by cryptobyte (not by encoding/asn1), and because +// we only care about the uniformResourceIdentifier flavor of GeneralName, we +// are able to flatten the DistributionPointName down into a slice of URIs. +type IssuingDistributionPoint struct { + DistributionPointURIs []*url.URL + OnlyContainsUserCerts bool + OnlyContainsCACerts bool +} + +// NewIssuingDistributionPoint is a constructor which returns an +// IssuingDistributionPoint with each field set to zero values. +func NewIssuingDistributionPoint() *IssuingDistributionPoint { + return &IssuingDistributionPoint{} +} + +// GetExtWithOID is a helper for several of our custom lints. It returns the +// extension with the given OID if it exists, or nil otherwise. +func GetExtWithOID(exts []pkix.Extension, oid asn1.ObjectIdentifier) *pkix.Extension { + for _, ext := range exts { + if ext.Id.Equal(oid) { + return &ext + } + } + return nil +} + +// ReadOptionalASN1BooleanWithTag attempts to read and advance incoming to +// search for an optional DER-encoded ASN.1 element tagged with the given tag. 
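+// For example, the DER bytes {0x81, 0x01, 0xFF} encode a [1]-tagged BOOLEAN
+// whose single content octet 0xFF is TRUE, while {0x82, 0x01, 0x00} encode a
+// [2]-tagged FALSE (these exact byte strings appear in common_test.go).
+//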
+// Unless out is nil, it stores whether an element with the tag was found in +// out, otherwise out will take the default value. It reports whether all reads +// were successful. +func ReadOptionalASN1BooleanWithTag(incoming *cryptobyte.String, out *bool, tag cryptobyte_asn1.Tag, defaultValue bool) bool { + // ReadOptionalASN1 performs a peek and will not advance if the tag is + // missing, meaning that incoming will retain bytes. + var valuePresent bool + var valueBytes cryptobyte.String + if !incoming.ReadOptionalASN1(&valueBytes, &valuePresent, tag) { + return false + } + val := defaultValue + if valuePresent { + /* + X.690 (07/2002) + https://www.itu.int/rec/T-REC-X.690-200207-S/en + + Section 8.2.2: + If the boolean value is: + FALSE + the octet shall be zero. + If the boolean value is + TRUE + the octet shall have any non-zero value, as a sender's option. + + Section 11.1 Boolean values: + If the encoding represents the boolean value TRUE, its single contents octet shall have all eight + bits set to one. (Contrast with 8.2.2.) + + Succinctly, BER encoding states any nonzero value is TRUE. The DER + encoding restricts the value 0xFF as TRUE and any other: 0x01, + 0x23, 0xFE, etc as invalid encoding. + */ + boolBytes := []byte(valueBytes) + if bytes.Equal(boolBytes, []byte{0xFF}) { + val = true + } else if bytes.Equal(boolBytes, []byte{0x00}) { + val = false + } else { + // Unrecognized DER encoding of boolean! + return false + } + } + if out != nil { + *out = val + } + + // All reads were successful. + return true +} diff --git a/linter/lints/common_test.go b/linter/lints/common_test.go new file mode 100644 index 00000000000..f9a6757bd78 --- /dev/null +++ b/linter/lints/common_test.go @@ -0,0 +1,100 @@ +package lints + +import ( + "testing" + + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/letsencrypt/boulder/test" +) + +var onlyContainsUserCertsTag = asn1.Tag(1).ContextSpecific() +var onlyContainsCACertsTag = asn1.Tag(2).ContextSpecific() + +func TestReadOptionalASN1BooleanWithTag(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + // incoming will be mutated by the function under test + incoming []byte + out bool + defaultValue bool + asn1Tag asn1.Tag + expectedOk bool + // expectedTrailer counts the remaining bytes from incoming after having + // been advanced by the function under test + expectedTrailer int + expectedOut bool + }{ + { + name: "Good: onlyContainsUserCerts", + incoming: cryptobyte.String([]byte{0x81, 0x01, 0xFF}), + asn1Tag: onlyContainsUserCertsTag, + expectedOk: true, + expectedTrailer: 0, + expectedOut: true, + }, + { + name: "Good: onlyContainsCACerts", + incoming: cryptobyte.String([]byte{0x82, 0x01, 0xFF}), + asn1Tag: onlyContainsCACertsTag, + expectedOk: true, + expectedTrailer: 0, + expectedOut: true, + }, + { + name: "Good: Bytes are read and trailer remains", + incoming: cryptobyte.String([]byte{0x82, 0x01, 0xFF, 0xC0, 0xFF, 0xEE, 0xCA, 0xFE}), + asn1Tag: onlyContainsCACertsTag, + expectedOk: true, + expectedTrailer: 5, + expectedOut: true, + }, + { + name: "Bad: Read the tag, but out should be false, no trailer", + incoming: cryptobyte.String([]byte{0x82, 0x01, 0x00}), + asn1Tag: onlyContainsCACertsTag, + expectedOk: true, + expectedTrailer: 0, + expectedOut: false, + }, + { + name: "Bad: Read the tag, but out should be false, trailer remains", + incoming: cryptobyte.String([]byte{0x82, 0x01, 0x00, 0x99}), + asn1Tag: onlyContainsCACertsTag, + expectedOk: true, + expectedTrailer: 1, + 
expectedOut: false, + }, + { + name: "Bad: Wrong asn1Tag compared to incoming bytes, no bytes read", + incoming: cryptobyte.String([]byte{0x81, 0x01, 0xFF}), + asn1Tag: onlyContainsCACertsTag, + expectedOk: true, + expectedTrailer: 3, + expectedOut: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // ReadOptionalASN1BooleanWithTag accepts nil as a valid outParam to + // maintain the style of upstream x/crypto/cryptobyte, but we + // currently don't pass nil. Instead we use a reference to a + // pre-existing boolean here and in the lint code. Passing in nil + // will _do the wrong thing (TM)_ in our CRL lints. + var outParam bool + ok := ReadOptionalASN1BooleanWithTag((*cryptobyte.String)(&tc.incoming), &outParam, tc.asn1Tag, false) + t.Log("Check if reading the tag was successful:") + test.AssertEquals(t, ok, tc.expectedOk) + t.Log("Check value of the optional boolean:") + test.AssertEquals(t, outParam, tc.expectedOut) + t.Log("Bytes should be popped off of incoming as they're successfully read:") + test.AssertEquals(t, len(tc.incoming), tc.expectedTrailer) + }) + } +} diff --git a/linter/lints/cpcps/lint_crl_has_idp.go b/linter/lints/cpcps/lint_crl_has_idp.go new file mode 100644 index 00000000000..7cf3fa22181 --- /dev/null +++ b/linter/lints/cpcps/lint_crl_has_idp.go @@ -0,0 +1,203 @@ +package cpcps + +import ( + "net/url" + + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type crlHasIDP struct{} + +/************************************************ +Various root programs (and the BRs, after Ballot SC-063 passes) require that +sharded/partitioned CRLs have a specifically-encoded Issuing Distribution Point +extension. Since there's no way to tell from the CRL itself whether or not it +is sharded, we apply this lint universally to all CRLs, but as part of the Let's +Encrypt-specific suite of lints. +************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_has_idp", + Description: "Let's Encrypt CRLs must have the Issuing Distribution Point extension with appropriate contents", + Citation: "", + Source: lints.LetsEncryptCPS, + EffectiveDate: lints.CPSV33Date, + }, + Lint: NewCrlHasIDP, + }) +} + +func NewCrlHasIDP() lint.RevocationListLintInterface { + return &crlHasIDP{} +} + +func (l *crlHasIDP) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlHasIDP) Execute(c *x509.RevocationList) *lint.LintResult { + /* + Let's Encrypt issues CRLs for two distinct purposes: + 1) CRLs containing subscriber certificates created by the + crl-updater. These CRLs must have only the distributionPoint and + onlyContainsUserCerts fields set. + 2) CRLs containing subordinate CA certificates created by the + ceremony tool. These CRLs must only have the onlyContainsCACerts + field set. 
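+
+ As an illustrative (non-normative) sketch, a conforming CRL of kind 1
+ carries an extension value that decodes as:
+
+ SEQUENCE {                         -- IssuingDistributionPoint
+   [0] { [0] { [6] "http://..." } } -- distributionPoint: fullName: URI
+   [1] TRUE                         -- onlyContainsUserCerts
+ }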
+	*/
+
+	idpOID := asn1.ObjectIdentifier{2, 5, 29, 28} // id-ce-issuingDistributionPoint
+	idpe := lints.GetExtWithOID(c.Extensions, idpOID)
+	if idpe == nil {
+		return &lint.LintResult{
+			Status:  lint.Warn,
+			Details: "CRL missing IssuingDistributionPoint",
+		}
+	}
+	if !idpe.Critical {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "IssuingDistributionPoint MUST be critical",
+		}
+	}
+
+	// Step inside the outer issuingDistributionPoint sequence to get access to
+	// its constituent fields: distributionPoint [0],
+	// onlyContainsUserCerts [1], and onlyContainsCACerts [2].
+	idpv := cryptobyte.String(idpe.Value)
+	if !idpv.ReadASN1(&idpv, cryptobyte_asn1.SEQUENCE) {
+		return &lint.LintResult{
+			Status:  lint.Warn,
+			Details: "Failed to read issuingDistributionPoint",
+		}
+	}
+
+	var dpName cryptobyte.String
+	var distributionPointExists bool
+	distributionPointTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()
+	if !idpv.ReadOptionalASN1(&dpName, &distributionPointExists, distributionPointTag) {
+		return &lint.LintResult{
+			Status:  lint.Warn,
+			Details: "Failed to read IssuingDistributionPoint distributionPoint",
+		}
+	}
+
+	idp := lints.NewIssuingDistributionPoint()
+	if distributionPointExists {
+		lintErr := parseDistributionPointName(&dpName, idp)
+		if lintErr != nil {
+			return lintErr
+		}
+	}
+
+	onlyContainsUserCertsTag := cryptobyte_asn1.Tag(1).ContextSpecific()
+	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsUserCerts, onlyContainsUserCertsTag, false) {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "Failed to read IssuingDistributionPoint onlyContainsUserCerts",
+		}
+	}
+
+	onlyContainsCACertsTag := cryptobyte_asn1.Tag(2).ContextSpecific()
+	if !lints.ReadOptionalASN1BooleanWithTag(&idpv, &idp.OnlyContainsCACerts, onlyContainsCACertsTag, false) {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "Failed to read IssuingDistributionPoint onlyContainsCACerts",
+		}
+	}
+
+	if !idpv.Empty() {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "Unexpected IssuingDistributionPoint fields were found",
+		}
+	}
+
+	if idp.OnlyContainsUserCerts && idp.OnlyContainsCACerts {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE",
+		}
+	} else if idp.OnlyContainsUserCerts {
+		if len(idp.DistributionPointURIs) == 0 {
+			return &lint.LintResult{
+				Status:  lint.Error,
+				Details: "User certificate CRLs MUST have at least one DistributionPointName FullName",
+			}
+		}
+	} else if idp.OnlyContainsCACerts {
+		if len(idp.DistributionPointURIs) != 0 {
+			return &lint.LintResult{
+				Status:  lint.Error,
+				Details: "CA certificate CRLs SHOULD NOT have a DistributionPointName FullName",
+			}
+		}
+	} else {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "Neither onlyContainsUserCerts nor onlyContainsCACerts was set",
+		}
+	}
+
+	return &lint.LintResult{Status: lint.Pass}
+}
+
+// parseDistributionPointName examines the provided distributionPointName,
+// checks it for validity, and updates idp with any URI it finds. It returns
+// a non-nil LintResult if there were any problems.
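+//
+// As an illustrative sketch (lengths are for a hypothetical 45-byte URI, not
+// taken from a real CRL), the DER handed to this function looks like:
+//
+//	a0 2f       fullName [0], constructed, 47 content bytes
+//	   86 2d    uniformResourceIdentifier [6], 45 content bytes
+//	            "http://..."
+//
+// The outer distributionPoint [0] tag has already been stripped by the
+// caller's ReadOptionalASN1.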
+func parseDistributionPointName(distributionPointName *cryptobyte.String, idp *lints.IssuingDistributionPoint) *lint.LintResult { + fullNameTag := cryptobyte_asn1.Tag(0).ContextSpecific().Constructed() + if !distributionPointName.ReadASN1(distributionPointName, fullNameTag) { + return &lint.LintResult{ + Status: lint.Error, + Details: "Failed to read IssuingDistributionPoint distributionPoint fullName", + } + } + + for !distributionPointName.Empty() { + var uriBytes []byte + uriTag := cryptobyte_asn1.Tag(6).ContextSpecific() + if !distributionPointName.ReadASN1Bytes(&uriBytes, uriTag) { + return &lint.LintResult{ + Status: lint.Error, + Details: "Failed to read IssuingDistributionPoint URI", + } + } + uri, err := url.Parse(string(uriBytes)) + if err != nil { + return &lint.LintResult{ + Status: lint.Error, + Details: "Failed to parse IssuingDistributionPoint URI", + } + } + if uri.Scheme != "http" { + return &lint.LintResult{ + Status: lint.Error, + Details: "IssuingDistributionPoint URI MUST use http scheme", + } + } + idp.DistributionPointURIs = append(idp.DistributionPointURIs, uri) + } + if len(idp.DistributionPointURIs) == 0 { + return &lint.LintResult{ + Status: lint.Error, + Details: "IssuingDistributionPoint FullName URI MUST be present", + } + } else if len(idp.DistributionPointURIs) > 1 { + return &lint.LintResult{ + Status: lint.Notice, + Details: "IssuingDistributionPoint unexpectedly has more than one FullName", + } + } + + return nil +} diff --git a/linter/lints/cpcps/lint_crl_has_idp_test.go b/linter/lints/cpcps/lint_crl_has_idp_test.go new file mode 100644 index 00000000000..ff93b70903d --- /dev/null +++ b/linter/lints/cpcps/lint_crl_has_idp_test.go @@ -0,0 +1,95 @@ +package cpcps + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + linttest "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlHasIDP(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", // CRL for subscriber certs + want: lint.Pass, + }, + { + name: "good_subordinate_ca", + want: lint.Pass, + }, + { + name: "no_idp", + want: lint.Warn, + wantSubStr: "CRL missing IssuingDistributionPoint", + }, + { + name: "idp_no_dpn", + want: lint.Error, + wantSubStr: "User certificate CRLs MUST have at least one DistributionPointName FullName", + }, + { + name: "idp_no_fullname", + want: lint.Error, + wantSubStr: "Failed to read IssuingDistributionPoint distributionPoint fullName", + }, + { + name: "idp_no_uris", + want: lint.Error, + wantSubStr: "IssuingDistributionPoint FullName URI MUST be present", + }, + { + name: "idp_two_uris", + want: lint.Notice, + wantSubStr: "IssuingDistributionPoint unexpectedly has more than one FullName", + }, + { + name: "idp_https", + want: lint.Error, + wantSubStr: "IssuingDistributionPoint URI MUST use http scheme", + }, + { + name: "idp_no_usercerts", + want: lint.Error, + wantSubStr: "Neither onlyContainsUserCerts nor onlyContainsCACerts was set", + }, + { + name: "idp_some_reasons", // Subscriber cert + want: lint.Error, + wantSubStr: "Unexpected IssuingDistributionPoint fields were found", + }, + { + name: "idp_distributionPoint_and_onlyCA", + want: lint.Error, + wantSubStr: "CA certificate CRLs SHOULD NOT have a DistributionPointName FullName", + }, + { + name: "idp_distributionPoint_and_onlyUser_and_onlyCA", + want: lint.Error, + wantSubStr: "IssuingDistributionPoint should not have both onlyContainsUserCerts: TRUE and onlyContainsCACerts: TRUE", + 
},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			l := NewCrlHasIDP()
+			c := linttest.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name))
+			r := l.Execute(c)
+
+			if r.Status != tc.want {
+				t.Errorf("expected %q, got %q", tc.want, r.Status)
+			}
+			if !strings.Contains(r.Details, tc.wantSubStr) {
+				t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details)
+			}
+		})
+	}
+}
diff --git a/linter/lints/cpcps/lint_crl_has_no_aia.go b/linter/lints/cpcps/lint_crl_has_no_aia.go
new file mode 100644
index 00000000000..43f08976d80
--- /dev/null
+++ b/linter/lints/cpcps/lint_crl_has_no_aia.go
@@ -0,0 +1,51 @@
+package cpcps
+
+import (
+	"github.com/zmap/zcrypto/encoding/asn1"
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/v3/lint"
+
+	"github.com/letsencrypt/boulder/linter/lints"
+)
+
+type crlHasNoAIA struct{}
+
+/************************************************
+RFC 5280: 5.2.7
+
+The requirements around the Authority Information Access extension are
+extensive. Therefore we do not include one, and this lint instead checks
+that the extension is entirely absent from the CRL.
+************************************************/
+
+func init() {
+	lint.RegisterRevocationListLint(&lint.RevocationListLint{
+		LintMetadata: lint.LintMetadata{
+			Name:          "e_crl_has_no_aia",
+			Description:   "Let's Encrypt does not include the CRL AIA extension",
+			Citation:      "",
+			Source:        lints.LetsEncryptCPS,
+			EffectiveDate: lints.CPSV33Date,
+		},
+		Lint: NewCrlHasNoAIA,
+	})
+}
+
+func NewCrlHasNoAIA() lint.RevocationListLintInterface {
+	return &crlHasNoAIA{}
+}
+
+func (l *crlHasNoAIA) CheckApplies(c *x509.RevocationList) bool {
+	return true
+}
+
+func (l *crlHasNoAIA) Execute(c *x509.RevocationList) *lint.LintResult {
+	aiaOID := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1} // id-pe-authorityInfoAccess
+	if lints.GetExtWithOID(c.Extensions, aiaOID) != nil {
+		return &lint.LintResult{
+			Status:  lint.Notice,
+			Details: "CRL has an Authority Information Access URL",
+		}
+	}
+	return &lint.LintResult{Status: lint.Pass}
+}
diff --git a/linter/lints/cpcps/lint_crl_has_no_aia_test.go b/linter/lints/cpcps/lint_crl_has_no_aia_test.go
new file mode 100644
index 00000000000..679bfe7ba55
--- /dev/null
+++ b/linter/lints/cpcps/lint_crl_has_no_aia_test.go
@@ -0,0 +1,46 @@
+package cpcps
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/zmap/zlint/v3/lint"
+
+	"github.com/letsencrypt/boulder/linter/lints/test"
+)
+
+func TestCrlHasNoAIA(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name       string
+		want       lint.LintStatus
+		wantSubStr string
+	}{
+		{
+			name: "good",
+			want: lint.Pass,
+		},
+		{
+			name:       "aia",
+			want:       lint.Notice,
+			wantSubStr: "Authority Information Access",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			l := NewCrlHasNoAIA()
+			c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name))
+			r := l.Execute(c)
+
+			if r.Status != tc.want {
+				t.Errorf("expected %q, got %q", tc.want, r.Status)
+			}
+			if !strings.Contains(r.Details, tc.wantSubStr) {
+				t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details)
+			}
+		})
+	}
+}
diff --git a/linter/lints/cpcps/lint_crl_has_no_cert_issuers.go b/linter/lints/cpcps/lint_crl_has_no_cert_issuers.go
new file mode 100644
index 00000000000..61bed1fbb2f
--- /dev/null
+++ b/linter/lints/cpcps/lint_crl_has_no_cert_issuers.go
@@ -0,0 +1,54 @@
+package cpcps
+
+import (
+	"github.com/zmap/zcrypto/encoding/asn1"
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/v3/lint"
+
+	
"github.com/letsencrypt/boulder/linter/lints" +) + +type crlHasNoCertIssuers struct{} + +/************************************************ +RFC 5280: 5.3.3 + +Section 5.3.3 defines the Certificate Issuer entry extension. The presence of +this extension means that the CRL is an "indirect CRL", including certificates +which were issued by a different issuer than the one issuing the CRL itself. +We do not issue indirect CRLs, so our CRL entries should not have this extension. +************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_has_no_cert_issuers", + Description: "Let's Encrypt does not issue indirect CRLs", + Citation: "", + Source: lints.LetsEncryptCPS, + EffectiveDate: lints.CPSV33Date, + }, + Lint: NewCrlHasNoCertIssuers, + }) +} + +func NewCrlHasNoCertIssuers() lint.RevocationListLintInterface { + return &crlHasNoCertIssuers{} +} + +func (l *crlHasNoCertIssuers) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlHasNoCertIssuers) Execute(c *x509.RevocationList) *lint.LintResult { + certIssuerOID := asn1.ObjectIdentifier{2, 5, 29, 29} // id-ce-certificateIssuer + for _, entry := range c.RevokedCertificates { + if lints.GetExtWithOID(entry.Extensions, certIssuerOID) != nil { + return &lint.LintResult{ + Status: lint.Notice, + Details: "CRL has an entry with a Certificate Issuer extension", + } + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cpcps/lint_crl_has_no_cert_issuers_test.go b/linter/lints/cpcps/lint_crl_has_no_cert_issuers_test.go new file mode 100644 index 00000000000..c2710ad5819 --- /dev/null +++ b/linter/lints/cpcps/lint_crl_has_no_cert_issuers_test.go @@ -0,0 +1,45 @@ +package cpcps + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlHasNoCertIssuers(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "cert_issuer", + want: lint.Notice, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlHasNoCertIssuers() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/cpcps/lint_crl_is_not_delta.go b/linter/lints/cpcps/lint_crl_is_not_delta.go new file mode 100644 index 00000000000..eaa588c446e --- /dev/null +++ b/linter/lints/cpcps/lint_crl_is_not_delta.go @@ -0,0 +1,65 @@ +package cpcps + +import ( + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type crlIsNotDelta struct{} + +/************************************************ +RFC 5280: 5.2.4 + +Section 5.2.4 defines a Delta CRL, and all the requirements that come with it. +These requirements are complex and do not serve our purpose, so we ensure that +we never issue a CRL which could be construed as a Delta CRL. + +RFC 5280: 5.2.6 + +Similarly, Section 5.2.6 defines the Freshest CRL extension, which is only +applicable in the case that the CRL is a Delta CRL. 
+************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_is_not_delta", + Description: "Let's Encrypt does not issue delta CRLs", + Citation: "", + Source: lints.LetsEncryptCPS, + EffectiveDate: lints.CPSV33Date, + }, + Lint: NewCrlIsNotDelta, + }) +} + +func NewCrlIsNotDelta() lint.RevocationListLintInterface { + return &crlIsNotDelta{} +} + +func (l *crlIsNotDelta) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlIsNotDelta) Execute(c *x509.RevocationList) *lint.LintResult { + deltaCRLIndicatorOID := asn1.ObjectIdentifier{2, 5, 29, 27} // id-ce-deltaCRLIndicator + if lints.GetExtWithOID(c.Extensions, deltaCRLIndicatorOID) != nil { + return &lint.LintResult{ + Status: lint.Notice, + Details: "CRL is a Delta CRL", + } + } + + freshestCRLOID := asn1.ObjectIdentifier{2, 5, 29, 46} // id-ce-freshestCRL + if lints.GetExtWithOID(c.Extensions, freshestCRLOID) != nil { + return &lint.LintResult{ + Status: lint.Notice, + Details: "CRL has a Freshest CRL url", + } + } + + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cpcps/lint_crl_is_not_delta_test.go b/linter/lints/cpcps/lint_crl_is_not_delta_test.go new file mode 100644 index 00000000000..23137d9d68b --- /dev/null +++ b/linter/lints/cpcps/lint_crl_is_not_delta_test.go @@ -0,0 +1,51 @@ +package cpcps + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlIsNotDelta(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "delta", + want: lint.Notice, + wantSubStr: "Delta", + }, + { + name: "freshest", + want: lint.Notice, + wantSubStr: "Freshest", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlIsNotDelta() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/cpcps/lint_root_ca_cert_validity_period_greater_than_25_years.go b/linter/lints/cpcps/lint_root_ca_cert_validity_period_greater_than_25_years.go new file mode 100644 index 00000000000..a963cf1958f --- /dev/null +++ b/linter/lints/cpcps/lint_root_ca_cert_validity_period_greater_than_25_years.go @@ -0,0 +1,49 @@ +package cpcps + +import ( + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type rootCACertValidityTooLong struct{} + +func init() { + lint.RegisterCertificateLint(&lint.CertificateLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_root_ca_cert_validity_period_greater_than_25_years", + Description: "Let's Encrypt Root CA Certificates have Validity Periods of up to 25 years", + Citation: "CPS: 7.1", + Source: lints.LetsEncryptCPS, + EffectiveDate: lints.CPSV33Date, + }, + Lint: NewRootCACertValidityTooLong, + }) +} + +func NewRootCACertValidityTooLong() lint.CertificateLintInterface { + return &rootCACertValidityTooLong{} +} + +func (l *rootCACertValidityTooLong) CheckApplies(c *x509.Certificate) bool { + return util.IsRootCA(c) +} + +func (l 
*rootCACertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult { + // CPS 7.1: "Root CA Certificate Validity Period: Up to 25 years." + maxValidity := 25 * 365 * lints.BRDay + + // RFC 5280 4.1.2.5: "The validity period for a certificate is the period + // of time from notBefore through notAfter, inclusive." + certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore) + + if certValidity > maxValidity { + return &lint.LintResult{Status: lint.Error} + } + + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/cpcps/lint_subordinate_ca_cert_validity_period_greater_than_8_years.go b/linter/lints/cpcps/lint_subordinate_ca_cert_validity_period_greater_than_8_years.go new file mode 100644 index 00000000000..fdf5906c984 --- /dev/null +++ b/linter/lints/cpcps/lint_subordinate_ca_cert_validity_period_greater_than_8_years.go @@ -0,0 +1,49 @@ +package cpcps + +import ( + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type subordinateCACertValidityTooLong struct{} + +func init() { + lint.RegisterCertificateLint(&lint.CertificateLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_validity_period_greater_than_8_years", + Description: "Let's Encrypt Intermediate CA Certificates have Validity Periods of up to 8 years", + Citation: "CPS: 7.1", + Source: lints.LetsEncryptCPS, + EffectiveDate: lints.CPSV33Date, + }, + Lint: NewSubordinateCACertValidityTooLong, + }) +} + +func NewSubordinateCACertValidityTooLong() lint.CertificateLintInterface { + return &subordinateCACertValidityTooLong{} +} + +func (l *subordinateCACertValidityTooLong) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) +} + +func (l *subordinateCACertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult { + // CPS 7.1: "Intermediate CA Certificate Validity Period: Up to 8 years." + maxValidity := 8 * 365 * lints.BRDay + + // RFC 5280 4.1.2.5: "The validity period for a certificate is the period + // of time from notBefore through notAfter, inclusive." 
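+	// The Add(time.Second) below accounts for "inclusive": for example, a
+	// certificate with notBefore 00:00:00 and notAfter 23:59:59 on the same
+	// day has a validity period of exactly 24 hours, not 23h59m59s.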
+	certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
+
+	if certValidity > maxValidity {
+		return &lint.LintResult{Status: lint.Error}
+	}
+
+	return &lint.LintResult{Status: lint.Pass}
+}
diff --git a/linter/lints/cpcps/lint_subscriber_cert_validity_greater_than_100_days.go b/linter/lints/cpcps/lint_subscriber_cert_validity_greater_than_100_days.go
new file mode 100644
index 00000000000..e91e187c41e
--- /dev/null
+++ b/linter/lints/cpcps/lint_subscriber_cert_validity_greater_than_100_days.go
@@ -0,0 +1,49 @@
+package cpcps
+
+import (
+	"time"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/v3/lint"
+	"github.com/zmap/zlint/v3/util"
+
+	"github.com/letsencrypt/boulder/linter/lints"
+)
+
+type subscriberCertValidityTooLong struct{}
+
+func init() {
+	lint.RegisterCertificateLint(&lint.CertificateLint{
+		LintMetadata: lint.LintMetadata{
+			Name:          "e_subscriber_cert_validity_period_greater_than_100_days",
+			Description:   "Let's Encrypt Subscriber Certificates have Validity Periods of up to 100 days",
+			Citation:      "CPS: 7.1",
+			Source:        lints.LetsEncryptCPS,
+			EffectiveDate: lints.CPSV33Date,
+		},
+		Lint: NewSubscriberCertValidityTooLong,
+	})
+}
+
+func NewSubscriberCertValidityTooLong() lint.CertificateLintInterface {
+	return &subscriberCertValidityTooLong{}
+}
+
+func (l *subscriberCertValidityTooLong) CheckApplies(c *x509.Certificate) bool {
+	return util.IsServerAuthCert(c) && !c.IsCA
+}
+
+func (l *subscriberCertValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult {
+	// CPS 7.1: "DV SSL End Entity Certificate Validity Period: Up to 100 days."
+	maxValidity := 100 * lints.BRDay
+
+	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
+	// of time from notBefore through notAfter, inclusive."
+	certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
+
+	if certValidity > maxValidity {
+		return &lint.LintResult{Status: lint.Error}
+	}
+
+	return &lint.LintResult{Status: lint.Pass}
+}
diff --git a/linter/lints/cpcps/lint_validity_period_has_extra_second.go b/linter/lints/cpcps/lint_validity_period_has_extra_second.go
new file mode 100644
index 00000000000..e8ea3483129
--- /dev/null
+++ b/linter/lints/cpcps/lint_validity_period_has_extra_second.go
@@ -0,0 +1,45 @@
+package cpcps
+
+import (
+	"time"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/v3/lint"
+
+	"github.com/letsencrypt/boulder/linter/lints"
+)
+
+type certValidityNotRound struct{}
+
+func init() {
+	lint.RegisterCertificateLint(&lint.CertificateLint{
+		LintMetadata: lint.LintMetadata{
+			Name:          "w_validity_period_has_extra_second",
+			Description:   "Let's Encrypt Certificates have Validity Periods that are a round number of minutes",
+			Citation:      "CPS: 7.1",
+			Source:        lints.LetsEncryptCPS,
+			EffectiveDate: lints.CPSV33Date,
+		},
+		Lint: NewCertValidityNotRound,
+	})
+}
+
+func NewCertValidityNotRound() lint.CertificateLintInterface {
+	return &certValidityNotRound{}
+}
+
+func (l *certValidityNotRound) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *certValidityNotRound) Execute(c *x509.Certificate) *lint.LintResult {
+	// RFC 5280 4.1.2.5: "The validity period for a certificate is the period
+	// of time from notBefore through notAfter, inclusive."
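+	// Note that certValidity is a time.Duration, i.e. an integer count of
+	// nanoseconds, so the round-minute check below must compare against
+	// time.Minute; a bare "% 60" would test divisibility by 60 nanoseconds.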
+	certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
+
+	if certValidity%time.Minute == 0 {
+		return &lint.LintResult{Status: lint.Pass}
+	}
+
+	return &lint.LintResult{Status: lint.Error}
+}
diff --git a/linter/lints/cpcps/testdata/crl_aia.pem b/linter/lints/cpcps/testdata/crl_aia.pem
new file mode 100644
index 00000000000..406305d856c
--- /dev/null
+++ b/linter/lints/cpcps/testdata/crl_aia.pem
@@ -0,0 +1,11 @@
+-----BEGIN X509 CRL-----
+MIIBgDCCAQcCAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT
+DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg
+RTEXDTIyMDcwNjE2NDMzOFoXDTIyMDcxNTE2NDMzOFowKTAnAggDrlHbURVaPBcN
+MjIwNzA2MTU0MzM4WjAMMAoGA1UdFQQDCgEBoGIwYDAfBgNVHSMEGDAWgBQB2rt6
+yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFv9LJt+yGA8wKgYIKwYBBQUHAQEE
+HjAcMBoGCCsGAQUFBzABgg5lMS5vLmxlbmNyLm9yZzAKBggqhkjOPQQDAwNnADBk
+AjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqk
+qEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E4c5Z
+HFHTQg==
+-----END X509 CRL-----
diff --git a/linter/lints/cpcps/testdata/crl_cert_issuer.pem b/linter/lints/cpcps/testdata/crl_cert_issuer.pem
new file mode 100644
index 00000000000..3ff128cfa58
--- /dev/null
+++ b/linter/lints/cpcps/testdata/crl_cert_issuer.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBczCB+wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM
+Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF
+MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjBJMEcCCAOuUdtRFVo8Fw0y
+MjA3MDYxNTQzMzhaMCwwCgYDVR0VBAMKAQEwHgYDVR0dBBcwFYITaW50LWUxLmJv
+dWxkZXIudGVzdKA2MDQwHwYDVR0jBBgwFoAUAdq7esslII5eedb5lkIvAkEpB74w
+EQYDVR0UBAoCCBb/SybfshgPMAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzY
+bdPAi7AEEr53OIulDND4vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wn
+xjuxc+jdK7iEyJxbbegQ0eYs1lDH112u5l4UkOooPYThzlkcUdNC
+-----END X509 CRL-----
diff --git a/linter/lints/cpcps/testdata/crl_delta.pem b/linter/lints/cpcps/testdata/crl_delta.pem
new file mode 100644
index 00000000000..3019facecf3
--- /dev/null
+++ b/linter/lints/cpcps/testdata/crl_delta.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBZjCB7gIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM
+Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF
+MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y
+MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgSTBHMB8GA1UdIwQYMBaAFAHau3rL
+JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzARBgNVHRsECgIIFv9L
+Jt+yGA4wCgYIKoZIzj0EAwMDZwAwZAIwVrITRYutGjFpfNht08CLsAQSvnc4i6UM
+0Pi8+U3T8DRHImIiuB9cQ+qxULB6pKhBAjBbuGCwTop7vCfGO7Fz6N0ruITInFtt
+6BDR5izWUMfXXa7mXhSQ6ig9hOHOWRxR00I=
+-----END X509 CRL-----
diff --git a/linter/lints/cpcps/testdata/crl_freshest.pem b/linter/lints/cpcps/testdata/crl_freshest.pem
new file mode 100644
index 00000000000..196871fa11e
--- /dev/null
+++ b/linter/lints/cpcps/testdata/crl_freshest.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBdjCB/gIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM
+Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF
+MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y
+MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgWTBXMB8GA1UdIwQYMBaAFAHau3rL
+JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggW/0sm37IYDzAhBgNVHS4EGjAYMBaA
+FIASMBCCDmUxLmMubGVuY3Iub3JnMAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRox
+aXzYbdPAi7AEEr53OIulDND4vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6K
+e7wnxjuxc+jdK7iEyJxbbegQ0eYs1lDH112u5l4UkOooPYThzlkcUdNC
+-----END X509 CRL-----
diff --git a/linter/lints/cpcps/testdata/crl_good.pem b/linter/lints/cpcps/testdata/crl_good.pem
new file mode 100644
index 
00000000000..8b383d0a07e --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_good.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_good_subordinate_ca.pem b/linter/lints/cpcps/testdata/crl_good_subordinate_ca.pem new file mode 100644 index 00000000000..a476c16fdfd --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_good_subordinate_ca.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZDCB7AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgRzBFMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTAPBgNVHRwBAf8EBTAD +ggH/MAoGCCqGSM49BAMDA2cAMGQCMC8OQhSdNhq8nqHzrTowPIWHa7D9wX45Wczi +wTydR0bLRdiDSEZ9tHgxj6RHFFBrIgIwV5A+lykivTOBek/qVRdTStwtK9q25p5B +JWvbicaNns/LS9z3jDSfuJ1nzCN7n78z +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyCA.pem b/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyCA.pem new file mode 100644 index 00000000000..50b194c9c78 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyCA.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIIB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem b/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem new file mode 100644 index 00000000000..2513e3c7f89 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_distributionPoint_and_onlyUser_and_onlyCA.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmzCCASICAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoH0wezAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwRQYDVR0cAQH/BDsw +OaAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/4IB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw +/cF+OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rc +LSvatuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_https.pem b/linter/lints/cpcps/testdata/crl_idp_https.pem 
new file mode 100644 index 00000000000..3a5bdfa3ad4 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_https.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmTCCASACAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHsweTAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQwYDVR0cAQH/BDkw +N6AyoDCGLmh0dHBzOi8vYy5ib3VsZGVyLnRlc3QvNjYyODM3NTY5MTM1ODgyODgv +MC5jcmyBAf8wCgYIKoZIzj0EAwMDZwAwZAIwLw5CFJ02GryeofOtOjA8hYdrsP3B +fjlZzOLBPJ1HRstF2INIRn20eDGPpEcUUGsiAjBXkD6XKSK9M4F6T+pVF1NK3C0r +2rbmnkEla9uJxo2ez8tL3PeMNJ+4nWfMI3ufvzM= +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_no_dpn.pem b/linter/lints/cpcps/testdata/crl_idp_no_dpn.pem new file mode 100644 index 00000000000..ddfcb136b37 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_no_dpn.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZDCB7AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgRzBFMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTAPBgNVHRwBAf8EBTAD +gQH/MAoGCCqGSM49BAMDA2cAMGQCMC8OQhSdNhq8nqHzrTowPIWHa7D9wX45Wczi +wTydR0bLRdiDSEZ9tHgxj6RHFFBrIgIwV5A+lykivTOBek/qVRdTStwtK9q25p5B +JWvbicaNns/LS9z3jDSfuJ1nzCN7n78z +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_no_fullname.pem b/linter/lints/cpcps/testdata/crl_idp_no_fullname.pem new file mode 100644 index 00000000000..036dbbca035 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_no_fullname.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBZjCB7gIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgSTBHMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTARBgNVHRwBAf8EBzAF +oACBAf8wCgYIKoZIzj0EAwMDZwAwZAIwLw5CFJ02GryeofOtOjA8hYdrsP3BfjlZ +zOLBPJ1HRstF2INIRn20eDGPpEcUUGsiAjBXkD6XKSK9M4F6T+pVF1NK3C0r2rbm +nkEla9uJxo2ez8tL3PeMNJ+4nWfMI3ufvzM= +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_no_uris.pem b/linter/lints/cpcps/testdata/crl_idp_no_uris.pem new file mode 100644 index 00000000000..117d36bda45 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_no_uris.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBaDCB8AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgSzBJMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTATBgNVHRwBAf8ECTAH +oAKgAIEB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_no_usercerts.pem b/linter/lints/cpcps/testdata/crl_idp_no_usercerts.pem new file mode 100644 index 00000000000..ff95bd9f735 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_no_usercerts.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBlTCCARwCAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT 
+DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHcwdTAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwPwYDVR0cAQH/BDUw +M6AxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybDAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+OVnM +4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSvatuae +QSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_some_reasons.pem b/linter/lints/cpcps/testdata/crl_idp_some_reasons.pem new file mode 100644 index 00000000000..e8eb9713325 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_some_reasons.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBnDCCASMCAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoH4wfDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwRgYDVR0cAQH/BDww +OqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/6MCBkAwCgYIKoZIzj0EAwMDZwAwZAIwLw5CFJ02GryeofOtOjA8hYdr +sP3BfjlZzOLBPJ1HRstF2INIRn20eDGPpEcUUGsiAjBXkD6XKSK9M4F6T+pVF1NK +3C0r2rbmnkEla9uJxo2ez8tL3PeMNJ+4nWfMI3ufvzM= +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_idp_two_uris.pem b/linter/lints/cpcps/testdata/crl_idp_two_uris.pem new file mode 100644 index 00000000000..4294a25267b --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_idp_two_uris.pem @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIByTCCAVACAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoIGqMIGnMB8GA1UdIwQYMBaAFAHa +u3rLJSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTBxBgNVHRwBAf8E +ZzBloGCgXoYtaHR0cDovL2MuYm91bGRlci50ZXN0LzY2MjgzNzU2OTEzNTg4Mjg4 +LzAuY3Jshi1odHRwOi8vYy5ib3VsZGVyLnRlc3QvNjYyODM3NTY5MTM1ODgyODgv +MS5jcmyBAf8wCgYIKoZIzj0EAwMDZwAwZAIwLw5CFJ02GryeofOtOjA8hYdrsP3B +fjlZzOLBPJ1HRstF2INIRn20eDGPpEcUUGsiAjBXkD6XKSK9M4F6T+pVF1NK3C0r +2rbmnkEla9uJxo2ez8tL3PeMNJ+4nWfMI3ufvzM= +-----END X509 CRL----- diff --git a/linter/lints/cpcps/testdata/crl_no_idp.pem b/linter/lints/cpcps/testdata/crl_no_idp.pem new file mode 100644 index 00000000000..18470cca0d7 --- /dev/null +++ b/linter/lints/cpcps/testdata/crl_no_idp.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBUzCB2wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIxMDEwMjAxMjA3WhcNMjIxMDE5MjAxMjA2WjApMCcCCAOuUdtRFVo8Fw0y +MjEwMTAxOTEyMDdaMAwwCgYDVR0VBAMKAQGgNjA0MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBEGA1UdFAQKAggXHM495IK6YTAKBggqhkjOPQQDAwNn +ADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+OVnM4sE8nUdGy0XYg0hGfbR4MY+k +RxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSvatuaeQSVr24nGjZ7Py0vc94w0n7id +Z8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/intermediate/e_validity_period_greater_than_8_years.go b/linter/lints/intermediate/e_validity_period_greater_than_8_years.go deleted file mode 100644 index 939e3334e1c..00000000000 --- a/linter/lints/intermediate/e_validity_period_greater_than_8_years.go +++ /dev/null @@ -1,47 +0,0 @@ -package subscriber - -import ( - "time" - - "github.com/zmap/zcrypto/x509" - "github.com/zmap/zlint/v3/lint" - 
"github.com/zmap/zlint/v3/util" - - "github.com/letsencrypt/boulder/linter/lints" -) - -type certValidityTooLong struct{} - -func init() { - lint.RegisterLint(&lint.Lint{ - Name: "e_validity_period_greater_than_8_years", - Description: "Let's Encrypt Intermediate CA Certificates have Validity Periods of up to 8 years", - Citation: "CPS: 7.1", - Source: lints.LetsEncryptCPSIntermediate, - EffectiveDate: lints.CPSV33Date, - Lint: NewCertValidityTooLong, - }) -} - -func NewCertValidityTooLong() lint.LintInterface { - return &certValidityTooLong{} -} - -func (l *certValidityTooLong) CheckApplies(c *x509.Certificate) bool { - return util.IsSubCA(c) -} - -func (l *certValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult { - // CPS 7.1: "Intermediate CA Certificate Validity Period: Up to 8 years." - maxValidity := 8 * 365 * lints.BRDay - - // RFC 5280 4.1.2.5: "The validity period for a certificate is the period - // of time from notBefore through notAfter, inclusive." - certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore) - - if certValidity > maxValidity { - return &lint.LintResult{Status: lint.Error} - } - - return &lint.LintResult{Status: lint.Pass} -} diff --git a/linter/lints/rfc/lint_cert_via_pkimetal.go b/linter/lints/rfc/lint_cert_via_pkimetal.go new file mode 100644 index 00000000000..31fc08d8135 --- /dev/null +++ b/linter/lints/rfc/lint_cert_via_pkimetal.go @@ -0,0 +1,158 @@ +package rfc + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strings" + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" +) + +// PKIMetalConfig and its execute method provide a shared basis for linting +// both certs and CRLs using PKIMetal. +type PKIMetalConfig struct { + Addr string `toml:"addr" comment:"The address where a pkilint REST API can be reached."` + Severity string `toml:"severity" comment:"The minimum severity of findings to report (meta, debug, info, notice, warning, error, bug, or fatal)."` + Timeout time.Duration `toml:"timeout" comment:"How long, in nanoseconds, to wait before giving up."` + IgnoreLints []string `toml:"ignore_lints" comment:"The unique Validator:Code IDs of lint findings which should be ignored."` +} + +func (pkim *PKIMetalConfig) execute(endpoint string, der []byte) (*lint.LintResult, error) { + timeout := pkim.Timeout + if timeout == 0 { + timeout = 100 * time.Millisecond + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + apiURL, err := url.JoinPath(pkim.Addr, endpoint) + if err != nil { + return nil, fmt.Errorf("constructing pkimetal url: %w", err) + } + + // reqForm matches PKIMetal's documented form-urlencoded request format. It + // does not include the "profile" field, as its default value ("autodetect") + // is good for our purposes. 
+ // https://github.com/pkimetal/pkimetal/blob/578ac224a7ca3775af51b47fce16c95753d9ac8d/doc/openapi.yaml#L179-L194 + reqForm := url.Values{} + reqForm.Set("b64input", base64.StdEncoding.EncodeToString(der)) + reqForm.Set("severity", pkim.Severity) + reqForm.Set("format", "json") + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(reqForm.Encode())) + if err != nil { + return nil, fmt.Errorf("creating pkimetal request: %w", err) + } + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + req.Header.Add("Accept", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("making POST request to pkimetal API: %s (timeout %s)", err, timeout) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("got status %d (%s) from pkimetal API", resp.StatusCode, resp.Status) + } + + resJSON, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading response from pkimetal API: %s", err) + } + + // finding matches the repeated portion of PKIMetal's documented JSON response. + // https://github.com/pkimetal/pkimetal/blob/578ac224a7ca3775af51b47fce16c95753d9ac8d/doc/openapi.yaml#L201-L221 + type finding struct { + Linter string `json:"linter"` + Finding string `json:"finding"` + Severity string `json:"severity"` + Code string `json:"code"` + Field string `json:"field"` + } + + var res []finding + err = json.Unmarshal(resJSON, &res) + if err != nil { + return nil, fmt.Errorf("parsing response from pkimetal API: %s", err) + } + + var findings []string + for _, finding := range res { + var id string + if finding.Code != "" { + id = fmt.Sprintf("%s:%s", finding.Linter, finding.Code) + } else { + id = fmt.Sprintf("%s:%s", finding.Linter, strings.ReplaceAll(strings.ToLower(finding.Finding), " ", "_")) + } + if slices.Contains(pkim.IgnoreLints, id) { + continue + } + desc := fmt.Sprintf("%s from %s: %s", finding.Severity, id, finding.Finding) + findings = append(findings, desc) + } + + if len(findings) != 0 { + // Group the findings by severity, for human readers. + slices.Sort(findings) + return &lint.LintResult{ + Status: lint.Error, + Details: fmt.Sprintf("got %d lint findings from pkimetal API: %s", len(findings), strings.Join(findings, "; ")), + }, nil + } + + return &lint.LintResult{Status: lint.Pass}, nil +} + +type certViaPKIMetal struct { + PKIMetalConfig +} + +func init() { + lint.RegisterCertificateLint(&lint.CertificateLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_pkimetal_lint_cabf_serverauth_cert", + Description: "Runs pkimetal's suite of cabf serverauth certificate lints", + Citation: "https://github.com/pkimetal/pkimetal", + Source: lint.Community, + EffectiveDate: util.CABEffectiveDate, + }, + Lint: NewCertViaPKIMetal, + }) +} + +func NewCertViaPKIMetal() lint.CertificateLintInterface { + return &certViaPKIMetal{} +} + +func (l *certViaPKIMetal) Configure() any { + return l +} + +func (l *certViaPKIMetal) CheckApplies(c *x509.Certificate) bool { + // This lint applies to all certificates issued by Boulder, as long as it has + // been configured with an address to reach out to. If not, skip it. 
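+	//
+	// A hypothetical TOML configuration enabling this lint (field names are
+	// real, values are made up) might look like:
+	//
+	//   addr = "http://localhost:16001"
+	//   severity = "notice"
+	//   timeout = 500000000 # 500ms, expressed in nanoseconds
+	//   ignore_lints = ["somelinter:some_finding_code"]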
+	return l.Addr != ""
+}
+
+func (l *certViaPKIMetal) Execute(c *x509.Certificate) *lint.LintResult {
+	res, err := l.execute("lintcert", c.Raw)
+	if err != nil {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: err.Error(),
+		}
+	}
+
+	return res
+}
diff --git a/linter/lints/rfc/lint_crl_has_aki.go b/linter/lints/rfc/lint_crl_has_aki.go
new file mode 100644
index 00000000000..58e7b5c0087
--- /dev/null
+++ b/linter/lints/rfc/lint_crl_has_aki.go
@@ -0,0 +1,62 @@
+package rfc
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/v3/lint"
+	"github.com/zmap/zlint/v3/util"
+	"golang.org/x/crypto/cryptobyte"
+	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+)
+
+type crlHasAKI struct{}
+
+/************************************************
+RFC 5280: 5.2.1
+Conforming CRL issuers MUST use the key identifier method, and MUST include this
+extension in all CRLs issued.
+************************************************/
+
+func init() {
+	lint.RegisterRevocationListLint(&lint.RevocationListLint{
+		LintMetadata: lint.LintMetadata{
+			Name:          "e_crl_has_aki",
+			Description:   "Conforming CRL issuers MUST use the key identifier method, and MUST include this extension in all CRLs issued",
+			Citation:      "RFC 5280: 5.2.1",
+			Source:        lint.RFC5280,
+			EffectiveDate: util.RFC5280Date,
+		},
+		Lint: NewCrlHasAKI,
+	})
+}
+
+func NewCrlHasAKI() lint.RevocationListLintInterface {
+	return &crlHasAKI{}
+}
+
+func (l *crlHasAKI) CheckApplies(c *x509.RevocationList) bool {
+	return true
+}
+
+func (l *crlHasAKI) Execute(c *x509.RevocationList) *lint.LintResult {
+	if len(c.AuthorityKeyId) == 0 {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "CRLs MUST include the authority key identifier extension",
+		}
+	}
+	aki := cryptobyte.String(c.AuthorityKeyId)
+	var akiBody cryptobyte.String
+	if !aki.ReadASN1(&akiBody, cryptobyte_asn1.SEQUENCE) {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "CRL has a malformed authority key identifier extension",
+		}
+	}
+	if !akiBody.PeekASN1Tag(cryptobyte_asn1.Tag(0).ContextSpecific()) {
+		return &lint.LintResult{
+			Status:  lint.Error,
+			Details: "CRLs MUST use the key identifier method in the authority key identifier extension",
+		}
+	}
+	return &lint.LintResult{Status: lint.Pass}
+}
diff --git a/linter/lints/rfc/lint_crl_has_aki_test.go b/linter/lints/rfc/lint_crl_has_aki_test.go
new file mode 100644
index 00000000000..776727df475
--- /dev/null
+++ b/linter/lints/rfc/lint_crl_has_aki_test.go
@@ -0,0 +1,51 @@
+package rfc
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/zmap/zlint/v3/lint"
+
+	"github.com/letsencrypt/boulder/linter/lints/test"
+)
+
+func TestCrlHasAKI(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		name       string
+		want       lint.LintStatus
+		wantSubStr string
+	}{
+		{
+			name: "good",
+			want: lint.Pass,
+		},
+		{
+			name:       "no_aki",
+			want:       lint.Error,
+			wantSubStr: "MUST include the authority key identifier",
+		},
+		{
+			name:       "aki_name_and_serial",
+			want:       lint.Error,
+			wantSubStr: "MUST use the key identifier method",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			l := NewCrlHasAKI()
+			c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name))
+			r := l.Execute(c)
+
+			if r.Status != tc.want {
+				t.Errorf("expected %q, got %q", tc.want, r.Status)
+			}
+			if !strings.Contains(r.Details, tc.wantSubStr) {
+				t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details)
+			}
+		})
+	}
+}
diff --git a/linter/lints/rfc/lint_crl_has_issuer_name.go b/linter/lints/rfc/lint_crl_has_issuer_name.go
new file mode 100644
index 00000000000..192d0ebd85e
--- /dev/null
+++ 
b/linter/lints/rfc/lint_crl_has_issuer_name.go @@ -0,0 +1,50 @@ +package rfc + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" +) + +type crlHasIssuerName struct{} + +/************************************************ +RFC 5280: 5.1.2.3 +The issuer field MUST contain a non-empty X.500 distinguished name (DN). + +This lint does not enforce that the issuer field complies with the rest of +the encoding rules of a certificate issuer name, because it (perhaps wrongly) +assumes that those were checked when the issuer was itself issued, and on all +certificates issued by this CRL issuer. +************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_has_issuer_name", + Description: "The CRL Issuer field MUST contain a non-empty X.500 distinguished name", + Citation: "RFC 5280: 5.1.2.3", + Source: lint.RFC5280, + EffectiveDate: util.RFC5280Date, + }, + Lint: NewCrlHasIssuerName, + }) +} + +func NewCrlHasIssuerName() lint.RevocationListLintInterface { + return &crlHasIssuerName{} +} + +func (l *crlHasIssuerName) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlHasIssuerName) Execute(c *x509.RevocationList) *lint.LintResult { + if len(c.Issuer.Names) == 0 { + return &lint.LintResult{ + Status: lint.Error, + Details: "The CRL Issuer field MUST contain a non-empty X.500 distinguished name", + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/rfc/lint_crl_has_issuer_name_test.go b/linter/lints/rfc/lint_crl_has_issuer_name_test.go new file mode 100644 index 00000000000..ef6dcf38db7 --- /dev/null +++ b/linter/lints/rfc/lint_crl_has_issuer_name_test.go @@ -0,0 +1,46 @@ +package rfc + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlHasIssuerName(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "no_issuer_name", + want: lint.Error, + wantSubStr: "MUST contain a non-empty X.500 distinguished name", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlHasIssuerName() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/rfc/lint_crl_has_number.go b/linter/lints/rfc/lint_crl_has_number.go new file mode 100644 index 00000000000..3120abd1162 --- /dev/null +++ b/linter/lints/rfc/lint_crl_has_number.go @@ -0,0 +1,67 @@ +package rfc + +import ( + "github.com/zmap/zcrypto/encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + + "github.com/letsencrypt/boulder/linter/lints" +) + +type crlHasNumber struct{} + +/************************************************ +RFC 5280: 5.2.3 +CRL issuers conforming to this profile MUST include this extension in all CRLs +and MUST mark this extension as non-critical. Conforming CRL issuers MUST NOT +use CRLNumber values longer than 20 octets. 
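+
+Note on the encoding check below: c.Number is a big.Int, whose Bytes method
+yields the unsigned magnitude. A DER INTEGER whose contents would otherwise
+begin with the high bit set gains a leading 0x00 pad octet, so a 20-octet
+magnitude with its top bit set actually encodes as 21 octets; Execute rejects
+that case explicitly.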
+************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_has_number", + Description: "CRLs must have a well-formed CRL Number extension", + Citation: "RFC 5280: 5.2.3", + Source: lint.RFC5280, + EffectiveDate: util.RFC5280Date, + }, + Lint: NewCrlHasNumber, + }) +} + +func NewCrlHasNumber() lint.RevocationListLintInterface { + return &crlHasNumber{} +} + +func (l *crlHasNumber) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlHasNumber) Execute(c *x509.RevocationList) *lint.LintResult { + if c.Number == nil { + return &lint.LintResult{ + Status: lint.Error, + Details: "CRLs MUST include the CRL number extension", + } + } + + crlNumberOID := asn1.ObjectIdentifier{2, 5, 29, 20} // id-ce-cRLNumber + ext := lints.GetExtWithOID(c.Extensions, crlNumberOID) + if ext != nil && ext.Critical { + return &lint.LintResult{ + Status: lint.Error, + Details: "CRL Number MUST NOT be marked critical", + } + } + + numBytes := c.Number.Bytes() + if len(numBytes) > 20 || (len(numBytes) == 20 && numBytes[0]&0x80 != 0) { + return &lint.LintResult{ + Status: lint.Error, + Details: "CRL Number MUST NOT be longer than 20 octets", + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/rfc/lint_crl_has_number_test.go b/linter/lints/rfc/lint_crl_has_number_test.go new file mode 100644 index 00000000000..a9225aeaca0 --- /dev/null +++ b/linter/lints/rfc/lint_crl_has_number_test.go @@ -0,0 +1,56 @@ +package rfc + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlHasNumber(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "no_number", + want: lint.Error, + wantSubStr: "MUST include the CRL number", + }, + { + name: "critical_number", + want: lint.Error, + wantSubStr: "MUST NOT be marked critical", + }, + { + name: "long_number", + want: lint.Error, + wantSubStr: "MUST NOT be longer than 20 octets", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlHasNumber() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/rfc/lint_crl_has_valid_timestamps.go b/linter/lints/rfc/lint_crl_has_valid_timestamps.go new file mode 100644 index 00000000000..0546d62c5e7 --- /dev/null +++ b/linter/lints/rfc/lint_crl_has_valid_timestamps.go @@ -0,0 +1,230 @@ +package rfc + +import ( + "errors" + "fmt" + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +const ( + utcTimeFormat = "YYMMDDHHMMSSZ" + generalizedTimeFormat = "YYYYMMDDHHMMSSZ" +) + +type crlHasValidTimestamps struct{} + +/************************************************ +RFC 5280: 5.1.2.4 +CRL issuers conforming to this profile MUST encode thisUpdate as UTCTime for +dates through the year 2049. CRL issuers conforming to this profile MUST encode +thisUpdate as GeneralizedTime for dates in the year 2050 or later. 
Conforming +applications MUST be able to process dates that are encoded in either UTCTime or +GeneralizedTime. + +Where encoded as UTCTime, thisUpdate MUST be specified and interpreted as +defined in Section 4.1.2.5.1. Where encoded as GeneralizedTime, thisUpdate MUST +be specified and interpreted as defined in Section 4.1.2.5.2. + +RFC 5280: 5.1.2.5 +CRL issuers conforming to this profile MUST encode nextUpdate as UTCTime for +dates through the year 2049. CRL issuers conforming to this profile MUST encode +nextUpdate as GeneralizedTime for dates in the year 2050 or later. Conforming +applications MUST be able to process dates that are encoded in either UTCTime or +GeneralizedTime. + +Where encoded as UTCTime, nextUpdate MUST be specified and interpreted as +defined in Section 4.1.2.5.1. Where encoded as GeneralizedTime, nextUpdate MUST +be specified and interpreted as defined in Section 4.1.2.5.2. + +RFC 5280: 5.1.2.6 +The time for revocationDate MUST be expressed as described in Section 5.1.2.4. + +RFC 5280: 4.1.2.5.1 +UTCTime values MUST be expressed in Greenwich Mean Time (Zulu) and MUST include +seconds (i.e., times are YYMMDDHHMMSSZ), even where the number of seconds is +zero. + +RFC 5280: 4.1.2.5.2 +GeneralizedTime values MUST be expressed in Greenwich Mean Time (Zulu) and MUST +include seconds (i.e., times are YYYYMMDDHHMMSSZ), even where the number of +seconds is zero. GeneralizedTime values MUST NOT include fractional seconds. +************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_has_valid_timestamps", + Description: "CRL thisUpdate, nextUpdate, and revocationDates must be properly encoded", + Citation: "RFC 5280: 5.1.2.4, 5.1.2.5, and 5.1.2.6", + Source: lint.RFC5280, + EffectiveDate: util.RFC5280Date, + }, + Lint: NewCrlHasValidTimestamps, + }) +} + +func NewCrlHasValidTimestamps() lint.RevocationListLintInterface { + return &crlHasValidTimestamps{} +} + +func (l *crlHasValidTimestamps) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlHasValidTimestamps) Execute(c *x509.RevocationList) *lint.LintResult { + input := cryptobyte.String(c.RawTBSRevocationList) + lintFail := lint.LintResult{ + Status: lint.Error, + Details: "Failed to re-parse tbsCertList during linting", + } + + // Read tbsCertList. + var tbs cryptobyte.String + if !input.ReadASN1(&tbs, cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + // Skip (optional) version. + if !tbs.SkipOptionalASN1(cryptobyte_asn1.INTEGER) { + return &lintFail + } + + // Skip signature. + if !tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + // Skip issuer. + if !tbs.SkipASN1(cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + // Read thisUpdate. + var thisUpdate cryptobyte.String + var thisUpdateTag cryptobyte_asn1.Tag + if !tbs.ReadAnyASN1Element(&thisUpdate, &thisUpdateTag) { + return &lintFail + } + + // Lint thisUpdate. + err := lintTimestamp(&thisUpdate, thisUpdateTag) + if err != nil { + return &lint.LintResult{Status: lint.Error, Details: err.Error()} + } + + // Peek (optional) nextUpdate. + if tbs.PeekASN1Tag(cryptobyte_asn1.UTCTime) || tbs.PeekASN1Tag(cryptobyte_asn1.GeneralizedTime) { + // Read nextUpdate. + var nextUpdate cryptobyte.String + var nextUpdateTag cryptobyte_asn1.Tag + if !tbs.ReadAnyASN1Element(&nextUpdate, &nextUpdateTag) { + return &lintFail + } + + // Lint nextUpdate. 
+ err = lintTimestamp(&nextUpdate, nextUpdateTag) + if err != nil { + return &lint.LintResult{Status: lint.Error, Details: err.Error()} + } + } + + // Peek (optional) revokedCertificates. + if tbs.PeekASN1Tag(cryptobyte_asn1.SEQUENCE) { + // Read sequence of revokedCertificate. + var revokedSeq cryptobyte.String + if !tbs.ReadASN1(&revokedSeq, cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + // Iterate over each revokedCertificate sequence. + for !revokedSeq.Empty() { + // Read revokedCertificate. + var certSeq cryptobyte.String + if !revokedSeq.ReadASN1Element(&certSeq, cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + if !certSeq.ReadASN1(&certSeq, cryptobyte_asn1.SEQUENCE) { + return &lintFail + } + + // Skip userCertificate (serial number). + if !certSeq.SkipASN1(cryptobyte_asn1.INTEGER) { + return &lintFail + } + + // Read revocationDate. + var revocationDate cryptobyte.String + var revocationDateTag cryptobyte_asn1.Tag + if !certSeq.ReadAnyASN1Element(&revocationDate, &revocationDateTag) { + return &lintFail + } + + // Lint revocationDate. + err = lintTimestamp(&revocationDate, revocationDateTag) + if err != nil { + return &lint.LintResult{Status: lint.Error, Details: err.Error()} + } + } + } + return &lint.LintResult{Status: lint.Pass} +} + +func lintTimestamp(der *cryptobyte.String, tag cryptobyte_asn1.Tag) error { + // Preserve the original timestamp for length checking. + derBytes := *der + var tsBytes cryptobyte.String + if !derBytes.ReadASN1(&tsBytes, tag) { + return errors.New("failed to read timestamp") + } + tsLen := len(string(tsBytes)) + + var parsedTime time.Time + switch tag { + case cryptobyte_asn1.UTCTime: + // Verify that the timestamp is properly formatted. + if tsLen != len(utcTimeFormat) { + return fmt.Errorf("timestamps encoded using UTCTime MUST be specified in the format %q", utcTimeFormat) + } + + if !der.ReadASN1UTCTime(&parsedTime) { + return errors.New("failed to read timestamp encoded using UTCTime") + } + + // Verify that the timestamp is prior to the year 2050. This should + // really never happen. + if parsedTime.Year() > 2049 { + return errors.New("ReadASN1UTCTime returned a UTCTime after 2049") + } + case cryptobyte_asn1.GeneralizedTime: + // Verify that the timestamp is properly formatted. + if tsLen != len(generalizedTimeFormat) { + return fmt.Errorf( + "timestamps encoded using GeneralizedTime MUST be specified in the format %q", generalizedTimeFormat, + ) + } + + if !der.ReadASN1GeneralizedTime(&parsedTime) { + return fmt.Errorf("failed to read timestamp encoded using GeneralizedTime") + } + + // Verify that the timestamp occurred after the year 2049. + if parsedTime.Year() < 2050 { + return errors.New("timestamps prior to 2050 MUST be encoded using UTCTime") + } + default: + return errors.New("unsupported time format") + } + + // Verify that the location is UTC. 
+ if parsedTime.Location() != time.UTC { + return errors.New("time must be in UTC") + } + return nil +} diff --git a/linter/lints/rfc/lint_crl_has_valid_timestamps_test.go b/linter/lints/rfc/lint_crl_has_valid_timestamps_test.go new file mode 100644 index 00000000000..137ab89fa4a --- /dev/null +++ b/linter/lints/rfc/lint_crl_has_valid_timestamps_test.go @@ -0,0 +1,64 @@ +package rfc + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlHasValidTimestamps(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "good_utctime_1950", + want: lint.Pass, + }, + { + name: "good_gentime_2050", + want: lint.Pass, + }, + { + name: "gentime_2049", + want: lint.Error, + wantSubStr: "timestamps prior to 2050 MUST be encoded using UTCTime", + }, + { + name: "utctime_no_seconds", + want: lint.Error, + wantSubStr: "timestamps encoded using UTCTime MUST be specified in the format \"YYMMDDHHMMSSZ\"", + }, + { + name: "gentime_revoked_2049", + want: lint.Error, + wantSubStr: "timestamps prior to 2050 MUST be encoded using UTCTime", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlHasValidTimestamps() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list.go b/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list.go new file mode 100644 index 00000000000..053da88b890 --- /dev/null +++ b/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list.go @@ -0,0 +1,46 @@ +package rfc + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" +) + +type crlNoEmptyRevokedCertsList struct{} + +/************************************************ +RFC 5280: 5.1.2.6 +When there are no revoked certificates, the revoked certificates list MUST be +absent. 
+************************************************/ + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_crl_no_empty_revoked_certificates_list", + Description: "When there are no revoked certificates, the revoked certificates list MUST be absent.", + Citation: "RFC 5280: 5.1.2.6", + Source: lint.RFC5280, + EffectiveDate: util.RFC5280Date, + }, + Lint: NewCrlNoEmptyRevokedCertsList, + }) +} + +func NewCrlNoEmptyRevokedCertsList() lint.RevocationListLintInterface { + return &crlNoEmptyRevokedCertsList{} +} + +func (l *crlNoEmptyRevokedCertsList) CheckApplies(c *x509.RevocationList) bool { + return true +} + +func (l *crlNoEmptyRevokedCertsList) Execute(c *x509.RevocationList) *lint.LintResult { + if c.RevokedCertificates != nil && len(c.RevokedCertificates) == 0 { + return &lint.LintResult{ + Status: lint.Error, + Details: "If the revokedCertificates list is empty, it must not be present", + } + } + return &lint.LintResult{Status: lint.Pass} +} diff --git a/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list_test.go b/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list_test.go new file mode 100644 index 00000000000..d0361a812ae --- /dev/null +++ b/linter/lints/rfc/lint_crl_no_empty_revoked_certificates_list_test.go @@ -0,0 +1,50 @@ +package rfc + +import ( + "fmt" + "strings" + "testing" + + "github.com/zmap/zlint/v3/lint" + + "github.com/letsencrypt/boulder/linter/lints/test" +) + +func TestCrlNoEmptyRevokedCertsList(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + want lint.LintStatus + wantSubStr string + }{ + { + name: "good", + want: lint.Pass, + }, + { + name: "none_revoked", + want: lint.Pass, + }, + { + name: "empty_revoked", + want: lint.Error, + wantSubStr: "must not be present", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + l := NewCrlNoEmptyRevokedCertsList() + c := test.LoadPEMCRL(t, fmt.Sprintf("testdata/crl_%s.pem", tc.name)) + r := l.Execute(c) + + if r.Status != tc.want { + t.Errorf("expected %q, got %q", tc.want, r.Status) + } + if !strings.Contains(r.Details, tc.wantSubStr) { + t.Errorf("expected %q, got %q", tc.wantSubStr, r.Details) + } + }) + } +} diff --git a/linter/lints/rfc/lint_crl_via_pkimetal.go b/linter/lints/rfc/lint_crl_via_pkimetal.go new file mode 100644 index 00000000000..c927eebe525 --- /dev/null +++ b/linter/lints/rfc/lint_crl_via_pkimetal.go @@ -0,0 +1,50 @@ +package rfc + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/v3/lint" + "github.com/zmap/zlint/v3/util" +) + +type crlViaPKIMetal struct { + PKIMetalConfig +} + +func init() { + lint.RegisterRevocationListLint(&lint.RevocationListLint{ + LintMetadata: lint.LintMetadata{ + Name: "e_pkimetal_lint_cabf_serverauth_crl", + Description: "Runs pkimetal's suite of cabf serverauth CRL lints", + Citation: "https://github.com/pkimetal/pkimetal", + Source: lint.Community, + EffectiveDate: util.CABEffectiveDate, + }, + Lint: NewCrlViaPKIMetal, + }) +} + +func NewCrlViaPKIMetal() lint.RevocationListLintInterface { + return &crlViaPKIMetal{} +} + +func (l *crlViaPKIMetal) Configure() any { + return l +} + +func (l *crlViaPKIMetal) CheckApplies(c *x509.RevocationList) bool { + // This lint applies to all CRLs issued by Boulder, as long as it has + // been configured with an address to reach out to. If not, skip it. 
+ return l.Addr != "" +} + +func (l *crlViaPKIMetal) Execute(c *x509.RevocationList) *lint.LintResult { + res, err := l.execute("lintcrl", c.Raw) + if err != nil { + return &lint.LintResult{ + Status: lint.Error, + Details: err.Error(), + } + } + + return res +} diff --git a/linter/lints/rfc/testdata/crl_aki_name_and_serial.pem b/linter/lints/rfc/testdata/crl_aki_name_and_serial.pem new file mode 100644 index 00000000000..f223479e218 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_aki_name_and_serial.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBazCB8wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgTjBMMDcGA1UdIwQwMC6BFzAVghNp +bnQtZTEuYm91bGRlci50ZXN0ghMCEQChCjEx4ZnD1S6gsNFjWXmlMBEGA1UdFAQK +AggW/0sm37IYDzAKBggqhkjOPQQDAwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+ +dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4 +hMicW23oENHmLNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_critical_number.pem b/linter/lints/rfc/testdata/crl_critical_number.pem new file mode 100644 index 00000000000..1fdccc98db7 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_critical_number.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBVjCB3gIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgOTA3MB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MBQGA1UdFAEB/wQKAggW/0sm37IYDzAKBggqhkjOPQQD +AwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD +6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDq +KD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_empty_revoked.pem b/linter/lints/rfc/testdata/crl_empty_revoked.pem new file mode 100644 index 00000000000..874518ce735 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_empty_revoked.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBKjCBsgIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjAAoDYwNDAfBgNVHSMEGDAW +gBQB2rt6yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFv9LJt+yGA8wCgYIKoZI +zj0EAwMDZwAwZAIwVrITRYutGjFpfNht08CLsAQSvnc4i6UM0Pi8+U3T8DRHImIi +uB9cQ+qxULB6pKhBAjBbuGCwTop7vCfGO7Fz6N0ruITInFtt6BDR5izWUMfXXa7m +XhSQ6ig9hOHOWRxR00I= +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_gentime_2049.pem b/linter/lints/rfc/testdata/crl_gentime_2049.pem new file mode 100644 index 00000000000..9f41404638f --- /dev/null +++ b/linter/lints/rfc/testdata/crl_gentime_2049.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBRzCBzwIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRgPMjA0OTA3MDYxNjQzMzhaFw0yMjA3MTUxNjQzMzhaMBswGQIIA65R21EVWjwX +DTIyMDcwNjE1NDMzOFqgNjA0MB8GA1UdIwQYMBaAFAHau3rLJSCOXnnW+ZZCLwJB +KQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNnADBkAjBWshNFi60a +MWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBO +inu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_gentime_revoked_2049.pem b/linter/lints/rfc/testdata/crl_gentime_revoked_2049.pem new file mode 100644 index 00000000000..1d411184d65 --- /dev/null +++ 
b/linter/lints/rfc/testdata/crl_gentime_revoked_2049.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBSzCB0wIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRgPMjA1MDA3MDYxNjQzMzhaGA8yMDUwMDcxNTE2NDMzOFowHTAbAggDrlHbURVa +PBgPMjA0OTA3MDYxNTQzMzhaoDYwNDAfBgNVHSMEGDAWgBQB2rt6yyUgjl551vmW +Qi8CQSkHvjARBgNVHRQECgIIFv9LJt+yGA8wCgYIKoZIzj0EAwMDZwAwZAIwVrIT +RYutGjFpfNht08CLsAQSvnc4i6UM0Pi8+U3T8DRHImIiuB9cQ+qxULB6pKhBAjBb +uGCwTop7vCfGO7Fz6N0ruITInFtt6BDR5izWUMfXXa7mXhSQ6ig9hOHOWRxR00I= +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_good.pem b/linter/lints/rfc/testdata/crl_good.pem new file mode 100644 index 00000000000..8b383d0a07e --- /dev/null +++ b/linter/lints/rfc/testdata/crl_good.pem @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBmDCCAR8CAQEwCgYIKoZIzj0EAwMwSTELMAkGA1UEBhMCWFgxFTATBgNVBAoT +DEJvdWxkZXIgVGVzdDEjMCEGA1UEAxMaKFRFU1QpIEVsZWdhbnQgRWxlcGhhbnQg +RTEXDTIyMTAxMDIwMTIwN1oXDTIyMTAxOTIwMTIwNlowKTAnAggDrlHbURVaPBcN +MjIxMDEwMTkxMjA3WjAMMAoGA1UdFQQDCgEBoHoweDAfBgNVHSMEGDAWgBQB2rt6 +yyUgjl551vmWQi8CQSkHvjARBgNVHRQECgIIFxzOPeSCumEwQgYDVR0cAQH/BDgw +NqAxoC+GLWh0dHA6Ly9jLmJvdWxkZXIudGVzdC82NjI4Mzc1NjkxMzU4ODI4OC8w +LmNybIEB/zAKBggqhkjOPQQDAwNnADBkAjAvDkIUnTYavJ6h8606MDyFh2uw/cF+ +OVnM4sE8nUdGy0XYg0hGfbR4MY+kRxRQayICMFeQPpcpIr0zgXpP6lUXU0rcLSva +tuaeQSVr24nGjZ7Py0vc94w0n7idZ8wje5+/Mw== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_good_gentime_2050.pem b/linter/lints/rfc/testdata/crl_good_gentime_2050.pem new file mode 100644 index 00000000000..b837453a605 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_good_gentime_2050.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBRzCBzwIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRgPMjA1MDA3MDYxNjQzMzhaFw0yMjA3MTUxNjQzMzhaMBswGQIIA65R21EVWjwX +DTIyMDcwNjE1NDMzOFqgNjA0MB8GA1UdIwQYMBaAFAHau3rLJSCOXnnW+ZZCLwJB +KQe+MBEGA1UdFAQKAggW/0sm37IYDzAKBggqhkjOPQQDAwNnADBkAjBWshNFi60a +MWl82G3TwIuwBBK+dziLpQzQ+Lz5TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBO +inu8J8Y7sXPo3Su4hMicW23oENHmLNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_good_utctime_1950.pem b/linter/lints/rfc/testdata/crl_good_utctime_1950.pem new file mode 100644 index 00000000000..a7008944336 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_good_utctime_1950.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBRTCBzQIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNNTAwNzA2MTY0MzM4WhcNNTAwNzE1MTY0MzM4WjAbMBkCCAOuUdtRFVo8Fw01 +MDA3MDYxNTQzMzhaoDYwNDAfBgNVHSMEGDAWgBQB2rt6yyUgjl551vmWQi8CQSkH +vjARBgNVHRQECgIIFv9LJt+yGA8wCgYIKoZIzj0EAwMDZwAwZAIwVrITRYutGjFp +fNht08CLsAQSvnc4i6UM0Pi8+U3T8DRHImIiuB9cQ+qxULB6pKhBAjBbuGCwTop7 +vCfGO7Fz6N0ruITInFtt6BDR5izWUMfXXa7mXhSQ6ig9hOHOWRxR00I= +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_long_number.pem b/linter/lints/rfc/testdata/crl_long_number.pem new file mode 100644 index 00000000000..e8b855dbe26 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_long_number.pem @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCB6AIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgQzBBMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MB4GA1UdFAQXAhUW/0sm37IYDxb/SybfshgPFv9LJt8w 
+CgYIKoZIzj0EAwMDZwAwZAIwVrITRYutGjFpfNht08CLsAQSvnc4i6UM0Pi8+U3T +8DRHImIiuB9cQ+qxULB6pKhBAjBbuGCwTop7vCfGO7Fz6N0ruITInFtt6BDR5izW +UMfXXa7mXhSQ6ig9hOHOWRxR00I= +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_no_aki.pem b/linter/lints/rfc/testdata/crl_no_aki.pem new file mode 100644 index 00000000000..a1fdf6e4322 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_no_aki.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBMjCBugIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgFTATMBEGA1UdFAQKAggW/0sm37IY +DzAKBggqhkjOPQQDAwNnADBkAjBWshNFi60aMWl82G3TwIuwBBK+dziLpQzQ+Lz5 +TdPwNEciYiK4H1xD6rFQsHqkqEECMFu4YLBOinu8J8Y7sXPo3Su4hMicW23oENHm +LNZQx9ddruZeFJDqKD2E4c5ZHFHTQg== +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_no_issuer_name.pem b/linter/lints/rfc/testdata/crl_no_issuer_name.pem new file mode 100644 index 00000000000..c45c428c0dc --- /dev/null +++ b/linter/lints/rfc/testdata/crl_no_issuer_name.pem @@ -0,0 +1,8 @@ +-----BEGIN X509 CRL----- +MIIBCjCBkgIBATAKBggqhkjOPQQDAzAAFw0yMjA3MDYxNjQzMzhaFw0yMjA3MTUx +NjQzMzhaMCkwJwIIA65R21EVWjwXDTIyMDcwNjE1NDMzOFowDDAKBgNVHRUEAwoB +AaA2MDQwHwYDVR0jBBgwFoAUAdq7esslII5eedb5lkIvAkEpB74wEQYDVR0UBAoC +CBb/SybfshgPMAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzYbdPAi7AEEr53 +OIulDND4vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wnxjuxc+jdK7iE +yJxbbegQ0eYs1lDH112u5l4UkOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_no_number.pem b/linter/lints/rfc/testdata/crl_no_number.pem new file mode 100644 index 00000000000..65578de8bb8 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_no_number.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBQDCByAIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WjApMCcCCAOuUdtRFVo8Fw0y +MjA3MDYxNTQzMzhaMAwwCgYDVR0VBAMKAQGgIzAhMB8GA1UdIwQYMBaAFAHau3rL +JSCOXnnW+ZZCLwJBKQe+MAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzYbdPA +i7AEEr53OIulDND4vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wnxjux +c+jdK7iEyJxbbegQ0eYs1lDH112u5l4UkOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_none_revoked.pem b/linter/lints/rfc/testdata/crl_none_revoked.pem new file mode 100644 index 00000000000..b73885ddb12 --- /dev/null +++ b/linter/lints/rfc/testdata/crl_none_revoked.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBKDCBsAIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcNMjIwNzE1MTY0MzM4WqA2MDQwHwYDVR0jBBgwFoAU +Adq7esslII5eedb5lkIvAkEpB74wEQYDVR0UBAoCCBb/SybfshgPMAoGCCqGSM49 +BAMDA2cAMGQCMFayE0WLrRoxaXzYbdPAi7AEEr53OIulDND4vPlN0/A0RyJiIrgf +XEPqsVCweqSoQQIwW7hgsE6Ke7wnxjuxc+jdK7iEyJxbbegQ0eYs1lDH112u5l4U +kOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/rfc/testdata/crl_utctime_no_seconds.pem b/linter/lints/rfc/testdata/crl_utctime_no_seconds.pem new file mode 100644 index 00000000000..af07a12bdea --- /dev/null +++ b/linter/lints/rfc/testdata/crl_utctime_no_seconds.pem @@ -0,0 +1,9 @@ +-----BEGIN X509 CRL----- +MIIBQzCBywIBATAKBggqhkjOPQQDAzBJMQswCQYDVQQGEwJYWDEVMBMGA1UEChMM +Qm91bGRlciBUZXN0MSMwIQYDVQQDExooVEVTVCkgRWxlZ2FudCBFbGVwaGFudCBF +MRcNMjIwNzA2MTY0MzM4WhcLMjIwNzE1MTY0M1owGzAZAggDrlHbURVaPBcNMjIw +NzA2MTU0MzM4WqA2MDQwHwYDVR0jBBgwFoAUAdq7esslII5eedb5lkIvAkEpB74w 
+EQYDVR0UBAoCCBb/SybfshgPMAoGCCqGSM49BAMDA2cAMGQCMFayE0WLrRoxaXzY +bdPAi7AEEr53OIulDND4vPlN0/A0RyJiIrgfXEPqsVCweqSoQQIwW7hgsE6Ke7wn +xjuxc+jdK7iEyJxbbegQ0eYs1lDH112u5l4UkOooPYThzlkcUdNC +-----END X509 CRL----- diff --git a/linter/lints/root/e_validity_period_greater_than_25_years.go b/linter/lints/root/e_validity_period_greater_than_25_years.go deleted file mode 100644 index 78bc89bd890..00000000000 --- a/linter/lints/root/e_validity_period_greater_than_25_years.go +++ /dev/null @@ -1,47 +0,0 @@ -package subscriber - -import ( - "time" - - "github.com/zmap/zcrypto/x509" - "github.com/zmap/zlint/v3/lint" - "github.com/zmap/zlint/v3/util" - - "github.com/letsencrypt/boulder/linter/lints" -) - -type certValidityTooLong struct{} - -func init() { - lint.RegisterLint(&lint.Lint{ - Name: "e_validity_period_greater_than_25_years", - Description: "Let's Encrypt Root CA Certificates have Validity Periods of up to 25 years", - Citation: "CPS: 7.1", - Source: lints.LetsEncryptCPSRoot, - EffectiveDate: lints.CPSV33Date, - Lint: NewCertValidityTooLong, - }) -} - -func NewCertValidityTooLong() lint.LintInterface { - return &certValidityTooLong{} -} - -func (l *certValidityTooLong) CheckApplies(c *x509.Certificate) bool { - return util.IsRootCA(c) -} - -func (l *certValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult { - // CPS 7.1: "Root CA Certificate Validity Period: Up to 25 years." - maxValidity := 25 * 365 * lints.BRDay - - // RFC 5280 4.1.2.5: "The validity period for a certificate is the period - // of time from notBefore through notAfter, inclusive." - certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore) - - if certValidity > maxValidity { - return &lint.LintResult{Status: lint.Error} - } - - return &lint.LintResult{Status: lint.Pass} -} diff --git a/linter/lints/subscriber/e_validity_period_greater_than_100_days.go b/linter/lints/subscriber/e_validity_period_greater_than_100_days.go deleted file mode 100644 index c57f73ab752..00000000000 --- a/linter/lints/subscriber/e_validity_period_greater_than_100_days.go +++ /dev/null @@ -1,47 +0,0 @@ -package subscriber - -import ( - "time" - - "github.com/zmap/zcrypto/x509" - "github.com/zmap/zlint/v3/lint" - "github.com/zmap/zlint/v3/util" - - "github.com/letsencrypt/boulder/linter/lints" -) - -type certValidityTooLong struct{} - -func init() { - lint.RegisterLint(&lint.Lint{ - Name: "e_validity_period_greater_than_100_days", - Description: "Let's Encrypt Subscriber Certificates have Validity Periods of up to 100 days", - Citation: "CPS: 7.1", - Source: lints.LetsEncryptCPSSubscriber, - EffectiveDate: lints.CPSV33Date, - Lint: NewCertValidityTooLong, - }) -} - -func NewCertValidityTooLong() lint.LintInterface { - return &certValidityTooLong{} -} - -func (l *certValidityTooLong) CheckApplies(c *x509.Certificate) bool { - return util.IsServerAuthCert(c) && !c.IsCA -} - -func (l *certValidityTooLong) Execute(c *x509.Certificate) *lint.LintResult { - // CPS 7.1: "DV SSL End Entity Certificate Validity Period: Up to 100 days." - maxValidity := 100 * lints.BRDay - - // RFC 5280 4.1.2.5: "The validity period for a certificate is the period - // of time from notBefore through notAfter, inclusive." 
-	certValidity := c.NotAfter.Add(time.Second).Sub(c.NotBefore)
-
-	if certValidity > maxValidity {
-		return &lint.LintResult{Status: lint.Error}
-	}
-
-	return &lint.LintResult{Status: lint.Pass}
-}
diff --git a/linter/lints/test/README.md b/linter/lints/test/README.md
new file mode 100644
index 00000000000..07b0e0e31b4
--- /dev/null
+++ b/linter/lints/test/README.md
@@ -0,0 +1,35 @@
+# Test Lint CRLs
+
+The contents of this directory are a variety of PEM-encoded CRLs used to test
+the CRL linting functions in the parent directory.
+
+To create a new test CRL to exercise a new lint:
+
+1. Install the `der2text` and `text2der` tools:
+
+   ```sh
+   $ go install github.com/syncsynchalt/der2text/cmds/text2der@latest
+   $ go install github.com/syncsynchalt/der2text/cmds/der2text@latest
+   ```
+
+2. Use `der2text` to create an editable version of the CRL you want to start with, usually `crl_good.pem`:
+
+   ```sh
+   $ der2text crl_good.pem > my_new_crl.txt
+   ```
+
+3. Edit the text file. See [the der2text readme](https://github.com/syncsynchalt/der2text) for details about the file format.
+
+4. Write the new PEM file and run the tests to see if it works! Repeat steps 3 and 4 as necessary until you get the correct result.
+
+   ```sh
+   $ text2der my_new_crl.txt >| my_new_crl.pem
+   $ go test ..
+   ```
+
+5. Remove the text file and commit your new CRL.
+
+   ```sh
+   $ rm my_new_crl.txt
+   $ git add .
+   ```
diff --git a/linter/lints/test/helpers.go b/linter/lints/test/helpers.go
new file mode 100644
index 00000000000..55badf8be1c
--- /dev/null
+++ b/linter/lints/test/helpers.go
@@ -0,0 +1,23 @@
+package test
+
+import (
+	"encoding/pem"
+	"os"
+	"testing"
+
+	"github.com/zmap/zcrypto/x509"
+
+	"github.com/letsencrypt/boulder/test"
+)
+
+// LoadPEMCRL reads the named file, decodes its PEM block, and parses the
+// result as a CRL, failing the test on any error.
+func LoadPEMCRL(t *testing.T, filename string) *x509.RevocationList {
+	t.Helper()
+	file, err := os.ReadFile(filename)
+	test.AssertNotError(t, err, "reading CRL file")
+	block, rest := pem.Decode(file)
+	test.AssertEquals(t, block.Type, "X509 CRL")
+	test.AssertEquals(t, len(rest), 0)
+	crl, err := x509.ParseRevocationList(block.Bytes)
+	test.AssertNotError(t, err, "parsing CRL bytes")
+	return crl
+}
diff --git a/log/log.go b/log/log.go
index 441e2cf15c2..e69e0c1c0e1 100644
--- a/log/log.go
+++ b/log/log.go
@@ -1,6 +1,7 @@
 package log
 
 import (
+	"bytes"
 	"encoding/base64"
 	"encoding/binary"
 	"encoding/json"
@@ -10,32 +11,30 @@ import (
 	"io"
 	"log/syslog"
 	"os"
-	"path"
-	"runtime"
 	"strings"
 	"sync"
 
 	"github.com/jmhodges/clock"
+	"golang.org/x/term"
+
+	"github.com/letsencrypt/boulder/core"
 )
 
 // A Logger logs messages with explicit priority levels. It is
 // implemented by a logging back-end as provided by New() or
-// NewMock().
+// NewMock(). Any additions to this interface with format strings should be
+// added to the govet configuration in .golangci.yml
 type Logger interface {
-	Err(msg string)
-	Errf(format string, a ...interface{})
+	Errf(format string, a ...any)
 	Warning(msg string)
-	Warningf(format string, a ...interface{})
+	Warningf(format string, a ...any)
 	Info(msg string)
-	Infof(format string, a ...interface{})
+	Infof(format string, a ...any)
+	InfoObject(string, any)
 	Debug(msg string)
-	Debugf(format string, a ...interface{})
-	AuditPanic()
-	AuditInfo(msg string)
-	AuditInfof(format string, a ...interface{})
-	AuditObject(string, interface{})
-	AuditErr(string)
-	AuditErrf(format string, a ...interface{})
+	Debugf(format string, a ...any)
+	AuditInfo(string, any)
+	AuditErr(string, error, map[string]any)
}

// impl implements Logger.
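The hunk above collapses the old string-based audit methods (`Err`, `AuditInfo`, `AuditInfof`, `AuditObject`, `AuditErr`, `AuditErrf`) into two structured methods. For orientation, here is a minimal, hypothetical sketch of a call site under the new interface; the method signatures and the `[AUDIT] ... JSON=` output shape are taken from this diff, while the messages, level, and detail values are made-up examples:

```go
package main

import (
	"errors"

	blog "github.com/letsencrypt/boulder/log"
)

func main() {
	// StdoutLogger (added below) returns a Logger that writes only to
	// stdout/stderr; 6 is syslog's LOG_INFO level.
	logger := blog.StdoutLogger(6)

	// AuditInfo now takes a message plus any object, which is serialized to
	// JSON, e.g. `... [AUDIT] saving order JSON={"id":42}`.
	logger.AuditInfo("saving order", map[string]any{"id": 42})

	// AuditErr takes a message, an error, and an optional detail map; the
	// error is folded into the map under the key "error", e.g.
	// `... [AUDIT] storing cert JSON={"error":"broken","serial":"00af"}`.
	logger.AuditErr("storing cert", errors.New("broken"), map[string]any{"serial": "00af"})
}
```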
@@ -55,20 +54,44 @@ var _Singleton singleton // The constant used to identify audit-specific messages const auditTag = "[AUDIT]" -// New returns a new Logger that uses the given syslog.Writer as a backend. +// New returns a new Logger that uses the given syslog.Writer as a backend +// and also writes to stdout/stderr. It is safe for concurrent use. func New(log *syslog.Writer, stdoutLogLevel int, syslogLogLevel int) (Logger, error) { if log == nil { - return nil, errors.New("Attempted to use a nil System Logger.") + return nil, errors.New("Attempted to use a nil System Logger") } return &impl{ - &bothWriter{log, stdoutLogLevel, syslogLogLevel, clock.New(), os.Stdout}, + &bothWriter{ + sync.Mutex{}, + log, + newStdoutWriter(stdoutLogLevel), + syslogLogLevel, + }, }, nil } -// initialize should only be used in unit tests. +// StdoutLogger returns a Logger that writes solely to stdout and stderr. +// It is safe for concurrent use. +func StdoutLogger(level int) Logger { + return &impl{newStdoutWriter(level)} +} + +func newStdoutWriter(level int) *stdoutWriter { + prefix, clkFormat := getPrefix() + return &stdoutWriter{ + prefix: prefix, + level: level, + clkFormat: clkFormat, + clk: clock.New(), + stdout: os.Stdout, + stderr: os.Stderr, + isatty: term.IsTerminal(int(os.Stdout.Fd())), + } +} + +// initialize is used in unit tests and called by `Get` before the logger +// is fully set up. func initialize() { - // defaultPriority is never used because we always use specific priority-based - // logging methods. const defaultPriority = syslog.LOG_INFO | syslog.LOG_LOCAL0 syslogger, err := syslog.Dial("", "", defaultPriority, "test") if err != nil { @@ -87,7 +110,7 @@ func initialize() { // first time. func Set(logger Logger) (err error) { if _Singleton.log != nil { - err = errors.New("You may not call Set after it has already been implicitly or explicitly set.") + err = errors.New("You may not call Set after it has already been implicitly or explicitly set") _Singleton.log.Warning(err.Error()) } else { _Singleton.log = logger @@ -110,180 +133,234 @@ func Get() Logger { } type writer interface { - logAtLevel(syslog.Priority, string) + logAtLevel(syslog.Priority, string, ...any) } // bothWriter implements writer and writes to both syslog and stdout. type bothWriter struct { + sync.Mutex *syslog.Writer - stdoutLevel int + *stdoutWriter syslogLevel int - clk clock.Clock - stdout io.Writer } +// stdoutWriter implements writer and writes just to stdout. +type stdoutWriter struct { + // prefix is a set of information that is the same for every log line, + // imitating what syslog emits for us when we use the syslog writer. + prefix string + level int + clkFormat string + clk clock.Clock + stdout io.Writer + stderr io.Writer + isatty bool +} + +// LogLineChecksum computes a CRC32 over the log line, which can be checked by +// log-validator to ensure no unexpected log corruption has occurred. 
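+// For example, LogLineChecksum("Hello, World!") returns "0MNK7A"; this exact
+// pair is exercised by TestLogLineChecksum in log_test.go below.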
func LogLineChecksum(line string) string { crc := crc32.ChecksumIEEE([]byte(line)) - // Using the hash.Hash32 doesn't make this any easier - // as it also returns a uint32 rather than []byte - buf := make([]byte, binary.MaxVarintLen32) - binary.PutUvarint(buf, uint64(crc)) + buf := make([]byte, crc32.Size) + // Error is unreachable because we provide a supported type and buffer size + _, _ = binary.Encode(buf, binary.LittleEndian, crc) return base64.RawURLEncoding.EncodeToString(buf) } -// Log the provided message at the appropriate level, writing to +func checkSummed(msg string) string { + return fmt.Sprintf("%s %s", LogLineChecksum(msg), msg) +} + +// logAtLevel logs the provided message at the appropriate level, writing to // both stdout and the Logger -func (w *bothWriter) logAtLevel(level syslog.Priority, msg string) { - var prefix string +func (w *bothWriter) logAtLevel(level syslog.Priority, msg string, a ...any) { var err error - const red = "\033[31m\033[1m" - const yellow = "\033[33m" + // Apply conditional formatting for f functions + if a != nil { + msg = fmt.Sprintf(msg, a...) + } // Since messages are delimited by newlines, we have to escape any internal or // trailing newlines before generating the checksum or outputting the message. - msg = strings.Replace(msg, "\n", "\\n", -1) - msg = fmt.Sprintf("%s %s", LogLineChecksum(msg), msg) + msg = strings.ReplaceAll(msg, "\n", "\\n") + + w.Lock() + defer w.Unlock() switch syslogAllowed := int(level) <= w.syslogLevel; level { case syslog.LOG_ERR: if syslogAllowed { - err = w.Err(msg) + err = w.Err(checkSummed(msg)) } - prefix = red + "E" case syslog.LOG_WARNING: if syslogAllowed { - err = w.Warning(msg) + err = w.Warning(checkSummed(msg)) } - prefix = yellow + "W" case syslog.LOG_INFO: if syslogAllowed { - err = w.Info(msg) + err = w.Info(checkSummed(msg)) } - prefix = "I" case syslog.LOG_DEBUG: if syslogAllowed { - err = w.Debug(msg) + err = w.Debug(checkSummed(msg)) } - prefix = "D" default: - err = w.Err(fmt.Sprintf("%s (unknown logging level: %d)", msg, int(level))) + err = w.Err(fmt.Sprintf("%s (unknown logging level: %d)", checkSummed(msg), int(level))) } if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to syslog: %s (%s)\n", msg, err) + fmt.Fprintf(os.Stderr, "Failed to write to syslog: %d %s (%s)\n", int(level), checkSummed(msg), err) } - var reset string - if strings.HasPrefix(prefix, "\033") { - reset = "\033[0m" - } - - if int(level) <= w.stdoutLevel { - if _, err := fmt.Fprintf(w.stdout, "%s%s %s %s%s\n", - prefix, - w.clk.Now().Format("150405"), - path.Base(os.Args[0]), - msg, - reset); err != nil { - panic(fmt.Sprintf("failed to write to stdout: %v\n", err)) - } - } + w.stdoutWriter.logAtLevel(level, msg) } -func (log *impl) auditAtLevel(level syslog.Priority, msg string) { - text := fmt.Sprintf("%s %s", auditTag, msg) - log.w.logAtLevel(level, text) -} +// logAtLevel logs the provided message to stdout, or stderr if it is at Warning or Error level. +func (w *stdoutWriter) logAtLevel(level syslog.Priority, msg string, a ...any) { + if int(level) <= w.level { + output := w.stdout + if int(level) <= int(syslog.LOG_WARNING) { + output = w.stderr + } -// AuditPanic catches panicking executables. This method should be added -// in a defer statement as early as possible -func (log *impl) AuditPanic() { - err := recover() - if err != nil { - buf := make([]byte, 8192) - log.AuditErrf("Panic caused by err: %s", err) + // Apply conditional formatting for f functions + if a != nil { + msg = fmt.Sprintf(msg, a...) 
+ } - runtime.Stack(buf, false) - log.AuditErrf("Stack Trace (Current frame) %s", buf) + msg = strings.ReplaceAll(msg, "\n", "\\n") + + var color string + var reset string + + const red = "\033[31m\033[1m" + const yellow = "\033[33m" + const gray = "\033[37m\033[2m" + + if w.isatty { + if int(level) == int(syslog.LOG_DEBUG) { + color = gray + reset = "\033[0m" + } else if int(level) == int(syslog.LOG_WARNING) { + color = yellow + reset = "\033[0m" + } else if int(level) <= int(syslog.LOG_ERR) { + color = red + reset = "\033[0m" + } + } - runtime.Stack(buf, true) - log.Warningf("Stack Trace (All frames): %s", buf) + if _, err := fmt.Fprintf(output, "%s%s %s%d %s %s%s\n", + color, + w.clk.Now().UTC().Format(w.clkFormat), + w.prefix, + int(level), + core.Command(), + checkSummed(msg), + reset); err != nil { + panic(fmt.Sprintf("failed to write to stdout: %v\n", err)) + } } } -// Err level messages are always marked with the audit tag, for special handling -// at the upstream system logger. -func (log *impl) Err(msg string) { - log.auditAtLevel(syslog.LOG_ERR, msg) +func (log *impl) auditAtLevel(level syslog.Priority, msg string) { + msg = fmt.Sprintf("%s %s", auditTag, msg) + log.w.logAtLevel(level, msg) } // Errf level messages are always marked with the audit tag, for special handling // at the upstream system logger. -func (log *impl) Errf(format string, a ...interface{}) { - log.Err(fmt.Sprintf(format, a...)) +func (log *impl) Errf(format string, a ...any) { + log.w.logAtLevel(syslog.LOG_ERR, format, a...) } // Warning level messages pass through normally. func (log *impl) Warning(msg string) { - log.w.logAtLevel(syslog.LOG_WARNING, msg) + log.Warningf(msg) } // Warningf level messages pass through normally. -func (log *impl) Warningf(format string, a ...interface{}) { - log.Warning(fmt.Sprintf(format, a...)) +func (log *impl) Warningf(format string, a ...any) { + log.w.logAtLevel(syslog.LOG_WARNING, format, a...) } // Info level messages pass through normally. func (log *impl) Info(msg string) { - log.w.logAtLevel(syslog.LOG_INFO, msg) + log.Infof(msg) } // Infof level messages pass through normally. -func (log *impl) Infof(format string, a ...interface{}) { - log.Info(fmt.Sprintf(format, a...)) +func (log *impl) Infof(format string, a ...any) { + log.w.logAtLevel(syslog.LOG_INFO, format, a...) } -// Debug level messages pass through normally. -func (log *impl) Debug(msg string) { - log.w.logAtLevel(syslog.LOG_DEBUG, msg) -} +// InfoObject logs an INFO level JSON-serialized object message. +func (log *impl) InfoObject(msg string, obj any) { + jsonObj, err := formatObj(obj) + if err != nil { + log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object for msg %q could not be serialized to JSON. Raw: %+v", msg, obj)) + return + } -// Debugf level messages pass through normally. -func (log *impl) Debugf(format string, a ...interface{}) { - log.Debug(fmt.Sprintf(format, a...)) + log.Infof("%s JSON=%s", msg, jsonObj) } -// AuditInfo sends an INFO-severity message that is prefixed with the -// audit tag, for special handling at the upstream system logger. -func (log *impl) AuditInfo(msg string) { - log.auditAtLevel(syslog.LOG_INFO, msg) +// Debug level messages pass through normally. +func (log *impl) Debug(msg string) { + log.Debugf(msg) + } -// AuditInfof sends an INFO-severity message that is prefixed with the -// audit tag, for special handling at the upstream system logger. 
-func (log *impl) AuditInfof(format string, a ...interface{}) { - log.AuditInfo(fmt.Sprintf(format, a...)) +// Debugf level messages pass through normally. +func (log *impl) Debugf(format string, a ...any) { + log.w.logAtLevel(syslog.LOG_DEBUG, format, a...) } -// AuditObject sends an INFO-severity JSON-serialized object message that is prefixed +// AuditInfo sends an INFO-severity JSON-serialized object message that is prefixed // with the audit tag, for special handling at the upstream system logger. -func (log *impl) AuditObject(msg string, obj interface{}) { - jsonObj, err := json.Marshal(obj) +func (log *impl) AuditInfo(msg string, obj any) { + jsonObj, err := formatObj(obj) if err != nil { - log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object could not be serialized to JSON. Raw: %+v", obj)) + log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object for msg %q could not be serialized to JSON. Raw: %+v", msg, obj)) return } log.auditAtLevel(syslog.LOG_INFO, fmt.Sprintf("%s JSON=%s", msg, jsonObj)) } -// AuditErr can format an error for auditing; it does so at ERR level. -func (log *impl) AuditErr(msg string) { - log.auditAtLevel(syslog.LOG_ERR, msg) +// AuditErr sends an ERROR-level JSON-serialized message that is prefixed with +// the audit tag. It restricts its last argument to map[string]any, rather than +// allowing any struct at all like AuditInfo, so that it can add the given error +// to that map under the key "error". +func (log *impl) AuditErr(msg string, err error, obj map[string]any) { + if err != nil { + if obj == nil { + obj = make(map[string]any) + } + obj["error"] = err.Error() + } + + jsonObj, err := formatObj(obj) + if err != nil { + log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("Object for msg %q could not be serialized to JSON. Raw: %+v", msg, obj)) + return + } + + log.auditAtLevel(syslog.LOG_ERR, fmt.Sprintf("%s JSON=%s", msg, jsonObj)) } -// AuditErrf can format an error for auditing; it does so at ERR level. -func (log *impl) AuditErrf(format string, a ...interface{}) { - log.AuditErr(fmt.Sprintf(format, a...)) +// formatObj marshals any object to json. It's the equivalent of json.Marshal, +// except that it doesn't escape <, >, and &, and it doesn't include the +// trailing newline. Code based on appendJSONMarshal from the slog package. 
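+// For example, formatObj(map[string]any{"error": "oops"}) returns
+// `{"error":"oops"}`, with no HTML escaping and no trailing newline.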
+func formatObj(obj any) (string, error) { + var bb bytes.Buffer + enc := json.NewEncoder(&bb) + enc.SetEscapeHTML(false) + err := enc.Encode(obj) + if err != nil { + return "", err + } + bs := bb.String() + return strings.TrimRight(bs, "\n"), nil } diff --git a/log/log_test.go b/log/log_test.go index 6ba6e9d6c46..295c9290031 100644 --- a/log/log_test.go +++ b/log/log_test.go @@ -3,15 +3,16 @@ package log import ( "bytes" "fmt" - "log" "log/syslog" "net" "os" "strings" + "sync" "testing" "time" "github.com/jmhodges/clock" + "github.com/letsencrypt/boulder/test" ) @@ -73,85 +74,67 @@ func TestEmit(t *testing.T) { t.Parallel() log := setup(t) - log.AuditInfo("test message") + log.AuditInfo("test message", nil) } func TestEmitEmpty(t *testing.T) { t.Parallel() log := setup(t) - log.AuditInfo("") + log.AuditInfo("", nil) } -func ExampleLogger() { - // Write all logs to UDP on a high port so as to not bother the system - // which is running the test - writer, err := syslog.Dial("udp", "127.0.0.1:65530", syslog.LOG_INFO|syslog.LOG_LOCAL0, "") - if err != nil { - log.Fatal(err) +func TestStdoutLogger(t *testing.T) { + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + logger := &impl{ + &stdoutWriter{ + prefix: "prefix ", + level: 7, + clkFormat: "2006-01-02", + clk: clock.NewFake(), + stdout: stdout, + stderr: stderr, + }, } - logger, err := New(writer, stdoutLevel, syslogLevel) - if err != nil { - log.Fatal(err) - } - impl, ok := logger.(*impl) - if !ok { - log.Fatalf("Wrong type returned from New: %T", logger) - } + logger.AuditErr("Error Audit", fmt.Errorf("oops"), nil) + logger.Warning("Warning log") + logger.Info("Info log") - bw, ok := impl.w.(*bothWriter) - if !ok { - log.Fatalf("Wrong type of impl's writer: %T\n", impl.w) - } - bw.clk = clock.NewFake() - impl.AuditErr("Error Audit") - impl.Warning("Warning Audit") - // Output: - // E000000 log.test 46_ghQg [AUDIT] Error Audit - // W000000 log.test 9rr1xwQ Warning Audit + test.AssertEquals(t, stdout.String(), "1970-01-01 prefix 6 log.test JSP6nQ Info log\n") + test.AssertEquals(t, stderr.String(), "1970-01-01 prefix 3 log.test d_ZkUQ [AUDIT] Error Audit JSON={\"error\":\"oops\"}\n1970-01-01 prefix 4 log.test d52dyA Warning log\n") } func TestSyslogMethods(t *testing.T) { t.Parallel() impl := setup(t) - impl.AuditInfo("audit-logger_test.go: audit-info") - impl.AuditErr("audit-logger_test.go: audit-err") + impl.AuditInfo("audit-logger_test.go: audit-info", map[string]any{"key": "value"}) + impl.AuditErr("audit-logger_test.go: audit-err", fmt.Errorf("oops"), map[string]any{"key": "value"}) impl.Debug("audit-logger_test.go: debug") - impl.Err("audit-logger_test.go: err") impl.Info("audit-logger_test.go: info") impl.Warning("audit-logger_test.go: warning") - impl.AuditInfof("audit-logger_test.go: %s", "audit-info") - impl.AuditErrf("audit-logger_test.go: %s", "audit-err") impl.Debugf("audit-logger_test.go: %s", "debug") impl.Errf("audit-logger_test.go: %s", "err") impl.Infof("audit-logger_test.go: %s", "info") impl.Warningf("audit-logger_test.go: %s", "warning") } -func TestPanic(t *testing.T) { - t.Parallel() - impl := setup(t) - defer impl.AuditPanic() - panic("Test panic") - // Can't assert anything here or golint gets angry -} - -func TestAuditObject(t *testing.T) { +func TestAuditInfo(t *testing.T) { t.Parallel() log := NewMock() // Test a simple object - log.AuditObject("Prefix", "String") + log.AuditInfo("Prefix", "String") if len(log.GetAllMatching("[AUDIT]")) != 1 { t.Errorf("Failed to audit log simple object") } // 
Test a system object log.Clear() - log.AuditObject("Prefix", t) + log.AuditInfo("Prefix", t) if len(log.GetAllMatching("[AUDIT]")) != 1 { t.Errorf("Failed to audit log system object") } @@ -163,19 +146,32 @@ func TestAuditObject(t *testing.T) { B string } var valid = validObj{A: "B", B: "C"} - log.AuditObject("Prefix", valid) + log.AuditInfo("Prefix", valid) if len(log.GetAllMatching("[AUDIT]")) != 1 { t.Errorf("Failed to audit log complex object") } + // Test a map + log.Clear() + log.AuditInfo("Prefix", map[string]any{"hello": "world", "number": 123}) + if len(log.GetAllMatching("[AUDIT]")) != 1 { + t.Errorf("Failed to audit log map") + } + + // Test a nil object + log.Clear() + log.AuditInfo("Prefix", nil) + if len(log.GetAllMatching("[AUDIT]")) != 1 { + t.Errorf("Failed to audit nil object") + } + // Test logging an unserializable object log.Clear() type invalidObj struct { A chan string } - var invalid = invalidObj{A: make(chan string)} - log.AuditObject("Prefix", invalid) + log.AuditInfo("Prefix", invalid) if len(log.GetAllMatching("[AUDIT]")) != 1 { t.Errorf("Failed to audit log unserializable object %v", log.GetAllMatching("[AUDIT]")) } @@ -200,11 +196,11 @@ func TestTransmission(t *testing.T) { data := make([]byte, 128) - impl.AuditInfo("audit-logger_test.go: audit-info") + impl.AuditInfo("audit-logger_test.go: audit-info", map[string]any{"key": "value"}) _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") - impl.AuditErr("audit-logger_test.go: audit-err") + impl.AuditErr("audit-logger_test.go: audit-err", fmt.Errorf("oops"), nil) _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") @@ -212,10 +208,6 @@ func TestTransmission(t *testing.T) { _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") - impl.Err("audit-logger_test.go: err") - _, _, err = l.ReadFrom(data) - test.AssertNotError(t, err, "Failed to find packet") - impl.Info("audit-logger_test.go: info") _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") @@ -224,14 +216,6 @@ func TestTransmission(t *testing.T) { _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") - impl.AuditInfof("audit-logger_test.go: %s", "audit-info") - _, _, err = l.ReadFrom(data) - test.AssertNotError(t, err, "Failed to find packet") - - impl.AuditErrf("audit-logger_test.go: %s", "audit-err") - _, _, err = l.ReadFrom(data) - test.AssertNotError(t, err, "Failed to find packet") - impl.Debugf("audit-logger_test.go: %s", "debug") _, _, err = l.ReadFrom(data) test.AssertNotError(t, err, "Failed to find packet") @@ -339,13 +323,46 @@ func TestStdoutFailure(t *testing.T) { }() // Try to audit log something - log.AuditInfo("This should cause a panic, stdout is closed!") + log.AuditInfo("This should cause a panic, stdout is closed!", nil) } func TestLogAtLevelEscapesNewlines(t *testing.T) { var buf bytes.Buffer - w := &bothWriter{nil, 6, 0, clock.New(), &buf} + w := &bothWriter{sync.Mutex{}, + nil, + &stdoutWriter{ + stdout: &buf, + clk: clock.NewFake(), + level: 6, + }, + -1, + } w.logAtLevel(6, "foo\nbar") test.Assert(t, strings.Contains(buf.String(), "foo\\nbar"), "failed to escape newline") } + +func TestLogLineChecksum(t *testing.T) { + testCases := []struct { + name string + function func(string) string + input string + expected string + }{ + { + name: "LogLineChecksum with Hello, World!", + function: LogLineChecksum, + input: "Hello, World!", + expected: "0MNK7A", + }, + } + + for _, tc := range testCases { + 
t.Run(tc.name, func(t *testing.T) { + checksum := tc.function(tc.input) + if checksum != tc.expected { + t.Fatalf("got %q, want %q", checksum, tc.expected) + } + }) + } +} diff --git a/log/mock.go b/log/mock.go index 97511cc5d85..289c09ec1af 100644 --- a/log/mock.go +++ b/log/mock.go @@ -4,7 +4,7 @@ import ( "fmt" "log/syslog" "regexp" - "time" + "strings" ) // UseMock sets a mock logger as the default logger, and returns it. @@ -19,13 +19,6 @@ func NewMock() *Mock { return &Mock{impl{newMockWriter()}} } -// NewWaitingMock creates a mock logger implementing the writer interface. -// It stores all logged messages in a buffer for inspection by test -// functions. -func NewWaitingMock() *WaitingMock { - return &WaitingMock{impl{newWaitingMockWriter()}} -} - // Mock is a logger that stores all log messages in memory to be examined by a // test. type Mock struct { @@ -55,8 +48,8 @@ var levelName = map[syslog.Priority]string{ syslog.LOG_DEBUG: "DEBUG", } -func (w *mockWriter) logAtLevel(p syslog.Priority, msg string) { - w.msgChan <- fmt.Sprintf("%s: %s", levelName[p&7], msg) +func (w *mockWriter) logAtLevel(p syslog.Priority, msg string, a ...any) { + w.msgChan <- fmt.Sprintf("%s: %s", levelName[p&7], fmt.Sprintf(msg, a...)) } // newMockWriter returns a new mockWriter @@ -116,44 +109,16 @@ func (m *Mock) GetAllMatching(reString string) []string { return matches } +func (m *Mock) ExpectMatch(reString string) error { + results := m.GetAllMatching(reString) + if len(results) == 0 { + return fmt.Errorf("expected log line %q, got %q", reString, strings.Join(m.GetAll(), "\n")) + } + return nil +} + // Clear resets the log buffer. func (m *Mock) Clear() { w := m.w.(*mockWriter) w.clearChan <- struct{}{} } - -type waitingMockWriter struct { - logChan chan string -} - -// newWaitingMockWriter returns a new waitingMockWriter -func newWaitingMockWriter() *waitingMockWriter { - logChan := make(chan string, 1000) - return &waitingMockWriter{ - logChan, - } -} - -func (m *waitingMockWriter) logAtLevel(p syslog.Priority, msg string) { - m.logChan <- fmt.Sprintf("%s: %s", levelName[p&7], msg) -} - -// WaitForMatch returns the first log line matching a regex. It accepts a -// regexp string and timeout. If the timeout value is met before the -// matching pattern is read from the channel, an error is returned. -func (m *WaitingMock) WaitForMatch(reString string, timeout time.Duration) (string, error) { - w := m.w.(*waitingMockWriter) - deadline := time.After(timeout) - re := regexp.MustCompile(reString) - for { - select { - case logLine := <-w.logChan: - if re.MatchString(logLine) { - close(w.logChan) - return logLine, nil - } - case <-deadline: - return "", fmt.Errorf("timeout waiting for match: %q", reString) - } - } -} diff --git a/log/prod_prefix.go b/log/prod_prefix.go new file mode 100644 index 00000000000..b4cf55daff5 --- /dev/null +++ b/log/prod_prefix.go @@ -0,0 +1,31 @@ +//go:build !integration + +package log + +import ( + "fmt" + "os" + "strings" + + "github.com/letsencrypt/boulder/core" +) + +// getPrefix returns the prefix and clkFormat that should be used by the +// stdout logger. 
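+// The prefix mirrors the details rsyslog would add for the syslog writer, so
+// that stdout/stderr lines carry the same hostname/datacenter/tag[pid] info.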
+func getPrefix() (string, string) {
+	shortHostname := "unknown"
+	datacenter := "unknown"
+	hostname, err := os.Hostname()
+	if err == nil {
+		splits := strings.SplitN(hostname, ".", 3)
+		shortHostname = splits[0]
+		if len(splits) > 1 {
+			datacenter = splits[1]
+		}
+	}
+
+	prefix := fmt.Sprintf("%s %s %s[%d]: ", shortHostname, datacenter, core.Command(), os.Getpid())
+	clkFormat := "2006-01-02T15:04:05.000000+00:00Z"
+
+	return prefix, clkFormat
+}
diff --git a/log/test_prefix.go b/log/test_prefix.go
new file mode 100644
index 00000000000..d1fb8949127
--- /dev/null
+++ b/log/test_prefix.go
@@ -0,0 +1,9 @@
+//go:build integration
+
+package log
+
+// getPrefix returns the prefix and clkFormat that should be used by the
+// stdout logger.
+func getPrefix() (string, string) {
+	return "", "15:04:05.000000"
+}
diff --git a/log/validator/tail_logger.go b/log/validator/tail_logger.go
new file mode 100644
index 00000000000..0e1a9999274
--- /dev/null
+++ b/log/validator/tail_logger.go
@@ -0,0 +1,40 @@
+package validator
+
+import (
+	"fmt"
+
+	"github.com/letsencrypt/boulder/log"
+)
+
+// tailLogger is an adapter to the nxadm/tail module's logging interface.
+type tailLogger struct {
+	log.Logger
+}
+
+func (tl tailLogger) Fatal(v ...any) {
+	tl.Errf(fmt.Sprint(v...)) //nolint: govet // just passing through
+}
+func (tl tailLogger) Fatalf(format string, v ...any) {
+	tl.Errf(format, v...)
+}
+func (tl tailLogger) Fatalln(v ...any) {
+	tl.Errf(fmt.Sprint(v...) + "\n") //nolint: govet // just passing through
+}
+func (tl tailLogger) Panic(v ...any) {
+	tl.Errf(fmt.Sprint(v...)) //nolint: govet // just passing through
+}
+func (tl tailLogger) Panicf(format string, v ...any) {
+	tl.Errf(format, v...)
+}
+func (tl tailLogger) Panicln(v ...any) {
+	tl.Errf(fmt.Sprint(v...) + "\n") //nolint: govet // just passing through
+}
+func (tl tailLogger) Print(v ...any) {
+	tl.Info(fmt.Sprint(v...))
+}
+func (tl tailLogger) Printf(format string, v ...any) {
+	tl.Infof(format, v...)
+}
+func (tl tailLogger) Println(v ...any) {
+	tl.Info(fmt.Sprint(v...) + "\n")
+}
diff --git a/log/validator/validator.go b/log/validator/validator.go
new file mode 100644
index 00000000000..5f309e5ae16
--- /dev/null
+++ b/log/validator/validator.go
@@ -0,0 +1,236 @@
+package validator
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/nxadm/tail"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	"github.com/letsencrypt/boulder/log"
+)
+
+var errInvalidChecksum = errors.New("invalid checksum length")
+
+type Validator struct {
+	// mu guards patterns and tailers to prevent Shutdown racing monitor
+	mu sync.Mutex
+
+	// patterns is the list of glob patterns to monitor with filepath.Glob for logs
+	patterns []string
+
+	// tailers maps filenames to the tailers that are currently tailing them
+	tailers map[string]*tail.Tail
+
+	// monitorCancel cancels the monitor's context, so it exits
+	monitorCancel context.CancelFunc
+
+	lineCounter *prometheus.CounterVec
+	log         log.Logger
+}
+
+// New returns a Validator monitoring paths, a list of file globs.
+func New(patterns []string, logger log.Logger, stats prometheus.Registerer) *Validator { + lineCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "log_lines", + Help: "A counter of log lines processed, with status", + }, []string{"filename", "status"}) + + monitorContext, monitorCancel := context.WithCancel(context.Background()) + + v := &Validator{ + patterns: patterns, + tailers: map[string]*tail.Tail{}, + log: logger, + monitorCancel: monitorCancel, + lineCounter: lineCounter, + } + + go v.monitor(monitorContext) + + return v +} + +// pollPaths expands v.patterns and calls v.tailValidateFile on each resulting file +func (v *Validator) pollPaths() { + v.mu.Lock() + defer v.mu.Unlock() + for _, pattern := range v.patterns { + paths, err := filepath.Glob(pattern) + if err != nil { + v.log.Errf("expanding file glob: %s", err) + } + + for _, path := range paths { + if _, ok := v.tailers[path]; ok { + // We are already tailing this file + continue + } + + t, err := tail.TailFile(path, tail.Config{ + ReOpen: true, + MustExist: false, // sometimes files won't exist, so we must tolerate that + Follow: true, + Logger: tailLogger{v.log}, + CompleteLines: true, + }) + if err != nil { + // TailFile shouldn't error when MustExist is false + v.log.Errf("unexpected error from TailFile: %v", err) + } + + go v.tailValidate(path, t.Lines) + + v.tailers[path] = t + } + } +} + +// Monitor calls v.pollPaths every minute until its context is cancelled +func (v *Validator) monitor(ctx context.Context) { + for { + v.pollPaths() + + // Wait a minute, unless cancelled + timer := time.NewTimer(time.Minute) + select { + case <-ctx.Done(): + return + case <-timer.C: + } + } +} + +func (v *Validator) tailValidate(filename string, lines chan *tail.Line) { + // Emit no more than 1 error line per second. This prevents consuming large + // amounts of disk space in case there is problem that causes all log lines to + // be invalid. + outputLimiter := time.NewTicker(time.Second) + defer outputLimiter.Stop() + + for line := range lines { + if line.Err != nil { + v.log.Errf("error while tailing %s: %s", filename, line.Err) + continue + } + err := lineValid(line.Text) + if err != nil { + if errors.Is(err, errInvalidChecksum) { + v.lineCounter.WithLabelValues(filename, "invalid checksum length").Inc() + } else { + v.lineCounter.WithLabelValues(filename, "bad").Inc() + } + select { + case <-outputLimiter.C: + v.log.Errf("%s: %s %q", filename, err, line.Text) + default: + } + } else { + v.lineCounter.WithLabelValues(filename, "ok").Inc() + } + } +} + +// Shutdown should be called before process shutdown +func (v *Validator) Shutdown() { + v.mu.Lock() + defer v.mu.Unlock() + + v.monitorCancel() + + for _, t := range v.tailers { + // The tail module seems to have a race condition that will generate + // errors like this on shutdown: + // failed to stop tailing file: : Failed to detect creation of + // : inotify watcher has been closed + // This is probably related to the module's shutdown logic triggering the + // "reopen" code path for files that are removed and then recreated. + // These errors are harmless so we ignore them to allow clean shutdown. 
+		_ = t.Stop()
+		t.Cleanup()
+	}
+}
+
+func lineValid(text string) error {
+	// Line format should match the following rsyslog omfile template:
+	//
+	//   template( name="LELogFormat" type="list" ) {
+	//       property(name="timereported" dateFormat="rfc3339")
+	//       constant(value=" ")
+	//       property(name="hostname" field.delimiter="46" field.number="1")
+	//       constant(value=" datacenter ")
+	//       property(name="syslogseverity")
+	//       constant(value=" ")
+	//       property(name="syslogtag")
+	//       property(name="msg" spifno1stsp="on" )
+	//       property(name="msg" droplastlf="on" )
+	//       constant(value="\n")
+	//   }
+	//
+	// This should result in a log line that looks like this:
+	//   timestamp hostname datacenter syslogseverity binary-name[pid]: checksum msg
+
+	fields := strings.Split(text, " ")
+	const errorPrefix = "log-validator:"
+	// Extract checksum from line
+	if len(fields) < 6 {
+		return fmt.Errorf("%s line doesn't match expected format", errorPrefix)
+	}
+	checksum := fields[5]
+	_, err := base64.RawURLEncoding.DecodeString(checksum)
+	if err != nil || len(checksum) != 6 {
+		return fmt.Errorf(
+			"%s expected a 6 character base64 raw URL decodable string, got %q: %w",
+			errorPrefix,
+			checksum,
+			errInvalidChecksum,
+		)
+	}
+
+	// Reconstruct just the message portion of the line
+	line := strings.Join(fields[6:], " ")
+
+	// If we are fed our own output, treat it as always valid. This
+	// prevents runaway scenarios where we generate ever-longer output.
+	if strings.Contains(text, errorPrefix) {
+		return nil
+	}
+	// Check the extracted checksum against the computed checksum
+	computedChecksum := log.LogLineChecksum(line)
+	if checksum != computedChecksum {
+		return fmt.Errorf("%s invalid checksum (expected %q, got %q)", errorPrefix, computedChecksum, checksum)
+	}
+	return nil
+}
+
+// ValidateFile validates a single file and returns an error if any of its
+// lines fail validation.
+func ValidateFile(filename string) error {
+	file, err := os.ReadFile(filename)
+	if err != nil {
+		return err
+	}
+	badFile := false
+	for i, line := range strings.Split(string(file), "\n") {
+		if line == "" {
+			continue
+		}
+		err := lineValid(line)
+		if err != nil {
+			badFile = true
+			fmt.Fprintf(os.Stderr, "[line %d] %s: %s\n", i+1, err, line)
+		}
+	}
+
+	if badFile {
+		return errors.New("file contained invalid lines")
+	}
+	return nil
+}
diff --git a/log/validator/validator_test.go b/log/validator/validator_test.go
new file mode 100644
index 00000000000..72c8de08b65
--- /dev/null
+++ b/log/validator/validator_test.go
@@ -0,0 +1,37 @@
+package validator
+
+import (
+	"testing"
+
+	"github.com/letsencrypt/boulder/test"
+)
+
+func TestLineValidAcceptsNew(t *testing.T) {
+	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: kJBuDg Caught SIGTERM")
+	test.AssertNotError(t, err, "errored on valid checksum")
+}
+
+func TestLineValidRejectsOld(t *testing.T) {
+	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: kKG6cwA Caught SIGTERM")
+	test.AssertError(t, err, "didn't error on old checksum format")
+}
+
+func TestLineValidRejects(t *testing.T) {
+	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxx Caught SIGTERM")
+	test.AssertError(t, err, "didn't error on invalid checksum")
+}
+
+func TestLineValidRejectsNotAChecksum(t *testing.T) {
+	err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxx Caught SIGTERM")
+	test.AssertError(t, err, "didn't error on invalid checksum")
+	
test.AssertErrorIs(t, err, errInvalidChecksum) +} + +func TestLineValidNonOurobouros(t *testing.T) { + err := lineValid("2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 boulder-wfe[1595]: xxxxxx Caught SIGTERM") + test.AssertError(t, err, "didn't error on invalid checksum") + + selfOutput := "2020-07-06T18:07:43.109389+00:00 70877f679c72 datacenter 6 log-validator[1337]: xxxxxx " + err.Error() + err2 := lineValid(selfOutput) + test.AssertNotError(t, err2, "expected no error when feeding lineValid's error output into itself") +} diff --git a/mail/mailer.go b/mail/mailer.go deleted file mode 100644 index b20de9496da..00000000000 --- a/mail/mailer.go +++ /dev/null @@ -1,406 +0,0 @@ -package mail - -import ( - "bytes" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io" - "math" - "math/big" - "mime/quotedprintable" - "net" - "net/mail" - "net/smtp" - "net/textproto" - "strconv" - "strings" - "syscall" - "time" - - "github.com/jmhodges/clock" - "github.com/prometheus/client_golang/prometheus" - - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" -) - -type idGenerator interface { - generate() *big.Int -} - -var maxBigInt = big.NewInt(math.MaxInt64) - -type realSource struct{} - -func (s realSource) generate() *big.Int { - randInt, err := rand.Int(rand.Reader, maxBigInt) - if err != nil { - panic(err) - } - return randInt -} - -// Mailer provides the interface for a mailer -type Mailer interface { - SendMail([]string, string, string) error - Connect() error - Close() error -} - -// MailerImpl defines a mail transfer agent to use for sending mail. It is not -// safe for concurrent access. -type MailerImpl struct { - log blog.Logger - dialer dialer - from mail.Address - client smtpClient - clk clock.Clock - csprgSource idGenerator - reconnectBase time.Duration - reconnectMax time.Duration - sendMailAttempts *prometheus.CounterVec -} - -type dialer interface { - Dial() (smtpClient, error) -} - -type smtpClient interface { - Mail(string) error - Rcpt(string) error - Data() (io.WriteCloser, error) - Reset() error - Close() error -} - -type dryRunClient struct { - log blog.Logger -} - -func (d dryRunClient) Dial() (smtpClient, error) { - return d, nil -} - -func (d dryRunClient) Mail(from string) error { - d.log.Debugf("MAIL FROM:<%s>", from) - return nil -} - -func (d dryRunClient) Rcpt(to string) error { - d.log.Debugf("RCPT TO:<%s>", to) - return nil -} - -func (d dryRunClient) Close() error { - return nil -} - -func (d dryRunClient) Data() (io.WriteCloser, error) { - return d, nil -} - -func (d dryRunClient) Write(p []byte) (n int, err error) { - for _, line := range strings.Split(string(p), "\n") { - d.log.Debugf("data: %s", line) - } - return len(p), nil -} - -func (d dryRunClient) Reset() (err error) { - d.log.Debugf("RESET") - return nil -} - -// New constructs a Mailer to represent an account on a particular mail -// transfer agent. 
-func New( - server, - port, - username, - password string, - rootCAs *x509.CertPool, - from mail.Address, - logger blog.Logger, - stats prometheus.Registerer, - reconnectBase time.Duration, - reconnectMax time.Duration) *MailerImpl { - - sendMailAttempts := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "send_mail_attempts", - Help: "A counter of send mail attempts labelled by result", - }, []string{"result", "error"}) - stats.MustRegister(sendMailAttempts) - - return &MailerImpl{ - dialer: &dialerImpl{ - username: username, - password: password, - server: server, - port: port, - rootCAs: rootCAs, - }, - log: logger, - from: from, - clk: clock.New(), - csprgSource: realSource{}, - reconnectBase: reconnectBase, - reconnectMax: reconnectMax, - sendMailAttempts: sendMailAttempts, - } -} - -// New constructs a Mailer suitable for doing a dry run. It simply logs each -// command that would have been run, at debug level. -func NewDryRun(from mail.Address, logger blog.Logger) *MailerImpl { - return &MailerImpl{ - dialer: dryRunClient{logger}, - from: from, - clk: clock.New(), - csprgSource: realSource{}, - sendMailAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "send_mail_attempts", - Help: "A counter of send mail attempts labelled by result", - }, []string{"result", "error"}), - } -} - -func (m *MailerImpl) generateMessage(to []string, subject, body string) ([]byte, error) { - mid := m.csprgSource.generate() - now := m.clk.Now().UTC() - addrs := []string{} - for _, a := range to { - if !core.IsASCII(a) { - return nil, fmt.Errorf("Non-ASCII email address") - } - addrs = append(addrs, strconv.Quote(a)) - } - headers := []string{ - fmt.Sprintf("To: %s", strings.Join(addrs, ", ")), - fmt.Sprintf("From: %s", m.from.String()), - fmt.Sprintf("Subject: %s", subject), - fmt.Sprintf("Date: %s", now.Format(time.RFC822)), - fmt.Sprintf("Message-Id: <%s.%s.%s>", now.Format("20060102T150405"), mid.String(), m.from.Address), - "MIME-Version: 1.0", - "Content-Type: text/plain; charset=UTF-8", - "Content-Transfer-Encoding: quoted-printable", - } - for i := range headers[1:] { - // strip LFs - headers[i] = strings.Replace(headers[i], "\n", "", -1) - } - bodyBuf := new(bytes.Buffer) - mimeWriter := quotedprintable.NewWriter(bodyBuf) - _, err := mimeWriter.Write([]byte(body)) - if err != nil { - return nil, err - } - err = mimeWriter.Close() - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf( - "%s\r\n\r\n%s\r\n", - strings.Join(headers, "\r\n"), - bodyBuf.String(), - )), nil -} - -func (m *MailerImpl) reconnect() { - for i := 0; ; i++ { - sleepDuration := core.RetryBackoff(i, m.reconnectBase, m.reconnectMax, 2) - m.log.Infof("sleeping for %s before reconnecting mailer", sleepDuration) - m.clk.Sleep(sleepDuration) - m.log.Info("attempting to reconnect mailer") - err := m.Connect() - if err != nil { - m.log.Warningf("reconnect error: %s", err) - continue - } - break - } - m.log.Info("reconnected successfully") -} - -// Connect opens a connection to the specified mail server. It must be called -// before SendMail. 
-func (m *MailerImpl) Connect() error { - client, err := m.dialer.Dial() - if err != nil { - return err - } - m.client = client - return nil -} - -type dialerImpl struct { - username, password, server, port string - rootCAs *x509.CertPool -} - -func (di *dialerImpl) Dial() (smtpClient, error) { - hostport := net.JoinHostPort(di.server, di.port) - var conn net.Conn - var err error - conn, err = tls.Dial("tcp", hostport, &tls.Config{ - RootCAs: di.rootCAs, - }) - if err != nil { - return nil, err - } - client, err := smtp.NewClient(conn, di.server) - if err != nil { - return nil, err - } - auth := smtp.PlainAuth("", di.username, di.password, di.server) - if err = client.Auth(auth); err != nil { - return nil, err - } - return client, nil -} - -// resetAndError resets the current mail transaction and then returns its -// argument as an error. If the reset command also errors, it combines both -// errors and returns them. Without this we would get `nested MAIL command`. -// https://github.com/letsencrypt/boulder/issues/3191 -func (m *MailerImpl) resetAndError(err error) error { - if err == io.EOF { - return err - } - if err2 := m.client.Reset(); err2 != nil { - return fmt.Errorf("%s (also, on sending RSET: %s)", err, err2) - } - return err -} - -func (m *MailerImpl) sendOne(to []string, subject, msg string) error { - if m.client == nil { - return errors.New("call Connect before SendMail") - } - body, err := m.generateMessage(to, subject, msg) - if err != nil { - return err - } - if err = m.client.Mail(m.from.String()); err != nil { - return err - } - for _, t := range to { - if err = m.client.Rcpt(t); err != nil { - return m.resetAndError(err) - } - } - w, err := m.client.Data() - if err != nil { - return m.resetAndError(err) - } - _, err = w.Write(body) - if err != nil { - return m.resetAndError(err) - } - err = w.Close() - if err != nil { - return m.resetAndError(err) - } - return nil -} - -// BadAddressSMTPError is returned by SendMail when the server rejects a message -// but for a reason that doesn't prevent us from continuing to send mail. The -// error message contains the error code and the error message returned from the -// server. -type BadAddressSMTPError struct { - Message string -} - -func (e BadAddressSMTPError) Error() string { - return e.Message -} - -// Based on reading of various SMTP documents these are a handful -// of errors we are likely to be able to continue sending mail after -// receiving. The majority of these errors boil down to 'bad address'. -var badAddressErrorCodes = map[int]bool{ - 401: true, // Invalid recipient - 422: true, // Recipient mailbox is full - 441: true, // Recipient server is not responding - 450: true, // User's mailbox is not available - 510: true, // Invalid recipient - 511: true, // Invalid recipient - 513: true, // Address type invalid - 541: true, // Recipient rejected message - 550: true, // Non-existent address - 553: true, // Non-existent address -} - -// SendMail sends an email to the provided list of recipients. The email body -// is simple text. -func (m *MailerImpl) SendMail(to []string, subject, msg string) error { - var protoErr *textproto.Error - for { - err := m.sendOne(to, subject, msg) - if err == nil { - // If the error is nil, we sent the mail without issue. nice! - break - } else if err == io.EOF { - m.sendMailAttempts.WithLabelValues("failure", "EOF").Inc() - // If the error is an EOF, we should try to reconnect on a backoff - // schedule, sleeping between attempts. 
- m.reconnect() - // After reconnecting, loop around and try `sendOne` again. - continue - } else if errors.Is(err, syscall.ECONNRESET) { - m.sendMailAttempts.WithLabelValues("failure", "TCP RST").Inc() - // If the error is `syscall.ECONNRESET`, we should try to reconnect on a backoff - // schedule, sleeping between attempts. - m.reconnect() - // After reconnecting, loop around and try `sendOne` again. - continue - } else if errors.Is(err, syscall.EPIPE) { - // EPIPE also seems to be a common way to signal TCP RST. - m.sendMailAttempts.WithLabelValues("failure", "EPIPE").Inc() - m.reconnect() - continue - } else if errors.As(err, &protoErr) && protoErr.Code == 421 { - m.sendMailAttempts.WithLabelValues("failure", "SMTP 421").Inc() - /* - * If the error is an instance of `textproto.Error` with a SMTP error code, - * and that error code is 421 then treat this as a reconnect-able event. - * - * The SMTP RFC defines this error code as: - * 421 Service not available, closing transmission channel - * (This may be a reply to any command if the service knows it - * must shut down) - * - * In practice we see this code being used by our production SMTP server - * when the connection has gone idle for too long. For more information - * see issue #2249[0]. - * - * [0] - https://github.com/letsencrypt/boulder/issues/2249 - */ - m.reconnect() - // After reconnecting, loop around and try `sendOne` again. - continue - } else if errors.As(err, &protoErr) && badAddressErrorCodes[protoErr.Code] { - m.sendMailAttempts.WithLabelValues("failure", fmt.Sprintf("SMTP %d", protoErr.Code)).Inc() - return BadAddressSMTPError{fmt.Sprintf("%d: %s", protoErr.Code, protoErr.Msg)} - } else { - // If it wasn't an EOF error or a recoverable SMTP error it is unexpected and we - // return from SendMail() with the error - m.sendMailAttempts.WithLabelValues("failure", "unexpected").Inc() - return err - } - } - - m.sendMailAttempts.WithLabelValues("success", "").Inc() - return nil -} - -// Close closes the connection. 
-func (m *MailerImpl) Close() error { - if m.client == nil { - return errors.New("call Connect before Close") - } - return m.client.Close() -} diff --git a/mail/mailer_test.go b/mail/mailer_test.go deleted file mode 100644 index a192e40dbf9..00000000000 --- a/mail/mailer_test.go +++ /dev/null @@ -1,522 +0,0 @@ -package mail - -import ( - "bufio" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "math/big" - "net" - "net/mail" - "net/textproto" - "strings" - "testing" - "time" - - "github.com/jmhodges/clock" - - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" -) - -type fakeSource struct{} - -func (f fakeSource) generate() *big.Int { - return big.NewInt(1991) -} - -func TestGenerateMessage(t *testing.T) { - fc := clock.NewFake() - fromAddress, _ := mail.ParseAddress("happy sender ") - log := blog.UseMock() - m := New("", "", "", "", nil, *fromAddress, log, metrics.NoopRegisterer, 0, 0) - m.clk = fc - m.csprgSource = fakeSource{} - messageBytes, err := m.generateMessage([]string{"recv@email.com"}, "test subject", "this is the body\n") - test.AssertNotError(t, err, "Failed to generate email body") - message := string(messageBytes) - fields := strings.Split(message, "\r\n") - test.AssertEquals(t, len(fields), 12) - fmt.Println(message) - test.AssertEquals(t, fields[0], "To: \"recv@email.com\"") - test.AssertEquals(t, fields[1], "From: \"happy sender\" ") - test.AssertEquals(t, fields[2], "Subject: test subject") - test.AssertEquals(t, fields[3], "Date: 01 Jan 70 00:00 UTC") - test.AssertEquals(t, fields[4], "Message-Id: <19700101T000000.1991.send@email.com>") - test.AssertEquals(t, fields[5], "MIME-Version: 1.0") - test.AssertEquals(t, fields[6], "Content-Type: text/plain; charset=UTF-8") - test.AssertEquals(t, fields[7], "Content-Transfer-Encoding: quoted-printable") - test.AssertEquals(t, fields[8], "") - test.AssertEquals(t, fields[9], "this is the body") -} - -func TestFailNonASCIIAddress(t *testing.T) { - log := blog.UseMock() - fromAddress, _ := mail.ParseAddress("send@email.com") - m := New("", "", "", "", nil, *fromAddress, log, metrics.NoopRegisterer, 0, 0) - _, err := m.generateMessage([]string{"é—æ†¾@email.com"}, "test subject", "this is the body\n") - test.AssertError(t, err, "Allowed a non-ASCII to address incorrectly") -} - -func expect(t *testing.T, buf *bufio.Reader, expected string) error { - line, _, err := buf.ReadLine() - if err != nil { - t.Errorf("readline: %s expected: %s\n", err, expected) - return err - } - if string(line) != expected { - t.Errorf("Expected %s, got %s", expected, line) - return fmt.Errorf("Expected %s, got %s", expected, line) - } - return nil -} - -type connHandler func(int, *testing.T, net.Conn, *net.TCPConn) - -func listenForever(l *net.TCPListener, t *testing.T, handler connHandler) { - keyPair, err := tls.LoadX509KeyPair("../test/mail-test-srv/localhost/cert.pem", "../test/mail-test-srv/localhost/key.pem") - if err != nil { - t.Errorf("loading keypair: %s", err) - - } - tlsConf := &tls.Config{ - Certificates: []tls.Certificate{keyPair}, - } - connID := 0 - for { - tcpConn, err := l.AcceptTCP() - if err != nil { - return - } - - tlsConn := tls.Server(tcpConn, tlsConf) - connID++ - go handler(connID, t, tlsConn, tcpConn) - } -} - -func authenticateClient(t *testing.T, conn net.Conn) { - buf := bufio.NewReader(conn) - // we can ignore write errors because any - // failures will be caught on the connecting - // side - _, _ = conn.Write([]byte("220 smtp.example.com 
ESMTP\n")) - err := expect(t, buf, "EHLO localhost") - if err != nil { - return - } - - _, _ = conn.Write([]byte("250-PIPELINING\n")) - _, _ = conn.Write([]byte("250-AUTH PLAIN LOGIN\n")) - _, _ = conn.Write([]byte("250 8BITMIME\n")) - // Base64 encoding of "\0user@example.com\0passwd" - err = expect(t, buf, "AUTH PLAIN AHVzZXJAZXhhbXBsZS5jb20AcGFzc3dk") - if err != nil { - return - } - _, _ = conn.Write([]byte("235 2.7.0 Authentication successful\n")) -} - -// The normal handler authenticates the client and then disconnects without -// further command processing. It is sufficient for TestConnect() -func normalHandler(connID int, t *testing.T, tlsConn net.Conn, tcpConn *net.TCPConn) { - defer func() { - err := tlsConn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, tlsConn) -} - -// The disconnectHandler authenticates the client like the normalHandler but -// additionally processes an email flow (e.g. MAIL, RCPT and DATA commands). -// When the `connID` is <= `closeFirst` the connection is closed immediately -// after the MAIL command is received and prior to issuing a 250 response. If -// a `goodbyeMsg` is provided, it is written to the client immediately before -// closing. In this way the first `closeFirst` connections will not complete -// normally and can be tested for reconnection logic. -func disconnectHandler(closeFirst int, goodbyeMsg string) connHandler { - return func(connID int, t *testing.T, conn net.Conn, _ *net.TCPConn) { - defer func() { - err := conn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, conn) - - buf := bufio.NewReader(conn) - err := expect(t, buf, "MAIL FROM:<> BODY=8BITMIME") - if err != nil { - return - } - - if connID <= closeFirst { - // If there was a `goodbyeMsg` specified, write it to the client before - // closing the connection. This is a good way to deliver a SMTP error - // before closing - if goodbyeMsg != "" { - _, _ = fmt.Fprintf(conn, "%s\r\n", goodbyeMsg) - t.Logf("Wrote goodbye msg: %s", goodbyeMsg) - } - t.Log("Cutting off client early") - return - } - _, _ = conn.Write([]byte("250 Sure. Go on. \r\n")) - - err = expect(t, buf, "RCPT TO:") - if err != nil { - return - } - _, _ = conn.Write([]byte("250 Tell Me More \r\n")) - - err = expect(t, buf, "DATA") - if err != nil { - return - } - _, _ = conn.Write([]byte("354 Cool Data\r\n")) - _, _ = conn.Write([]byte("250 Peace Out\r\n")) - } -} - -func badEmailHandler(messagesToProcess int) connHandler { - return func(_ int, t *testing.T, conn net.Conn, _ *net.TCPConn) { - defer func() { - err := conn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, conn) - - buf := bufio.NewReader(conn) - err := expect(t, buf, "MAIL FROM:<> BODY=8BITMIME") - if err != nil { - return - } - - _, _ = conn.Write([]byte("250 Sure. Go on. \r\n")) - - err = expect(t, buf, "RCPT TO:") - if err != nil { - return - } - _, _ = conn.Write([]byte("401 4.1.3 Bad recipient address syntax\r\n")) - err = expect(t, buf, "RSET") - if err != nil { - return - } - _, _ = conn.Write([]byte("250 Ok yr rset now\r\n")) - } -} - -// The rstHandler authenticates the client like the normalHandler but -// additionally processes an email flow (e.g. MAIL, RCPT and DATA -// commands). When the `connID` is <= `rstFirst` the socket of the -// listening connection is set to abruptively close (sends TCP RST but -// no FIN). 
The listening connection is closed immediately after the -// MAIL command is received and prior to issuing a 250 response. In this -// way the first `rstFirst` connections will not complete normally and -// can be tested for reconnection logic. -func rstHandler(rstFirst int) connHandler { - return func(connID int, t *testing.T, tlsConn net.Conn, tcpConn *net.TCPConn) { - defer func() { - err := tcpConn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, tlsConn) - - buf := bufio.NewReader(tlsConn) - err := expect(t, buf, "MAIL FROM:<> BODY=8BITMIME") - if err != nil { - return - } - // Set the socket of the listening connection to abruptively - // close. - if connID <= rstFirst { - err := tcpConn.SetLinger(0) - if err != nil { - t.Error(err) - return - } - t.Log("Socket set for abruptive close. Cutting off client early") - return - } - _, _ = tlsConn.Write([]byte("250 Sure. Go on. \r\n")) - - err = expect(t, buf, "RCPT TO:") - if err != nil { - return - } - _, _ = tlsConn.Write([]byte("250 Tell Me More \r\n")) - - err = expect(t, buf, "DATA") - if err != nil { - return - } - _, _ = tlsConn.Write([]byte("354 Cool Data\r\n")) - _, _ = tlsConn.Write([]byte("250 Peace Out\r\n")) - } -} - -func setup(t *testing.T) (*MailerImpl, *net.TCPListener, func()) { - fromAddress, _ := mail.ParseAddress("you-are-a-winner@example.com") - log := blog.UseMock() - - // Listen on port 0 to get any free available port - tcpAddr, err := net.ResolveTCPAddr("tcp", ":0") - if err != nil { - t.Fatalf("resolving tcp addr: %s", err) - } - tcpl, err := net.ListenTCP("tcp", tcpAddr) - if err != nil { - t.Fatalf("listen: %s", err) - } - - cleanUp := func() { - err := tcpl.Close() - if err != nil { - t.Errorf("listen.Close: %s", err) - } - } - - pem, err := ioutil.ReadFile("../test/mail-test-srv/minica.pem") - if err != nil { - t.Fatalf("loading smtp root: %s", err) - } - smtpRoots := x509.NewCertPool() - ok := smtpRoots.AppendCertsFromPEM(pem) - if !ok { - t.Fatal("failed parsing SMTP root") - } - - // We can look at the listener Addr() to figure out which free port was - // assigned by the operating system - - _, port, err := net.SplitHostPort(tcpl.Addr().String()) - if err != nil { - t.Fatal("failed parsing port from tcp listen") - } - - m := New( - "localhost", - port, - "user@example.com", - "passwd", - smtpRoots, - *fromAddress, - log, - metrics.NoopRegisterer, - time.Second*2, time.Second*10) - - return m, tcpl, cleanUp -} - -func TestConnect(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - - go listenForever(l, t, normalHandler) - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - err = m.Close() - if err != nil { - t.Errorf("Failed to clean up: %s", err) - } -} - -func TestReconnectSuccess(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - const closedConns = 5 - - // Configure a test server that will disconnect the first `closedConns` - // connections after the MAIL cmd - go listenForever(l, t, disconnectHandler(closedConns, "")) - - // With a mailer client that has a max attempt > `closedConns` we expect no - // error. The message should be delivered after `closedConns` reconnect - // attempts. - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - if err != nil { - t.Errorf("Expected SendMail() to not fail. 
Got err: %s", err) - } -} - -func TestBadEmailError(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - const messages = 3 - - go listenForever(l, t, badEmailHandler(messages)) - - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - // We expect there to be an error - if err == nil { - t.Errorf("Expected SendMail() to return an BadAddressSMTPError, got nil") - } - expected := "401: 4.1.3 Bad recipient address syntax" - var badAddrErr BadAddressSMTPError - test.AssertErrorWraps(t, err, &badAddrErr) - test.AssertEquals(t, badAddrErr.Message, expected) -} - -func TestReconnectSMTP421(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - const closedConns = 5 - - // A SMTP 421 can be generated when the server times out an idle connection. - // For more information see https://github.com/letsencrypt/boulder/issues/2249 - smtp421 := "421 1.2.3 green.eggs.and.spam Error: timeout exceeded" - - // Configure a test server that will disconnect the first `closedConns` - // connections after the MAIL cmd with a SMTP 421 error - go listenForever(l, t, disconnectHandler(closedConns, smtp421)) - - // With a mailer client that has a max attempt > `closedConns` we expect no - // error. The message should be delivered after `closedConns` reconnect - // attempts. - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - if err != nil { - t.Errorf("Expected SendMail() to not fail. Got err: %s", err) - } -} - -func TestOtherError(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - - go listenForever(l, t, func(_ int, t *testing.T, conn net.Conn, _ *net.TCPConn) { - defer func() { - err := conn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, conn) - - buf := bufio.NewReader(conn) - err := expect(t, buf, "MAIL FROM:<> BODY=8BITMIME") - if err != nil { - return - } - - _, _ = conn.Write([]byte("250 Sure. Go on. \r\n")) - - err = expect(t, buf, "RCPT TO:") - if err != nil { - return - } - - _, _ = conn.Write([]byte("999 1.1.1 This would probably be bad?\r\n")) - - err = expect(t, buf, "RSET") - if err != nil { - return - } - - _, _ = conn.Write([]byte("250 Ok yr rset now\r\n")) - }) - - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - // We expect there to be an error - if err == nil { - t.Errorf("Expected SendMail() to return an error, got nil") - } - expected := "999 1.1.1 This would probably be bad?" - var rcptErr *textproto.Error - test.AssertErrorWraps(t, err, &rcptErr) - test.AssertEquals(t, rcptErr.Error(), expected) - - m, l, cleanUp = setup(t) - defer cleanUp() - - go listenForever(l, t, func(_ int, t *testing.T, conn net.Conn, _ *net.TCPConn) { - defer func() { - err := conn.Close() - if err != nil { - t.Errorf("conn.Close: %s", err) - } - }() - authenticateClient(t, conn) - - buf := bufio.NewReader(conn) - err := expect(t, buf, "MAIL FROM:<> BODY=8BITMIME") - if err != nil { - return - } - - _, _ = conn.Write([]byte("250 Sure. Go on. 
\r\n")) - - err = expect(t, buf, "RCPT TO:") - if err != nil { - return - } - - _, _ = conn.Write([]byte("999 1.1.1 This would probably be bad?\r\n")) - - err = expect(t, buf, "RSET") - if err != nil { - return - } - - _, _ = conn.Write([]byte("nop\r\n")) - }) - err = m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - // We expect there to be an error - test.AssertError(t, err, "SendMail didn't fail as expected") - test.AssertEquals(t, err.Error(), "999 1.1.1 This would probably be bad? (also, on sending RSET: short response: nop)") -} - -func TestReconnectAfterRST(t *testing.T) { - m, l, cleanUp := setup(t) - defer cleanUp() - const rstConns = 5 - - // Configure a test server that will RST and disconnect the first - // `closedConns` connections - go listenForever(l, t, rstHandler(rstConns)) - - // With a mailer client that has a max attempt > `closedConns` we expect no - // error. The message should be delivered after `closedConns` reconnect - // attempts. - err := m.Connect() - if err != nil { - t.Errorf("Failed to connect: %s", err) - } - err = m.SendMail([]string{"hi@bye.com"}, "You are already a winner!", "Just kidding") - if err != nil { - t.Errorf("Expected SendMail() to not fail. Got err: %s", err) - } -} diff --git a/metrics/measured_http/http.go b/metrics/measured_http/http.go index b50234bb7a9..64b17e143e8 100644 --- a/metrics/measured_http/http.go +++ b/metrics/measured_http/http.go @@ -6,6 +6,8 @@ import ( "github.com/jmhodges/clock" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) // responseWriterWithStatus satisfies http.ResponseWriter, but keeps track of the @@ -30,10 +32,9 @@ func (r *responseWriterWithStatus) Write(body []byte) (int, error) { return r.ResponseWriter.Write(body) } -// serveMux is a partial interface wrapper for the method http.ServeMux -// exposes that we use. This is needed so that we can replace the default -// http.ServeMux in ocsp-responder where we don't want to use its path -// canonicalization. +// serveMux is a partial interface wrapper for the one method http.ServeMux +// exposes that we use. This prevents us from accidentally developing an +// overly-specific reliance on that concrete type. type serveMux interface { Handler(*http.Request) (http.Handler, string) } @@ -44,27 +45,38 @@ type MeasuredHandler struct { clk clock.Clock // Normally this is always responseTime, but we override it for testing. stat *prometheus.HistogramVec + // inFlightRequestsGauge is a gauge that tracks the number of requests + // currently in flight, labeled by endpoint. 
+ inFlightRequestsGauge *prometheus.GaugeVec } -func New(m serveMux, clk clock.Clock, stats prometheus.Registerer) *MeasuredHandler { - responseTime := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "response_time", - Help: "Time taken to respond to a request", - }, - []string{"endpoint", "method", "code"}) - stats.MustRegister(responseTime) - return &MeasuredHandler{ - serveMux: m, - clk: clk, - stat: responseTime, - } +func New(m serveMux, clk clock.Clock, stats prometheus.Registerer, opts ...otelhttp.Option) http.Handler { + responseTime := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "response_time", + Help: "Time taken to respond to a request", + }, []string{"endpoint", "method", "code"}) + + inFlightRequestsGauge := promauto.With(stats).NewGaugeVec(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "Tracks the number of WFE requests currently in flight, labeled by endpoint.", + }, []string{"endpoint"}) + + return otelhttp.NewHandler(&MeasuredHandler{ + serveMux: m, + clk: clk, + stat: responseTime, + inFlightRequestsGauge: inFlightRequestsGauge, + }, "server", opts...) } func (h *MeasuredHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { begin := h.clk.Now() rwws := &responseWriterWithStatus{w, 0} + subHandler, pattern := h.Handler(r) + h.inFlightRequestsGauge.WithLabelValues(pattern).Inc() + defer h.inFlightRequestsGauge.WithLabelValues(pattern).Dec() + // Use the method string only if it's a recognized HTTP method. This avoids // ballooning timeseries with invalid methods from public input. var method string @@ -77,7 +89,6 @@ func (h *MeasuredHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { method = "unknown" } - subHandler, pattern := h.Handler(r) defer func() { h.stat.With(prometheus.Labels{ "endpoint": pattern, diff --git a/metrics/measured_http/http_test.go b/metrics/measured_http/http_test.go index ee435c353d3..6f836250c33 100644 --- a/metrics/measured_http/http_test.go +++ b/metrics/measured_http/http_test.go @@ -42,12 +42,21 @@ func TestMeasuring(t *testing.T) { }, []string{"endpoint", "method", "code"}) + inFlightRequestsGauge := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "Tracks the number of WFE requests currently in flight, labeled by endpoint.", + }, + []string{"endpoint"}, + ) + mux := http.NewServeMux() mux.Handle("/foo", sleepyHandler{clk}) mh := MeasuredHandler{ - serveMux: mux, - clk: clk, - stat: stat, + serveMux: mux, + clk: clk, + stat: stat, + inFlightRequestsGauge: inFlightRequestsGauge, } mh.ServeHTTP(httptest.NewRecorder(), &http.Request{ URL: &url.URL{Path: "/foo"}, @@ -95,13 +104,21 @@ func TestUnknownMethod(t *testing.T) { Help: "fake", }, []string{"endpoint", "method", "code"}) + inFlightRequestsGauge := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "Tracks the number of WFE requests currently in flight, labeled by endpoint.", + }, + []string{"endpoint"}, + ) mux := http.NewServeMux() mux.Handle("/foo", sleepyHandler{clk}) mh := MeasuredHandler{ - serveMux: mux, - clk: clk, - stat: stat, + serveMux: mux, + clk: clk, + stat: stat, + inFlightRequestsGauge: inFlightRequestsGauge, } mh.ServeHTTP(httptest.NewRecorder(), &http.Request{ URL: &url.URL{Path: "/foo"}, @@ -140,14 +157,22 @@ func TestWrite(t *testing.T) { }, []string{"endpoint", "method", "code"}) + inFlightRequestsGauge := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "Tracks the number of WFE requests 
currently in flight, labeled by endpoint.",
+		},
+		[]string{"endpoint"})
+
 	mux := http.NewServeMux()
 	mux.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) {
 		w.Write([]byte{})
 	})
 	mh := MeasuredHandler{
-		serveMux: mux,
-		clk:      clk,
-		stat:     stat,
+		serveMux:              mux,
+		clk:                   clk,
+		stat:                  stat,
+		inFlightRequestsGauge: inFlightRequestsGauge,
 	}
 	mh.ServeHTTP(httptest.NewRecorder(), &http.Request{
 		URL: &url.URL{Path: "/foo"},
@@ -162,6 +187,7 @@
 	}, []string{"endpoint", "method", "code"})
 	mh.stat = stat
+	mh.inFlightRequestsGauge = inFlightRequestsGauge
 	expectedLabels := map[string]string{
 		"endpoint": "/foo",
 		"method":   "GET",
diff --git a/mocks/ca.go b/mocks/ca.go
index ee6e0f56b75..4945c76b2a8 100644
--- a/mocks/ca.go
+++ b/mocks/ca.go
@@ -6,9 +6,9 @@ import (
 	"encoding/pem"
 	"fmt"
-	capb "github.com/letsencrypt/boulder/ca/proto"
-	corepb "github.com/letsencrypt/boulder/core/proto"
 	"google.golang.org/grpc"
+
+	capb "github.com/letsencrypt/boulder/ca/proto"
 )
 
 // MockCA is a mock of a CA that always returns the cert from PEM in response to
@@ -17,34 +17,22 @@ type MockCA struct {
 	PEM []byte
 }
 
-// IssuePrecertificate is a mock
-func (ca *MockCA) IssuePrecertificate(ctx context.Context, _ *capb.IssueCertificateRequest, _ ...grpc.CallOption) (*capb.IssuePrecertificateResponse, error) {
+// IssueCertificate is a mock
+func (ca *MockCA) IssueCertificate(ctx context.Context, req *capb.IssueCertificateRequest, _ ...grpc.CallOption) (*capb.IssueCertificateResponse, error) {
 	if ca.PEM == nil {
 		return nil, fmt.Errorf("MockCA's PEM field must be set before calling IssueCertificate")
 	}
 	block, _ := pem.Decode(ca.PEM)
-	cert, err := x509.ParseCertificate(block.Bytes)
+	sampleDER, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
 		return nil, err
 	}
-	return &capb.IssuePrecertificateResponse{
-		DER: cert.Raw,
-	}, nil
+	return &capb.IssueCertificateResponse{DER: sampleDER.Raw}, nil
 }
 
-// IssueCertificateForPrecertificate is a mock
-func (ca *MockCA) IssueCertificateForPrecertificate(ctx context.Context, req *capb.IssueCertificateForPrecertificateRequest, _ ...grpc.CallOption) (*corepb.Certificate, error) {
-	return &corepb.Certificate{
-		Der:            req.DER,
-		RegistrationID: 1,
-		Serial:         "mock",
-		Digest:         "mock",
-		Issued:         1,
-		Expires:        1,
-	}, nil
-}
+type MockCRLGenerator struct{}
 
-// GenerateOCSP is a mock
-func (ca *MockCA) GenerateOCSP(ctx context.Context, req *capb.GenerateOCSPRequest, _ ...grpc.CallOption) (*capb.OCSPResponse, error) {
+// GenerateCRL is a mock
+func (ca *MockCRLGenerator) GenerateCRL(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[capb.GenerateCRLRequest, capb.GenerateCRLResponse], error) {
 	return nil, nil
 }
diff --git a/mocks/emailexporter.go b/mocks/emailexporter.go
new file mode 100644
index 00000000000..a2891588845
--- /dev/null
+++ b/mocks/emailexporter.go
@@ -0,0 +1,108 @@
+package mocks
+
+import (
+	"context"
+	"slices"
+	"sync"
+
+	"google.golang.org/grpc"
+	"google.golang.org/protobuf/types/known/emptypb"
+
+	"github.com/letsencrypt/boulder/salesforce"
+	salesforcepb "github.com/letsencrypt/boulder/salesforce/proto"
+)
+
+var _ salesforce.SalesforceClient = (*MockSalesforceClientImpl)(nil)
+
+// MockSalesforceClientImpl is a mock implementation of salesforce.SalesforceClient.
+type MockSalesforceClientImpl struct {
+	sync.Mutex
+	CreatedContacts []string
+	CreatedCases    []salesforce.Case
+}
+
+// NewMockSalesforceClientImpl returns a MockSalesforceClientImpl, which implements
+// the SalesforceClient interface. 
It returns the underlying concrete type, so callers +// have access to its struct members and helper methods. +func NewMockSalesforceClientImpl() *MockSalesforceClientImpl { + return &MockSalesforceClientImpl{} +} + +// SendContact adds an email to CreatedContacts. +func (m *MockSalesforceClientImpl) SendContact(email string) error { + m.Lock() + defer m.Unlock() + + m.CreatedContacts = append(m.CreatedContacts, email) + return nil +} + +// GetCreatedContacts is used for testing to retrieve the list of created +// contacts in a thread-safe manner. +func (m *MockSalesforceClientImpl) GetCreatedContacts() []string { + m.Lock() + defer m.Unlock() + + // Return a copy to avoid race conditions. + return slices.Clone(m.CreatedContacts) +} + +// SendCase adds a case payload to CreatedCases. +func (m *MockSalesforceClientImpl) SendCase(payload salesforce.Case) error { + m.Lock() + defer m.Unlock() + + m.CreatedCases = append(m.CreatedCases, payload) + return nil +} + +// GetCreatedCases is used for testing to retrieve the list of created cases in +// a thread-safe manner. +func (m *MockSalesforceClientImpl) GetCreatedCases() []salesforce.Case { + m.Lock() + defer m.Unlock() + + // Return a copy to avoid race conditions. + return slices.Clone(m.CreatedCases) +} + +var _ salesforcepb.ExporterClient = (*MockExporterClientImpl)(nil) + +// MockExporterClientImpl is a mock implementation of ExporterClient. +type MockExporterClientImpl struct { + SalesforceClient salesforce.SalesforceClient +} + +// NewMockExporterImpl returns a MockExporterClientImpl as an ExporterClient. +func NewMockExporterImpl(salesforceClient salesforce.SalesforceClient) salesforcepb.ExporterClient { + return &MockExporterClientImpl{ + SalesforceClient: salesforceClient, + } +} + +// SendContacts submits emails to the inner salesforce.SalesforceClient, returning an +// error if any fail. +func (m *MockExporterClientImpl) SendContacts(ctx context.Context, req *salesforcepb.SendContactsRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + for _, e := range req.Emails { + err := m.SalesforceClient.SendContact(e) + if err != nil { + return nil, err + } + } + return &emptypb.Empty{}, nil +} + +// SendCase submits a Case using the inner salesforce.SalesforceClient. +func (m *MockExporterClientImpl) SendCase(ctx context.Context, req *salesforcepb.SendCaseRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return &emptypb.Empty{}, m.SalesforceClient.SendCase(salesforce.Case{ + Origin: req.Origin, + Subject: req.Subject, + Description: req.Description, + ContactEmail: req.ContactEmail, + Organization: req.Organization, + AccountId: req.AccountId, + RateLimitName: req.RateLimitName, + RateLimitTier: req.RateLimitTier, + UseCase: req.UseCase, + }) +} diff --git a/mocks/grpc.go b/mocks/grpc.go new file mode 100644 index 00000000000..f1c18f2c7f1 --- /dev/null +++ b/mocks/grpc.go @@ -0,0 +1,31 @@ +package mocks + +import ( + "io" + + "google.golang.org/grpc" +) + +// ServerStreamClient is a mock which satisfies the grpc.ClientStream interface, +// allowing it to be returned by methods where the server returns a stream of +// results. It can be populated with a list of results to return, or an error +// to return. +type ServerStreamClient[T any] struct { + grpc.ClientStream + Results []*T + Err error +} + +// Recv returns the error, if populated. Otherwise it returns the next item from +// the list of results. If it has returned all items already, it returns EOF. 
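+// Unlike a real gRPC stream, this mock's Recv never blocks: results are served
+// directly from the in-memory slice.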
+func (c *ServerStreamClient[T]) Recv() (*T, error) { + if c.Err != nil { + return nil, c.Err + } + if len(c.Results) == 0 { + return nil, io.EOF + } + res := c.Results[0] + c.Results = c.Results[1:] + return res, nil +} diff --git a/mocks/mocks.go b/mocks/mocks.go deleted file mode 100644 index 9a855c40c9b..00000000000 --- a/mocks/mocks.go +++ /dev/null @@ -1,638 +0,0 @@ -package mocks - -import ( - "bytes" - "context" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "time" - - "github.com/jmhodges/clock" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - jose "gopkg.in/square/go-jose.v2" - - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - berrors "github.com/letsencrypt/boulder/errors" - bgrpc "github.com/letsencrypt/boulder/grpc" - "github.com/letsencrypt/boulder/identifier" - "github.com/letsencrypt/boulder/probs" - pubpb "github.com/letsencrypt/boulder/publisher/proto" - sapb "github.com/letsencrypt/boulder/sa/proto" -) - -// StorageAuthority is a mock -type StorageAuthority struct { - clk clock.Clock -} - -// NewStorageAuthority creates a new mock storage authority -// with the given clock. -func NewStorageAuthority(clk clock.Clock) *StorageAuthority { - return &StorageAuthority{clk: clk} -} - -const ( - test1KeyPublicJSON = `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}` - test2KeyPublicJSON = `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}` - testE1KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"FwvSZpu06i3frSk_mz9HcD9nETn4wf3mQ-zDtG21Gao","y":"S8rR-0dWa8nAcw1fbunF_ajS3PQZ-QwLps-2adgLgPk"}` - testE2KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"S8FOmrZ3ywj4yyFqt0etAD90U-EnkNaOBSLfQmf7pNg","y":"vMvpDyqFDRHjGfZ1siDOm5LS6xNdR5xTpyoQGLDOX2Q"}` - test3KeyPublicJSON = `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}` - test4KeyPublicJSON = `{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}` - - agreementURL = "http://example.invalid/terms" -) - -// GetRegistration is a mock -func (sa *StorageAuthority) GetRegistration(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) { - if req.Id == 100 { - // Tag meaning "Missing" - return nil, 
errors.New("missing") - } - if req.Id == 101 { - // Tag meaning "Malformed" - return &corepb.Registration{}, nil - } - if req.Id == 102 { - // Tag meaning "Not Found" - return nil, berrors.NotFoundError("Dave's not here man") - } - - goodReg := &corepb.Registration{ - Id: req.Id, - Key: []byte(test1KeyPublicJSON), - Agreement: agreementURL, - Contact: []string{"mailto:person@mail.com"}, - ContactsPresent: true, - Status: string(core.StatusValid), - } - - // Return a populated registration with contacts for ID == 1 or ID == 5 - if req.Id == 1 || req.Id == 5 { - return goodReg, nil - } - - // Return a populated registration with a different key for ID == 2 - if req.Id == 2 { - goodReg.Key = []byte(test2KeyPublicJSON) - return goodReg, nil - } - - // Return a deactivated registration with a different key for ID == 3 - if req.Id == 3 { - goodReg.Key = []byte(test3KeyPublicJSON) - goodReg.Status = string(core.StatusDeactivated) - return goodReg, nil - } - - // Return a populated registration with a different key for ID == 4 - if req.Id == 4 { - goodReg.Key = []byte(test4KeyPublicJSON) - return goodReg, nil - } - - // Return a registration without the agreement set for ID == 6 - if req.Id == 6 { - goodReg.Agreement = "" - return goodReg, nil - } - - goodReg.InitialIP, _ = net.ParseIP("5.6.7.8").MarshalText() - createdAt := time.Date(2003, 9, 27, 0, 0, 0, 0, time.UTC) - goodReg.CreatedAt = createdAt.UnixNano() - return goodReg, nil -} - -// GetRegistrationByKey is a mock -func (sa *StorageAuthority) GetRegistrationByKey(_ context.Context, req *sapb.JSONWebKey, _ ...grpc.CallOption) (*corepb.Registration, error) { - test5KeyBytes, err := ioutil.ReadFile("../test/test-key-5.der") - if err != nil { - return nil, err - } - test5KeyPriv, err := x509.ParsePKCS1PrivateKey(test5KeyBytes) - if err != nil { - return nil, err - } - test5KeyPublic := jose.JSONWebKey{Key: test5KeyPriv.Public()} - test5KeyPublicJSON, err := test5KeyPublic.MarshalJSON() - if err != nil { - return nil, err - } - - contacts := []string{"mailto:person@mail.com"} - - if bytes.Equal(req.Jwk, []byte(test1KeyPublicJSON)) { - return &corepb.Registration{ - Id: 1, - Key: req.Jwk, - Agreement: agreementURL, - Contact: contacts, - ContactsPresent: true, - Status: string(core.StatusValid), - }, nil - } - - if bytes.Equal(req.Jwk, []byte(test2KeyPublicJSON)) { - // No key found - return &corepb.Registration{Id: 2}, berrors.NotFoundError("reg not found") - } - - if bytes.Equal(req.Jwk, []byte(test4KeyPublicJSON)) { - // No key found - return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found") - } - - if bytes.Equal(req.Jwk, []byte(test5KeyPublicJSON)) { - // No key found - return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found") - } - - if bytes.Equal(req.Jwk, []byte(testE1KeyPublicJSON)) { - return &corepb.Registration{Id: 3, Key: req.Jwk, Agreement: agreementURL}, nil - } - - if bytes.Equal(req.Jwk, []byte(testE2KeyPublicJSON)) { - return &corepb.Registration{Id: 4}, berrors.NotFoundError("reg not found") - } - - if bytes.Equal(req.Jwk, []byte(test3KeyPublicJSON)) { - // deactivated registration - return &corepb.Registration{ - Id: 2, - Key: req.Jwk, - Agreement: agreementURL, - Contact: contacts, - ContactsPresent: true, - Status: string(core.StatusDeactivated), - }, nil - } - - // Return a fake registration. Make sure to fill the key field to avoid marshaling errors. 
- return &corepb.Registration{ - Id: 1, - Key: []byte(test1KeyPublicJSON), - Agreement: agreementURL, - Status: string(core.StatusValid), - }, nil -} - -// GetSerialMetadata is a mock -func (sa *StorageAuthority) GetSerialMetadata(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.SerialMetadata, error) { - return &sapb.SerialMetadata{ - Serial: req.Serial, - RegistrationID: 1, - Created: sa.clk.Now().Add(-1 * time.Hour).UnixNano(), - Expires: sa.clk.Now().Add(2159 * time.Hour).UnixNano(), - }, nil -} - -// GetCertificate is a mock -func (sa *StorageAuthority) GetCertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - // Serial ee == 238.crt - if req.Serial == "0000000000000000000000000000000000ee" { - certPemBytes, _ := ioutil.ReadFile("test/238.crt") - certBlock, _ := pem.Decode(certPemBytes) - return &corepb.Certificate{ - RegistrationID: 1, - Der: certBlock.Bytes, - Issued: sa.clk.Now().Add(-1 * time.Hour).UnixNano(), - }, nil - } else if req.Serial == "0000000000000000000000000000000000b2" { - certPemBytes, _ := ioutil.ReadFile("test/178.crt") - certBlock, _ := pem.Decode(certPemBytes) - return &corepb.Certificate{ - RegistrationID: 1, - Der: certBlock.Bytes, - Issued: sa.clk.Now().Add(-1 * time.Hour).UnixNano(), - }, nil - } else if req.Serial == "000000000000000000000000000000626164" { - return nil, errors.New("bad") - } else { - return nil, berrors.NotFoundError("No cert") - } -} - -// GetPrecertificate is a mock -func (sa *StorageAuthority) GetPrecertificate(_ context.Context, _ *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { - return nil, nil -} - -// GetCertificateStatus is a mock -func (sa *StorageAuthority) GetCertificateStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.CertificateStatus, error) { - // Serial ee == 238.crt - if req.Serial == "0000000000000000000000000000000000ee" { - return &corepb.CertificateStatus{ - Status: string(core.OCSPStatusGood), - }, nil - } else if req.Serial == "0000000000000000000000000000000000b2" { - return &corepb.CertificateStatus{ - Status: string(core.OCSPStatusRevoked), - }, nil - } else { - return nil, errors.New("No cert status") - } -} - -// AddPrecertificate is a mock -func (sa *StorageAuthority) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest, _ ...grpc.CallOption) (empty *emptypb.Empty, err error) { - return -} - -// AddSerial is a mock -func (sa *StorageAuthority) AddSerial(ctx context.Context, req *sapb.AddSerialRequest, _ ...grpc.CallOption) (empty *emptypb.Empty, err error) { - return -} - -// AddCertificate is a mock -func (sa *StorageAuthority) AddCertificate(_ context.Context, _ *sapb.AddCertificateRequest, _ ...grpc.CallOption) (*sapb.AddCertificateResponse, error) { - return nil, nil -} - -// NewRegistration is a mock -func (sa *StorageAuthority) NewRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*corepb.Registration, error) { - return &corepb.Registration{}, nil -} - -// UpdateRegistration is a mock -func (sa *StorageAuthority) UpdateRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// CountFQDNSets is a mock -func (sa *StorageAuthority) CountFQDNSets(_ context.Context, _ *sapb.CountFQDNSetsRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -// FQDNSetExists is a mock -func (sa *StorageAuthority) FQDNSetExists(_ context.Context, _ 
*sapb.FQDNSetExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{Exists: false}, nil -} - -func (sa *StorageAuthority) PreviousCertificateExists(_ context.Context, _ *sapb.PreviousCertificateExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{Exists: false}, nil -} - -// CountCertificatesByNames is a mock -func (sa *StorageAuthority) CountCertificatesByNames(_ context.Context, _ *sapb.CountCertificatesByNamesRequest, _ ...grpc.CallOption) (*sapb.CountByNames, error) { - return &sapb.CountByNames{}, nil -} - -// CountRegistrationsByIP is a mock -func (sa *StorageAuthority) CountRegistrationsByIP(_ context.Context, _ *sapb.CountRegistrationsByIPRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -// CountRegistrationsByIPRange is a mock -func (sa *StorageAuthority) CountRegistrationsByIPRange(_ context.Context, _ *sapb.CountRegistrationsByIPRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -// CountOrders is a mock -func (sa *StorageAuthority) CountOrders(_ context.Context, _ *sapb.CountOrdersRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -// DeactivateRegistration is a mock -func (sa *StorageAuthority) DeactivateRegistration(_ context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// NewOrder is a mock -func (sa *StorageAuthority) NewOrder(_ context.Context, req *sapb.NewOrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) { - rand.Seed(time.Now().UnixNano()) - response := &corepb.Order{ - // Fields from the input new order request. - RegistrationID: req.RegistrationID, - Expires: req.Expires, - Names: req.Names, - V2Authorizations: req.V2Authorizations, - // Mock new fields generated by the database transaction. - Id: rand.Int63(), - Created: time.Now().UnixNano(), - // A new order is never processing because it can't have been finalized yet. 
- BeganProcessing: false, - Status: string(core.StatusPending), - } - return response, nil -} - -// NewOrderAndAuthzs is a mock -func (sa *StorageAuthority) NewOrderAndAuthzs(_ context.Context, req *sapb.NewOrderAndAuthzsRequest, _ ...grpc.CallOption) (*corepb.Order, error) { - return sa.NewOrder(context.TODO(), req.NewOrder) -} - -// SetOrderProcessing is a mock -func (sa *StorageAuthority) SetOrderProcessing(_ context.Context, req *sapb.OrderRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// SetOrderError is a mock -func (sa *StorageAuthority) SetOrderError(_ context.Context, req *sapb.SetOrderErrorRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// FinalizeOrder is a mock -func (sa *StorageAuthority) FinalizeOrder(_ context.Context, req *sapb.FinalizeOrderRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// GetOrder is a mock -func (sa *StorageAuthority) GetOrder(_ context.Context, req *sapb.OrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) { - if req.Id == 2 { - return nil, berrors.NotFoundError("bad") - } else if req.Id == 3 { - return nil, errors.New("very bad") - } - - created := sa.clk.Now().AddDate(-30, 0, 0).Unix() - exp := sa.clk.Now().AddDate(30, 0, 0).Unix() - validOrder := &corepb.Order{ - Id: req.Id, - RegistrationID: 1, - Created: created, - Expires: exp, - Names: []string{"example.com"}, - Status: string(core.StatusValid), - V2Authorizations: []int64{1}, - CertificateSerial: "serial", - Error: nil, - } - - // Order ID doesn't have a certificate serial yet - if req.Id == 4 { - validOrder.Status = string(core.StatusPending) - validOrder.Id = req.Id - validOrder.CertificateSerial = "" - validOrder.Error = nil - return validOrder, nil - } - - // Order ID 6 belongs to reg ID 6 - if req.Id == 6 { - validOrder.Id = 6 - validOrder.RegistrationID = 6 - } - - // Order ID 7 is ready, but expired - if req.Id == 7 { - validOrder.Status = string(core.StatusReady) - validOrder.Expires = sa.clk.Now().AddDate(-30, 0, 0).Unix() - } - - if req.Id == 8 { - validOrder.Status = string(core.StatusReady) - } - - // Order 9 is fresh - if req.Id == 9 { - validOrder.Created = sa.clk.Now().AddDate(0, 0, 1).Unix() - } - - return validOrder, nil -} - -func (sa *StorageAuthority) GetOrderForNames(_ context.Context, _ *sapb.GetOrderForNamesRequest, _ ...grpc.CallOption) (*corepb.Order, error) { - return nil, nil -} - -// NewAuthorizations is a mock -func (sa *StorageAuthority) NewAuthorizations2(ctx context.Context, req *sapb.AddPendingAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorization2IDs, error) { - return &sapb.Authorization2IDs{}, nil -} - -func (sa *StorageAuthority) FinalizeAuthorization2(ctx context.Context, req *sapb.FinalizeAuthorizationRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -func (sa *StorageAuthority) DeactivateAuthorization2(ctx context.Context, req *sapb.AuthorizationID2, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return nil, nil -} - -func (sa *StorageAuthority) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -func (sa *StorageAuthority) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { - return nil, nil -} - -func (sa *StorageAuthority) CountInvalidAuthorizations2(ctx 
context.Context, req *sapb.CountInvalidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -func (sa *StorageAuthority) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { - if req.RegistrationID != 1 && req.RegistrationID != 5 && req.RegistrationID != 4 { - return &sapb.Authorizations{}, nil - } - now := time.Unix(0, req.Now) - auths := &sapb.Authorizations{} - for _, name := range req.Domains { - exp := now.AddDate(100, 0, 0) - authzPB, err := bgrpc.AuthzToPB(core.Authorization{ - Status: core.StatusValid, - RegistrationID: req.RegistrationID, - Expires: &exp, - Identifier: identifier.ACMEIdentifier{ - Type: "dns", - Value: name, - }, - Challenges: []core.Challenge{ - { - Status: core.StatusValid, - Type: core.ChallengeTypeDNS01, - Token: "exampleToken", - Validated: &now, - }, - }, - }) - if err != nil { - return nil, err - } - auths.Authz = append(auths.Authz, &sapb.Authorizations_MapElement{ - Domain: name, - Authz: authzPB, - }) - } - return auths, nil -} - -func (sa *StorageAuthority) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { - return &sapb.Authorizations{}, nil -} - -func (sa *StorageAuthority) GetPendingAuthorization2(ctx context.Context, req *sapb.GetPendingAuthorizationRequest, _ ...grpc.CallOption) (*corepb.Authorization, error) { - return nil, nil -} - -var ( - authzIdValid = int64(1) - authzIdPending = int64(2) - authzIdExpired = int64(3) - authzIdErrorResult = int64(4) - authzIdDiffAccount = int64(5) -) - -// GetAuthorization2 is a mock -func (sa *StorageAuthority) GetAuthorization2(ctx context.Context, id *sapb.AuthorizationID2, _ ...grpc.CallOption) (*corepb.Authorization, error) { - authz := core.Authorization{ - Status: core.StatusValid, - RegistrationID: 1, - Identifier: identifier.DNSIdentifier("not-an-example.com"), - Challenges: []core.Challenge{ - { - Status: "pending", - Token: "token", - Type: "dns", - }, - }, - } - - switch id.Id { - case authzIdValid: - exp := sa.clk.Now().AddDate(100, 0, 0) - authz.Expires = &exp - authz.ID = fmt.Sprintf("%d", authzIdValid) - return bgrpc.AuthzToPB(authz) - case authzIdPending: - exp := sa.clk.Now().AddDate(100, 0, 0) - authz.Expires = &exp - authz.ID = fmt.Sprintf("%d", authzIdPending) - authz.Status = core.StatusPending - return bgrpc.AuthzToPB(authz) - case authzIdExpired: - exp := sa.clk.Now().AddDate(0, -1, 0) - authz.Expires = &exp - authz.ID = fmt.Sprintf("%d", authzIdExpired) - return bgrpc.AuthzToPB(authz) - case authzIdErrorResult: - return nil, fmt.Errorf("Unspecified database error") - case authzIdDiffAccount: - exp := sa.clk.Now().AddDate(100, 0, 0) - authz.RegistrationID = 2 - authz.Expires = &exp - authz.ID = fmt.Sprintf("%d", authzIdDiffAccount) - return bgrpc.AuthzToPB(authz) - } - - return nil, berrors.NotFoundError("no authorization found with id %q", id) -} - -// RevokeCertificate is a mock -func (sa *StorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return nil, nil -} - -// RevokeCertificate is a mock -func (sa *StorageAuthority) UpdateRevokedCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return nil, nil -} - -// AddBlockedKey is a mock -func (sa *StorageAuthority) AddBlockedKey(ctx context.Context, req 
*sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return &emptypb.Empty{}, nil -} - -// KeyBlocked is a mock -func (sa *StorageAuthority) KeyBlocked(ctx context.Context, req *sapb.KeyBlockedRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{Exists: false}, nil -} - -// Publisher is a mock -type PublisherClient struct { - // empty -} - -// SubmitToSingleCTWithResult is a mock -func (*PublisherClient) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { - return nil, nil -} - -// Mailer is a mock -type Mailer struct { - Messages []MailerMessage -} - -// MailerMessage holds the captured emails from SendMail() -type MailerMessage struct { - To string - Subject string - Body string -} - -// Clear removes any previously recorded messages -func (m *Mailer) Clear() { - m.Messages = nil -} - -// SendMail is a mock -func (m *Mailer) SendMail(to []string, subject, msg string) error { - for _, rcpt := range to { - m.Messages = append(m.Messages, MailerMessage{ - To: rcpt, - Subject: subject, - Body: msg, - }) - } - return nil -} - -// Close is a mock -func (m *Mailer) Close() error { - return nil -} - -// Connect is a mock -func (m *Mailer) Connect() error { - return nil -} - -// SAWithFailedChallenges is a mocks.StorageAuthority that has -// a `GetAuthorization` implementation that can return authorizations with -// failed challenges. -type SAWithFailedChallenges struct { - StorageAuthority - Clk clock.FakeClock -} - -func (sa *SAWithFailedChallenges) GetAuthorization2(ctx context.Context, id *sapb.AuthorizationID2, _ ...grpc.CallOption) (*corepb.Authorization, error) { - authz := core.Authorization{ - ID: "55", - Status: core.StatusValid, - RegistrationID: 1, - Identifier: identifier.DNSIdentifier("not-an-example.com"), - Challenges: []core.Challenge{ - { - Status: core.StatusInvalid, - Type: "dns", - Token: "exampleToken", - }, - }, - } - prob := &probs.ProblemDetails{ - Type: "things:are:whack", - Detail: "whack attack", - HTTPStatus: 555, - } - exp := sa.Clk.Now().AddDate(100, 0, 0) - authz.Expires = &exp - // 55 returns an authz with a failed challenge that has the problem type - // statically prefixed by the V1ErrorNS - if id.Id == 55 { - prob.Type = probs.V1ErrorNS + prob.Type - authz.Challenges[0].Error = prob - return bgrpc.AuthzToPB(authz) - } - // 56 returns an authz with a failed challenge that has no error - // namespace on the problem type. 
- if id.Id == 56 { - authz.Challenges[0].Error = prob - return bgrpc.AuthzToPB(authz) - } - return nil, berrors.NotFoundError("no authorization found with id %q", id) -} diff --git a/mocks/publisher.go b/mocks/publisher.go new file mode 100644 index 00000000000..256215718ce --- /dev/null +++ b/mocks/publisher.go @@ -0,0 +1,19 @@ +package mocks + +import ( + "context" + + "google.golang.org/grpc" + + pubpb "github.com/letsencrypt/boulder/publisher/proto" +) + +// PublisherClient is a mock +type PublisherClient struct { + // empty +} + +// SubmitToSingleCTWithResult is a mock +func (*PublisherClient) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Request, _ ...grpc.CallOption) (*pubpb.Result, error) { + return &pubpb.Result{}, nil +} diff --git a/mocks/sa.go b/mocks/sa.go new file mode 100644 index 00000000000..04e989cfb86 --- /dev/null +++ b/mocks/sa.go @@ -0,0 +1,384 @@ +package mocks + +import ( + "bytes" + "context" + "crypto/x509" + "errors" + "os" + "time" + + "github.com/go-jose/go-jose/v4" + "github.com/jmhodges/clock" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/core" + corepb "github.com/letsencrypt/boulder/core/proto" + berrors "github.com/letsencrypt/boulder/errors" + bgrpc "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/identifier" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// StorageAuthorityReadOnly is a mock of sapb.StorageAuthorityReadOnlyClient +type StorageAuthorityReadOnly struct { + clk clock.Clock +} + +// NewStorageAuthorityReadOnly creates a new mock read-only storage authority +// with the given clock. +func NewStorageAuthorityReadOnly(clk clock.Clock) *StorageAuthorityReadOnly { + return &StorageAuthorityReadOnly{clk} +} + +const ( + test1KeyPublicJSON = `{"kty":"RSA","n":"yNWVhtYEKJR21y9xsHV-PD_bYwbXSeNuFal46xYxVfRL5mqha7vttvjB_vc7Xg2RvgCxHPCqoxgMPTzHrZT75LjCwIW2K_klBYN8oYvTwwmeSkAz6ut7ZxPv-nZaT5TJhGk0NT2kh_zSpdriEJ_3vW-mqxYbbBmpvHqsa1_zx9fSuHYctAZJWzxzUZXykbWMWQZpEiE0J4ajj51fInEzVn7VxV-mzfMyboQjujPh7aNJxAWSq4oQEJJDgWwSh9leyoJoPpONHxh5nEE5AjE01FkGICSxjpZsF-w8hOTI3XXohUdu29Se26k2B0PolDSuj0GIQU6-W9TdLXSjBb2SpQ","e":"AQAB"}` + test2KeyPublicJSON = `{"kty":"RSA","n":"qnARLrT7Xz4gRcKyLdydmCr-ey9OuPImX4X40thk3on26FkMznR3fRjs66eLK7mmPcBZ6uOJseURU6wAaZNmemoYx1dMvqvWWIyiQleHSD7Q8vBrhR6uIoO4jAzJZR-ChzZuSDt7iHN-3xUVspu5XGwXU_MVJZshTwp4TaFx5elHIT_ObnTvTOU3Xhish07AbgZKmWsVbXh5s-CrIicU4OexJPgunWZ_YJJueOKmTvnLlTV4MzKR2oZlBKZ27S0-SfdV_QDx_ydle5oMAyKVtlAV35cyPMIsYNwgUGBCdY_2Uzi5eX0lTc7MPRwz6qR1kip-i59VcGcUQgqHV6Fyqw","e":"AQAB"}` + testE1KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"FwvSZpu06i3frSk_mz9HcD9nETn4wf3mQ-zDtG21Gao","y":"S8rR-0dWa8nAcw1fbunF_ajS3PQZ-QwLps-2adgLgPk"}` + testE2KeyPublicJSON = `{"kty":"EC","crv":"P-256","x":"S8FOmrZ3ywj4yyFqt0etAD90U-EnkNaOBSLfQmf7pNg","y":"vMvpDyqFDRHjGfZ1siDOm5LS6xNdR5xTpyoQGLDOX2Q"}` + test3KeyPublicJSON = `{"kty":"RSA","n":"uTQER6vUA1RDixS8xsfCRiKUNGRzzyIK0MhbS2biClShbb0hSx2mPP7gBvis2lizZ9r-y9hL57kNQoYCKndOBg0FYsHzrQ3O9AcoV1z2Mq-XhHZbFrVYaXI0M3oY9BJCWog0dyi3XC0x8AxC1npd1U61cToHx-3uSvgZOuQA5ffEn5L38Dz1Ti7OV3E4XahnRJvejadUmTkki7phLBUXm5MnnyFm0CPpf6ApV7zhLjN5W-nV0WL17o7v8aDgV_t9nIdi1Y26c3PlCEtiVHZcebDH5F1Deta3oLLg9-g6rWnTqPbY3knffhp4m0scLD6e33k8MtzxDX_D7vHsg0_X1w","e":"AQAB"}` + test4KeyPublicJSON = 
`{"kty":"RSA","n":"qih-cx32M0wq8MhhN-kBi2xPE-wnw4_iIg1hWO5wtBfpt2PtWikgPuBT6jvK9oyQwAWbSfwqlVZatMPY_-3IyytMNb9R9OatNr6o5HROBoyZnDVSiC4iMRd7bRl_PWSIqj_MjhPNa9cYwBdW5iC3jM5TaOgmp0-YFm4tkLGirDcIBDkQYlnv9NKILvuwqkapZ7XBixeqdCcikUcTRXW5unqygO6bnapzw-YtPsPPlj4Ih3SvK4doyziPV96U8u5lbNYYEzYiW1mbu9n0KLvmKDikGcdOpf6-yRa_10kMZyYQatY1eclIKI0xb54kbluEl0GQDaL5FxLmiKeVnsapzw","e":"AQAB"}` + + agreementURL = "http://example.invalid/terms" +) + +// GetRegistration is a mock +func (sa *StorageAuthorityReadOnly) GetRegistration(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*corepb.Registration, error) { + if req.Id == 100 { + // Tag meaning "Missing" + return nil, errors.New("missing") + } + if req.Id == 101 { + // Tag meaning "Malformed" + return &corepb.Registration{}, nil + } + if req.Id == 102 { + // Tag meaning "Not Found" + return nil, berrors.NotFoundError("Dave's not here man") + } + + goodReg := &corepb.Registration{ + Id: req.Id, + Key: []byte(test1KeyPublicJSON), + Agreement: agreementURL, + Status: string(core.StatusValid), + } + + // Return a populated registration for ID == 1 or ID == 5 + if req.Id == 1 || req.Id == 5 { + return goodReg, nil + } + + // Return a populated registration with a different key for ID == 2 + if req.Id == 2 { + goodReg.Key = []byte(test2KeyPublicJSON) + return goodReg, nil + } + + // Return a deactivated registration with a different key for ID == 3 + if req.Id == 3 { + goodReg.Key = []byte(test3KeyPublicJSON) + goodReg.Status = string(core.StatusDeactivated) + return goodReg, nil + } + + // Return a populated registration with a different key for ID == 4 + if req.Id == 4 { + goodReg.Key = []byte(test4KeyPublicJSON) + return goodReg, nil + } + + // Return a registration without the agreement set for ID == 6 + if req.Id == 6 { + goodReg.Agreement = "" + return goodReg, nil + } + + goodReg.CreatedAt = timestamppb.New(time.Date(2003, 9, 27, 0, 0, 0, 0, time.UTC)) + return goodReg, nil +} + +// GetRegistrationByKey is a mock +func (sa *StorageAuthorityReadOnly) GetRegistrationByKey(_ context.Context, req *sapb.JSONWebKey, _ ...grpc.CallOption) (*corepb.Registration, error) { + test5KeyBytes, err := os.ReadFile("../test/test-key-5.der") + if err != nil { + return nil, err + } + test5KeyPriv, err := x509.ParsePKCS1PrivateKey(test5KeyBytes) + if err != nil { + return nil, err + } + test5KeyPublic := jose.JSONWebKey{Key: test5KeyPriv.Public()} + test5KeyPublicJSON, err := test5KeyPublic.MarshalJSON() + if err != nil { + return nil, err + } + + if bytes.Equal(req.Jwk, []byte(test1KeyPublicJSON)) { + return &corepb.Registration{ + Id: 1, + Key: req.Jwk, + Agreement: agreementURL, + Status: string(core.StatusValid), + }, nil + } + + if bytes.Equal(req.Jwk, []byte(test2KeyPublicJSON)) { + // No key found + return &corepb.Registration{Id: 2}, berrors.NotFoundError("reg not found") + } + + if bytes.Equal(req.Jwk, []byte(test4KeyPublicJSON)) { + // No key found + return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found") + } + + if bytes.Equal(req.Jwk, test5KeyPublicJSON) { + // No key found + return &corepb.Registration{Id: 5}, berrors.NotFoundError("reg not found") + } + + if bytes.Equal(req.Jwk, []byte(testE1KeyPublicJSON)) { + return &corepb.Registration{Id: 3, Key: req.Jwk, Agreement: agreementURL}, nil + } + + if bytes.Equal(req.Jwk, []byte(testE2KeyPublicJSON)) { + return &corepb.Registration{Id: 4}, berrors.NotFoundError("reg not found") + } + + if bytes.Equal(req.Jwk, []byte(test3KeyPublicJSON)) { + // deactivated 
registration + return &corepb.Registration{ + Id: 2, + Key: req.Jwk, + Agreement: agreementURL, + Status: string(core.StatusDeactivated), + }, nil + } + + // Return a fake registration. Make sure to fill the key field to avoid marshaling errors. + return &corepb.Registration{ + Id: 1, + Key: []byte(test1KeyPublicJSON), + Agreement: agreementURL, + Status: string(core.StatusValid), + }, nil +} + +// GetSerialMetadata is a mock +func (sa *StorageAuthorityReadOnly) GetSerialMetadata(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.SerialMetadata, error) { + now := sa.clk.Now() + created := now.Add(-1 * time.Hour) + expires := now.Add(2159 * time.Hour) + return &sapb.SerialMetadata{ + Serial: req.Serial, + RegistrationID: 1, + Created: timestamppb.New(created), + Expires: timestamppb.New(expires), + }, nil +} + +// GetCertificate is a mock +func (sa *StorageAuthorityReadOnly) GetCertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + if req.Serial == "000000000000000000000000000000626164" { + return nil, errors.New("bad") + } else { + return nil, berrors.NotFoundError("No cert") + } +} + +// GetLintPrecertificate is a mock +func (sa *StorageAuthorityReadOnly) GetLintPrecertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + return nil, berrors.NotFoundError("No cert") +} + +// GetCertificateStatus is a mock +func (sa *StorageAuthorityReadOnly) GetCertificateStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.CertificateStatus, error) { + return nil, errors.New("no cert status") +} + +// GetRevocationStatus is a mock +func (sa *StorageAuthorityReadOnly) GetRevocationStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.RevocationStatus, error) { + return nil, nil +} + +// SerialsForIncident is a mock +func (sa *StorageAuthorityReadOnly) SerialsForIncident(ctx context.Context, _ *sapb.SerialsForIncidentRequest, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_SerialsForIncidentClient, error) { + return &ServerStreamClient[sapb.IncidentSerial]{}, nil +} + +// CheckIdentifiersPaused is a mock +func (sa *StorageAuthorityReadOnly) CheckIdentifiersPaused(_ context.Context, _ *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.Identifiers, error) { + return nil, nil +} + +// GetPausedIdentifiers is a mock +func (sa *StorageAuthorityReadOnly) GetPausedIdentifiers(_ context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Identifiers, error) { + return nil, nil +} + +// GetRevokedCertsByShard is a mock +func (sa *StorageAuthorityReadOnly) GetRevokedCertsByShard(ctx context.Context, _ *sapb.GetRevokedCertsByShardRequest, _ ...grpc.CallOption) (grpc.ServerStreamingClient[corepb.CRLEntry], error) { + return &ServerStreamClient[corepb.CRLEntry]{}, nil +} + +// GetRateLimitOverride is a mock +func (sa *StorageAuthorityReadOnly) GetRateLimitOverride(_ context.Context, req *sapb.GetRateLimitOverrideRequest, _ ...grpc.CallOption) (*sapb.RateLimitOverrideResponse, error) { + return nil, nil +} + +// GetEnabledRateLimitOverrides is a mock +func (sa *StorageAuthorityReadOnly) GetEnabledRateLimitOverrides(_ context.Context, _ *emptypb.Empty, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetEnabledRateLimitOverridesClient, error) { + return nil, nil +} + +// FQDNSetTimestampsForWindow is a mock +func (sa *StorageAuthorityReadOnly) FQDNSetTimestampsForWindow(_ context.Context, _ *sapb.CountFQDNSetsRequest, _ ...grpc.CallOption) 
(*sapb.Timestamps, error) { + return &sapb.Timestamps{}, nil +} + +// FQDNSetExists is a mock +func (sa *StorageAuthorityReadOnly) FQDNSetExists(_ context.Context, _ *sapb.FQDNSetExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { + return &sapb.Exists{Exists: false}, nil +} + +// GetOrder is a mock +func (sa *StorageAuthorityReadOnly) GetOrder(_ context.Context, req *sapb.OrderRequest, _ ...grpc.CallOption) (*corepb.Order, error) { + switch req.Id { + case 2: + return nil, berrors.NotFoundError("bad") + case 3: + return nil, errors.New("very bad") + } + + now := sa.clk.Now() + created := now.AddDate(-30, 0, 0) + exp := now.AddDate(30, 0, 0) + validOrder := &corepb.Order{ + Id: req.Id, + RegistrationID: 1, + Created: timestamppb.New(created), + Expires: timestamppb.New(exp), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + Status: string(core.StatusValid), + V2Authorizations: []int64{1}, + CertificateSerial: "serial", + Error: nil, + CertificateProfileName: "default", + } + + // Order ID doesn't have a certificate serial yet + if req.Id == 4 { + validOrder.Status = string(core.StatusPending) + validOrder.Id = req.Id + validOrder.CertificateSerial = "" + validOrder.Error = nil + return validOrder, nil + } + + // Order ID 6 belongs to reg ID 6 + if req.Id == 6 { + validOrder.Id = 6 + validOrder.RegistrationID = 6 + } + + // Order ID 7 is ready, but expired + if req.Id == 7 { + validOrder.Status = string(core.StatusReady) + validOrder.Expires = timestamppb.New(now.AddDate(-30, 0, 0)) + } + + if req.Id == 8 { + validOrder.Status = string(core.StatusReady) + } + + // Order 9 is fresh + if req.Id == 9 { + validOrder.Created = timestamppb.New(now.AddDate(0, 0, 1)) + } + + // Order 10 is processing + if req.Id == 10 { + validOrder.Status = string(core.StatusProcessing) + } + + return validOrder, nil +} + +func (sa *StorageAuthorityReadOnly) GetOrderForNames(_ context.Context, _ *sapb.GetOrderForNamesRequest, _ ...grpc.CallOption) (*corepb.Order, error) { + return nil, nil +} + +func (sa *StorageAuthorityReadOnly) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { + return &sapb.Count{}, nil +} + +func (sa *StorageAuthorityReadOnly) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { + return nil, nil +} + +func (sa *StorageAuthorityReadOnly) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Count, error) { + return &sapb.Count{}, nil +} + +func (sa *StorageAuthorityReadOnly) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { + if req.RegistrationID != 1 && req.RegistrationID != 5 && req.RegistrationID != 4 { + return &sapb.Authorizations{}, nil + } + expiryCutoff := req.ValidUntil.AsTime() + auths := &sapb.Authorizations{} + for _, ident := range req.Identifiers { + exp := expiryCutoff.AddDate(100, 0, 0) + authzPB, err := bgrpc.AuthzToPB(core.Authorization{ + Status: core.StatusValid, + RegistrationID: req.RegistrationID, + Expires: &exp, + Identifier: identifier.FromProto(ident), + Challenges: []core.Challenge{ + { + Status: core.StatusValid, + Type: core.ChallengeTypeDNS01, + Token: "exampleToken", + Validated: &expiryCutoff, + }, + }, + }) + if err != nil { + return nil, err + } + auths.Authzs = append(auths.Authzs, authzPB) + } 
+ return auths, nil +} + +// GetAuthorization2 is a mock +func (sa *StorageAuthorityReadOnly) GetAuthorization2(ctx context.Context, id *sapb.AuthorizationID2, _ ...grpc.CallOption) (*corepb.Authorization, error) { + return &corepb.Authorization{}, nil +} + +// GetSerialsByKey is a mock +func (sa *StorageAuthorityReadOnly) GetSerialsByKey(ctx context.Context, _ *sapb.SPKIHash, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetSerialsByKeyClient, error) { + return &ServerStreamClient[sapb.Serial]{}, nil +} + +// GetSerialsByAccount is a mock +func (sa *StorageAuthorityReadOnly) GetSerialsByAccount(ctx context.Context, _ *sapb.RegistrationID, _ ...grpc.CallOption) (sapb.StorageAuthorityReadOnly_GetSerialsByAccountClient, error) { + return &ServerStreamClient[sapb.Serial]{}, nil +} + +// KeyBlocked is a mock +func (sa *StorageAuthorityReadOnly) KeyBlocked(ctx context.Context, req *sapb.SPKIHash, _ ...grpc.CallOption) (*sapb.Exists, error) { + return &sapb.Exists{Exists: false}, nil +} + +// IncidentsForSerial is a mock. +func (sa *StorageAuthorityReadOnly) IncidentsForSerial(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.Incidents, error) { + return &sapb.Incidents{}, nil +} + +// ReplacementOrderExists is a mock. +func (sa *StorageAuthorityReadOnly) ReplacementOrderExists(ctx context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.Exists, error) { + return nil, nil +} diff --git a/must/must.go b/must/must.go new file mode 100644 index 00000000000..a7b13373189 --- /dev/null +++ b/must/must.go @@ -0,0 +1,15 @@ +package must + +// Do panics if err is not nil, otherwise returns t. +// It is useful in wrapping a two-value function call +// where you know statically that the call will succeed. +// +// Example: +// +// url := must.Do(url.Parse("http://example.com")) +func Do[T any](t T, err error) T { + if err != nil { + panic(err) + } + return t +} diff --git a/must/must_test.go b/must/must_test.go new file mode 100644 index 00000000000..7078fb35d6c --- /dev/null +++ b/must/must_test.go @@ -0,0 +1,13 @@ +package must + +import ( + "net/url" + "testing" +) + +func TestDo(t *testing.T) { + url := Do(url.Parse("http://example.com")) + if url.Host != "example.com" { + t.Errorf("expected host to be example.com, got %s", url.Host) + } +} diff --git a/nonce/nonce.go b/nonce/nonce.go index 764fcdc43c0..91f1eabc78c 100644 --- a/nonce/nonce.go +++ b/nonce/nonce.go @@ -18,7 +18,9 @@ import ( "context" "crypto/aes" "crypto/cipher" + "crypto/hmac" "crypto/rand" + "crypto/sha256" "encoding/base64" "errors" "fmt" @@ -26,19 +28,44 @@ import ( "sync" "time" - noncepb "github.com/letsencrypt/boulder/nonce/proto" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + berrors "github.com/letsencrypt/boulder/errors" + noncepb "github.com/letsencrypt/boulder/nonce/proto" ) const ( + // PrefixLen is the character length of a nonce prefix. + PrefixLen = 8 + + // NonceLen is the character length of a nonce, excluding the prefix. + NonceLen = 32 defaultMaxUsed = 65536 - nonceLen = 32 ) var errInvalidNonceLength = errors.New("invalid nonce length") +// PrefixCtxKey is exported for use as a key in a context.Context. +type PrefixCtxKey struct{} + +// HMACKeyCtxKey is exported for use as a key in a context.Context. +type HMACKeyCtxKey struct{} + +// DerivePrefix derives a nonce prefix from the provided listening address and +// key. 
The prefix is derived by taking the first 8 characters of the base64url + encoded HMAC-SHA256 hash of the listening address using the provided key. +func DerivePrefix(grpcAddr string, key []byte) string { + h := hmac.New(sha256.New, key) + h.Write([]byte(grpcAddr)) + return base64.RawURLEncoding.EncodeToString(h.Sum(nil))[:PrefixLen] +} + // NonceService generates, cancels, and tracks Nonces. type NonceService struct { + noncepb.UnsafeNonceServiceServer mu sync.Mutex latest int64 earliest int64 @@ -48,7 +75,10 @@ type NonceService struct { maxUsed int prefix string nonceCreates prometheus.Counter + nonceEarliest prometheus.Gauge + nonceLatest prometheus.Gauge nonceRedeems *prometheus.CounterVec + nonceAges *prometheus.HistogramVec nonceHeapLatency prometheus.Histogram } @@ -58,11 +88,11 @@ func (h int64Heap) Len() int { return len(h) } func (h int64Heap) Less(i, j int) bool { return h[i] < h[j] } func (h int64Heap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *int64Heap) Push(x interface{}) { +func (h *int64Heap) Push(x any) { *h = append(*h, x.(int64)) } -func (h *int64Heap) Pop() interface{} { +func (h *int64Heap) Pop() any { old := *h n := len(old) x := old[n-1] @@ -72,15 +102,18 @@ func (h *int64Heap) Pop() interface{} { // NewNonceService constructs a NonceService with defaults func NewNonceService(stats prometheus.Registerer, maxUsed int, prefix string) (*NonceService, error) { - // If a prefix is provided it must be four characters and valid - // base64. The prefix is required to be base64url as RFC8555 - // section 6.5.1 requires that nonces use that encoding. - // As base64 operates on three byte binary segments we require - // the prefix to be three bytes (four characters) so that the - // bytes preceding the prefix wouldn't impact the encoding. + // If a prefix is provided it must be eight characters and valid base64. The + // prefix is required to be base64url as RFC8555 section 6.5.1 requires that + // nonces use that encoding. As base64 operates on three byte binary segments + // we require the prefix to be six bytes (eight characters) so that the bytes + // preceding the prefix wouldn't impact the encoding. 
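As a quick sanity check on that arithmetic (a standalone sketch, not part of the patch; the address and key below are invented for illustration): eight base64url characters carry 8*6 = 48 bits, exactly six bytes, so a DerivePrefix-style prefix always decodes cleanly and never disturbs the alignment of the encrypted body appended after it.

	package main

	import (
		"crypto/hmac"
		"crypto/sha256"
		"encoding/base64"
		"fmt"
	)

	func main() {
		// Mirrors DerivePrefix above: HMAC-SHA256 the listening address with
		// the key, base64url-encode, and keep the first 8 characters.
		h := hmac.New(sha256.New, []byte("illustrative-key"))
		h.Write([]byte("10.0.0.1:9101"))
		prefix := base64.RawURLEncoding.EncodeToString(h.Sum(nil))[:8]

		// 8 characters form two whole 4-character base64 groups, so they
		// decode to exactly 6 bytes with no padding concerns.
		decoded, err := base64.RawURLEncoding.DecodeString(prefix)
		fmt.Println(len(decoded), err) // prints: 6 <nil>
	}

The constructor enforces that shape before accepting a prefix: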
if prefix != "" { - if len(prefix) != 4 { - return nil, errors.New("nonce prefix must be 4 characters") + if len(prefix) != PrefixLen { + return nil, fmt.Errorf( + "nonce prefix must be %d characters, not %d", + PrefixLen, + len(prefix), + ) } if _, err := base64.RawURLEncoding.DecodeString(prefix); err != nil { return nil, errors.New("nonce prefix must be valid base64url") @@ -105,21 +138,31 @@ func NewNonceService(stats prometheus.Registerer, maxUsed int, prefix string) (* maxUsed = defaultMaxUsed } - nonceCreates := prometheus.NewCounter(prometheus.CounterOpts{ + nonceCreates := promauto.With(stats).NewCounter(prometheus.CounterOpts{ Name: "nonce_creates", Help: "A counter of nonces generated", }) - stats.MustRegister(nonceCreates) - nonceRedeems := prometheus.NewCounterVec(prometheus.CounterOpts{ + nonceEarliest := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Name: "nonce_earliest", + Help: "A gauge with the current earliest valid nonce value", + }) + nonceLatest := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Name: "nonce_latest", + Help: "A gauge with the current latest valid nonce value", + }) + nonceRedeems := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ Name: "nonce_redeems", Help: "A counter of nonce validations labelled by result", }, []string{"result", "error"}) - stats.MustRegister(nonceRedeems) - nonceHeapLatency := prometheus.NewHistogram(prometheus.HistogramOpts{ + nonceAges := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "nonce_ages", + Help: "A histogram of nonce ages at the time they were (attempted to be) redeemed, expressed as fractions of the valid nonce window", + Buckets: []float64{-0.01, 0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1, 1.1, 1.2, 1.5, 2, 5}, + }, []string{"result"}) + nonceHeapLatency := promauto.With(stats).NewHistogram(prometheus.HistogramOpts{ Name: "nonce_heap_latency", Help: "A histogram of latencies of heap pop operations", }) - stats.MustRegister(nonceHeapLatency) return &NonceService{ earliest: 0, @@ -130,7 +173,10 @@ func NewNonceService(stats prometheus.Registerer, maxUsed int, prefix string) (* maxUsed: maxUsed, prefix: prefix, nonceCreates: nonceCreates, + nonceEarliest: nonceEarliest, + nonceLatest: nonceLatest, nonceRedeems: nonceRedeems, + nonceAges: nonceAges, nonceHeapLatency: nonceHeapLatency, }, nil } @@ -138,10 +184,11 @@ func NewNonceService(stats prometheus.Registerer, maxUsed int, prefix string) (* func (ns *NonceService) encrypt(counter int64) (string, error) { // Generate a nonce with upper 4 bytes zero nonce := make([]byte, 12) - for i := 0; i < 4; i++ { + for i := range 4 { nonce[i] = 0 } - if _, err := rand.Read(nonce[4:]); err != nil { + _, err := rand.Read(nonce[4:]) + if err != nil { return "", err } @@ -152,7 +199,7 @@ func (ns *NonceService) encrypt(counter int64) (string, error) { copy(pt[pad:], ctr.Bytes()) // Encrypt - ret := make([]byte, nonceLen) + ret := make([]byte, NonceLen) ct := ns.gcm.Seal(nil, nonce, pt, nil) copy(ret, nonce[4:]) copy(ret[8:], ct) @@ -165,7 +212,7 @@ func (ns *NonceService) decrypt(nonce string) (int64, error) { if ns.prefix != "" { var prefix string var err error - prefix, body, err = splitNonce(nonce) + prefix, body, err = ns.splitNonce(nonce) if err != nil { return 0, err } @@ -177,12 +224,12 @@ func (ns *NonceService) decrypt(nonce string) (int64, error) { if err != nil { return 0, err } - if len(decoded) != nonceLen { + if len(decoded) != NonceLen { return 0, errInvalidNonceLength } n := make([]byte, 12) - for i := 0; i < 4; i++ { + 
for i := range 4 { n[i] = 0 } copy(n[4:], decoded[:8]) @@ -197,40 +244,53 @@ func (ns *NonceService) decrypt(nonce string) (int64, error) { return ctr.Int64(), nil } -// Nonce provides a new Nonce. -func (ns *NonceService) Nonce() (string, error) { +// nonce provides a new Nonce. +func (ns *NonceService) nonce() (string, error) { ns.mu.Lock() ns.latest++ latest := ns.latest ns.mu.Unlock() - defer ns.nonceCreates.Inc() + ns.nonceCreates.Inc() + ns.nonceLatest.Set(float64(latest)) return ns.encrypt(latest) } -// Valid determines whether the provided Nonce string is valid, returning -// true if so. -func (ns *NonceService) Valid(nonce string) bool { +// valid determines whether the provided Nonce string is valid, returning +// nil if so, or an error explaining why it is not. +func (ns *NonceService) valid(nonce string) error { c, err := ns.decrypt(nonce) if err != nil { ns.nonceRedeems.WithLabelValues("invalid", "decrypt").Inc() - return false + return berrors.BadNonceError("unable to decrypt nonce: %s", err) } ns.mu.Lock() defer ns.mu.Unlock() + + // age represents how "far back" in the valid nonce window this nonce is. + // If it is very recent, then the numerator is very small and the age is close + // to zero. If it is old but still valid, the numerator is slightly smaller + // than the denominator, and the age is close to one. If it is too old, then + // the age is greater than one. If it is magically too new (i.e. greater than + // the largest nonce we've actually handed out), then the age is negative. + age := float64(ns.latest-c) / float64(ns.latest-ns.earliest) + + if c > ns.latest { // i.e. age < 0 ns.nonceRedeems.WithLabelValues("invalid", "too high").Inc() - return false + ns.nonceAges.WithLabelValues("invalid").Observe(age) + return berrors.BadNonceError("nonce greater than highest dispensed nonce: %d > %d", c, ns.latest) } - if c <= ns.earliest { + if c <= ns.earliest { // i.e. age >= 1 ns.nonceRedeems.WithLabelValues("invalid", "too low").Inc() - return false + ns.nonceAges.WithLabelValues("invalid").Observe(age) + return berrors.BadNonceError("nonce less than lowest eligible nonce: %d < %d", c, ns.earliest) } if ns.used[c] { ns.nonceRedeems.WithLabelValues("invalid", "already used").Inc() - return false + ns.nonceAges.WithLabelValues("invalid").Observe(age) + return berrors.BadNonceError("nonce already marked as used: %d in [%d]used", c, len(ns.used)) } ns.used[c] = true @@ -238,35 +298,60 @@ func (ns *NonceService) Valid(nonce string) bool { if len(ns.used) > ns.maxUsed { s := time.Now() ns.earliest = heap.Pop(ns.usedHeap).(int64) + ns.nonceEarliest.Set(float64(ns.earliest)) ns.nonceHeapLatency.Observe(time.Since(s).Seconds()) delete(ns.used, ns.earliest) } ns.nonceRedeems.WithLabelValues("valid", "").Inc() - return true + ns.nonceAges.WithLabelValues("valid").Observe(age) + return nil } -func splitNonce(nonce string) (string, string, error) { - if len(nonce) < 4 { +// splitNonce splits a nonce into a prefix and a body. +func (ns *NonceService) splitNonce(nonce string) (string, string, error) { + if len(nonce) < PrefixLen { return "", "", errInvalidNonceLength } - return nonce[:4], nonce[4:], nil + return nonce[:PrefixLen], nonce[PrefixLen:], nil } -// RemoteRedeem checks the nonce prefix and routes the Redeem RPC -// to the associated remote nonce service -func RemoteRedeem(ctx context.Context, noncePrefixMap map[string]noncepb.NonceServiceClient, nonce string) (bool, error) { - prefix, _, err := splitNonce(nonce) +// Redeem accepts a nonce from a gRPC client and redeems it using the inner nonce service. 
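+// As an illustrative sketch (not part of this patch; it assumes a caller
+// that already holds a gRPC client connection to this service), redemption
+// looks like:
+//
+//	_, err := nonce.NewRedeemer(conn).Redeem(ctx, &noncepb.NonceMessage{Nonce: n})
+//
+// where a non-nil err corresponds to the berrors.BadNonceError produced by
+// valid above (NewRedeemer is defined near the bottom of this file).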
+func (ns *NonceService) Redeem(ctx context.Context, msg *noncepb.NonceMessage) (*noncepb.ValidMessage, error) { + err := ns.valid(msg.Nonce) if err != nil { - return false, nil - } - nonceService, present := noncePrefixMap[prefix] - if !present { - return false, nil + return nil, err } - resp, err := nonceService.Redeem(ctx, &noncepb.NonceMessage{Nonce: nonce}) + return &noncepb.ValidMessage{Valid: true}, nil +} + +// Nonce generates a nonce and sends it to a gRPC client. +func (ns *NonceService) Nonce(_ context.Context, _ *emptypb.Empty) (*noncepb.NonceMessage, error) { + nonce, err := ns.nonce() if err != nil { - return false, err + return nil, err } - return resp.Valid, nil + return &noncepb.NonceMessage{Nonce: nonce}, nil +} + +// Getter is an interface for an RPC client that can get a nonce. +type Getter interface { + Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*noncepb.NonceMessage, error) +} + +// Redeemer is an interface for an RPC client that can redeem a nonce. +type Redeemer interface { + Redeem(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) +} + +// NewGetter returns a new noncepb.NonceServiceClient which can only be used to +// get nonces. +func NewGetter(cc grpc.ClientConnInterface) Getter { + return noncepb.NewNonceServiceClient(cc) +} + +// NewRedeemer returns a new noncepb.NonceServiceClient which can only be used +// to redeem nonces. +func NewRedeemer(cc grpc.ClientConnInterface) Redeemer { + return noncepb.NewNonceServiceClient(cc) } diff --git a/nonce/nonce_test.go b/nonce/nonce_test.go index c54e4a4a9d1..edc2885b695 100644 --- a/nonce/nonce_test.go +++ b/nonce/nonce_test.go @@ -1,47 +1,62 @@ package nonce import ( - "context" - "errors" "fmt" "testing" + "github.com/prometheus/client_golang/prometheus" + "github.com/letsencrypt/boulder/metrics" - noncepb "github.com/letsencrypt/boulder/nonce/proto" "github.com/letsencrypt/boulder/test" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" ) func TestValidNonce(t *testing.T) { ns, err := NewNonceService(metrics.NoopRegisterer, 0, "") test.AssertNotError(t, err, "Could not create nonce service") - n, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, ns.Valid(n), fmt.Sprintf("Did not recognize fresh nonce %s", n)) + test.AssertNotError(t, ns.valid(n), fmt.Sprintf("Did not recognize fresh nonce %s", n)) + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "valid", "error": "", + }, 1) + test.AssertHistogramBucketCount(t, ns.nonceAges, prometheus.Labels{ + "result": "valid", + }, 0, 1) } func TestAlreadyUsed(t *testing.T) { ns, err := NewNonceService(metrics.NoopRegisterer, 0, "") test.AssertNotError(t, err, "Could not create nonce service") - n, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, ns.Valid(n), "Did not recognize fresh nonce") - test.Assert(t, !ns.Valid(n), "Recognized the same nonce twice") + test.AssertNotError(t, ns.valid(n), "Did not recognize fresh nonce") + test.AssertError(t, ns.valid(n), "Recognized the same nonce twice") + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "already used", + }, 1) + test.AssertHistogramBucketCount(t, ns.nonceAges, prometheus.Labels{ + "result": "invalid", + }, 0, 1) } func TestRejectMalformed(t *testing.T) { ns, err := 
NewNonceService(metrics.NoopRegisterer, 0, "") test.AssertNotError(t, err, "Could not create nonce service") - n, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, !ns.Valid("asdf"+n), "Accepted an invalid nonce") + test.AssertError(t, ns.valid("asdf"+n), "Accepted an invalid nonce") + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "decrypt", + }, 1) } func TestRejectShort(t *testing.T) { ns, err := NewNonceService(metrics.NoopRegisterer, 0, "") test.AssertNotError(t, err, "Could not create nonce service") - test.Assert(t, !ns.Valid("aGkK"), "Accepted an invalid nonce") + test.AssertError(t, ns.valid("aGkK"), "Accepted an invalid nonce") + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "decrypt", + }, 1) } func TestRejectUnknown(t *testing.T) { @@ -50,9 +65,12 @@ func TestRejectUnknown(t *testing.T) { ns2, err := NewNonceService(metrics.NoopRegisterer, 0, "") test.AssertNotError(t, err, "Could not create nonce service") - n, err := ns1.Nonce() + n, err := ns1.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, !ns2.Valid(n), "Accepted a foreign nonce") + test.AssertError(t, ns2.valid(n), "Accepted a foreign nonce") + test.AssertMetricWithLabelsEquals(t, ns2.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "decrypt", + }, 1) } func TestRejectTooLate(t *testing.T) { @@ -60,38 +78,79 @@ func TestRejectTooLate(t *testing.T) { test.AssertNotError(t, err, "Could not create nonce service") ns.latest = 2 - n, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") ns.latest = 1 - test.Assert(t, !ns.Valid(n), "Accepted a nonce with a too-high counter") + test.AssertError(t, ns.valid(n), "Accepted a nonce with a too-high counter") + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "too high", + }, 1) + test.AssertHistogramBucketCount(t, ns.nonceAges, prometheus.Labels{ + "result": "invalid", + }, -1, 1) } func TestRejectTooEarly(t *testing.T) { - ns, err := NewNonceService(metrics.NoopRegisterer, 0, "") + // Use a very low value for maxUsed so the loop below can be short. + ns, err := NewNonceService(metrics.NoopRegisterer, 2, "") test.AssertNotError(t, err, "Could not create nonce service") - n0, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - for i := 0; i < ns.maxUsed; i++ { - n, err := ns.Nonce() + // Generate and redeem enough nonces to surpass maxUsed, forcing the nonce + // service to move ns.earliest upwards, invalidating n. 
+ for range ns.maxUsed + 1 { + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - if !ns.Valid(n) { - t.Errorf("generated invalid nonce") - } + test.AssertNotError(t, ns.valid(n), "Rejected a valid nonce") } - n1, err := ns.Nonce() - test.AssertNotError(t, err, "Could not create nonce") - n2, err := ns.Nonce() - test.AssertNotError(t, err, "Could not create nonce") - n3, err := ns.Nonce() - test.AssertNotError(t, err, "Could not create nonce") + test.AssertError(t, ns.valid(n), "Accepted a nonce that we should have forgotten") + test.AssertMetricWithLabelsEquals(t, ns.nonceRedeems, prometheus.Labels{ + "result": "invalid", "error": "too low", + }, 1) + test.AssertHistogramBucketCount(t, ns.nonceAges, prometheus.Labels{ + "result": "invalid", + }, 1.5, 1) +} + +func TestNonceMetrics(t *testing.T) { + // Use a low value for maxUsed so the loop below can be short. + ns, err := NewNonceService(metrics.NoopRegisterer, 2, "") + test.AssertNotError(t, err, "Could not create nonce service") - test.Assert(t, ns.Valid(n3), "Rejected a valid nonce") - test.Assert(t, ns.Valid(n2), "Rejected a valid nonce") - test.Assert(t, ns.Valid(n1), "Rejected a valid nonce") - test.Assert(t, !ns.Valid(n0), "Accepted a nonce that we should have forgotten") + // After issuing (but not redeeming) many nonces, the latest should have + increased by the same amount and the earliest shouldn't have moved at all. + var nonces []string + for range 10 * ns.maxUsed { + n, err := ns.nonce() + test.AssertNotError(t, err, "Could not create nonce") + nonces = append(nonces, n) + } + test.AssertMetricWithLabelsEquals(t, ns.nonceEarliest, nil, 0) + test.AssertMetricWithLabelsEquals(t, ns.nonceLatest, nil, 20) + + // Redeeming maxUsed nonces shouldn't cause either metric to change, because + no redeemed nonces have been dropped from the used heap yet. + test.AssertNotError(t, ns.valid(nonces[0]), "Rejected a valid nonce") + test.AssertNotError(t, ns.valid(nonces[1]), "Rejected a valid nonce") + test.AssertMetricWithLabelsEquals(t, ns.nonceEarliest, nil, 0) + test.AssertMetricWithLabelsEquals(t, ns.nonceLatest, nil, 20) + + // Redeeming one more nonce should cause the earliest to move forward one, as + the earliest redeemed nonce is popped from the heap. + test.AssertNotError(t, ns.valid(nonces[2]), "Rejected a valid nonce") + test.AssertMetricWithLabelsEquals(t, ns.nonceEarliest, nil, 1) + test.AssertMetricWithLabelsEquals(t, ns.nonceLatest, nil, 20) + + // Redeeming maxUsed+1 much later nonces should cause the earliest to skip + forward to the first of those. 
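+	// (To spell out the arithmetic with maxUsed=2: counters 1, 2, and 3 were
+	// redeemed above, so the used set holds {2, 3} and earliest is 1. Redeeming
+	// counters 18, 19, and 20 overflows the set three times, popping 2, 3, and
+	// then 18 off the heap, so the earliest gauge lands on 18 while the latest
+	// gauge stays at 20.)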
+ test.AssertNotError(t, ns.valid(nonces[17]), "Rejected a valid nonce") + test.AssertNotError(t, ns.valid(nonces[18]), "Rejected a valid nonce") + test.AssertNotError(t, ns.valid(nonces[19]), "Rejected a valid nonce") + test.AssertMetricWithLabelsEquals(t, ns.nonceEarliest, nil, 18) + test.AssertMetricWithLabelsEquals(t, ns.nonceLatest, nil, 20) } func BenchmarkNonces(b *testing.B) { @@ -100,12 +159,12 @@ func BenchmarkNonces(b *testing.B) { b.Fatal("creating nonce service", err) } - for i := 0; i < ns.maxUsed; i++ { - n, err := ns.Nonce() + for range ns.maxUsed { + n, err := ns.nonce() if err != nil { b.Fatal("noncing", err) } - if !ns.Valid(n) { + if ns.valid(n) != nil { b.Fatal("generated invalid nonce") } } @@ -113,11 +172,11 @@ func BenchmarkNonces(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - n, err := ns.Nonce() + n, err := ns.nonce() if err != nil { b.Fatal("noncing", err) } - if !ns.Valid(n) { + if ns.valid(n) != nil { b.Fatal("generated invalid nonce") } } @@ -125,88 +184,33 @@ func BenchmarkNonces(b *testing.B) { } func TestNoncePrefixing(t *testing.T) { - ns, err := NewNonceService(metrics.NoopRegisterer, 0, "zinc") + ns, err := NewNonceService(metrics.NoopRegisterer, 0, "aluminum") test.AssertNotError(t, err, "Could not create nonce service") - n, err := ns.Nonce() + n, err := ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, ns.Valid(n), "Valid nonce rejected") + test.AssertNotError(t, ns.valid(n), "Valid nonce rejected") - n, err = ns.Nonce() + n, err = ns.nonce() test.AssertNotError(t, err, "Could not create nonce") n = n[1:] - test.Assert(t, !ns.Valid(n), "Valid nonce with incorrect prefix accepted") + test.AssertError(t, ns.valid(n), "Valid nonce with incorrect prefix accepted") - n, err = ns.Nonce() + n, err = ns.nonce() test.AssertNotError(t, err, "Could not create nonce") - test.Assert(t, !ns.Valid(n[6:]), "Valid nonce without prefix accepted") -} - -type malleableNonceClient struct { - redeem func(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) -} - -func (mnc *malleableNonceClient) Redeem(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) { - return mnc.redeem(ctx, in, opts...) 
-} - -func (mnc *malleableNonceClient) Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*noncepb.NonceMessage, error) { - return nil, errors.New("unimplemented") -} - -func TestRemoteRedeem(t *testing.T) { - valid, err := RemoteRedeem(context.Background(), nil, "q") - test.AssertNotError(t, err, "RemoteRedeem failed") - test.Assert(t, !valid, "RemoteRedeem accepted an invalid nonce") - valid, err = RemoteRedeem(context.Background(), nil, "") - test.AssertNotError(t, err, "RemoteRedeem failed") - test.Assert(t, !valid, "RemoteRedeem accepted an empty nonce") - - prefixMap := map[string]noncepb.NonceServiceClient{ - "abcd": &malleableNonceClient{ - redeem: func(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) { - return nil, errors.New("wrong one!") - }, - }, - "wxyz": &malleableNonceClient{ - redeem: func(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) { - return &noncepb.ValidMessage{Valid: false}, nil - }, - }, - } - // Attempt to redeem a nonce with a prefix not in the prefix map, expect return false, nil - valid, err = RemoteRedeem(context.Background(), prefixMap, "asddCQEC") - test.AssertNotError(t, err, "RemoteRedeem failed") - test.Assert(t, !valid, "RemoteRedeem accepted nonce not in prefix map") - - // Attempt to redeem a nonce with a prefix in the prefix map, remote returns error - // expect false, err - _, err = RemoteRedeem(context.Background(), prefixMap, "abcdbeef") - test.AssertError(t, err, "RemoteRedeem didn't return error when remote did") - - // Attempt to redeem a nonce with a prefix in the prefix map, remote returns valid - // expect true, nil - valid, err = RemoteRedeem(context.Background(), prefixMap, "wxyzdead") - test.AssertNotError(t, err, "RemoteRedeem failed") - test.Assert(t, !valid, "RemoteRedeem didn't honor remote result") - - // Attempt to redeem a nonce with a prefix in the prefix map, remote returns invalid - // expect false, nil - prefixMap["wxyz"] = &malleableNonceClient{ - redeem: func(ctx context.Context, in *noncepb.NonceMessage, opts ...grpc.CallOption) (*noncepb.ValidMessage, error) { - return &noncepb.ValidMessage{Valid: true}, nil - }, - } - valid, err = RemoteRedeem(context.Background(), prefixMap, "wxyzdead") - test.AssertNotError(t, err, "RemoteRedeem failed") - test.Assert(t, valid, "RemoteRedeem didn't honor remote result") + test.AssertError(t, ns.valid(n[6:]), "Valid nonce without prefix accepted") } func TestNoncePrefixValidation(t *testing.T) { - _, err := NewNonceService(metrics.NoopRegisterer, 0, "hey") + _, err := NewNonceService(metrics.NoopRegisterer, 0, "whatsup") test.AssertError(t, err, "NewNonceService didn't fail with short prefix") - _, err = NewNonceService(metrics.NoopRegisterer, 0, "hey!") + _, err = NewNonceService(metrics.NoopRegisterer, 0, "whatsup!") test.AssertError(t, err, "NewNonceService didn't fail with invalid base64") - _, err = NewNonceService(metrics.NoopRegisterer, 0, "heyy") + _, err = NewNonceService(metrics.NoopRegisterer, 0, "whatsupp") test.AssertNotError(t, err, "NewNonceService failed with valid nonce prefix") } + +func TestDerivePrefix(t *testing.T) { + prefix := DerivePrefix("192.168.1.1:8080", []byte("3b8c758dd85e113ea340ce0b3a99f389d40a308548af94d1730a7692c1874f1f")) + test.AssertEquals(t, prefix, "P9qQaK4o") +} diff --git a/nonce/proto/nonce.pb.go b/nonce/proto/nonce.pb.go index 73ca67cb3e2..3ae86bd12f1 100644 --- a/nonce/proto/nonce.pb.go +++ b/nonce/proto/nonce.pb.go 
@@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: nonce.proto package proto @@ -12,6 +12,7 @@ import ( emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,20 +23,17 @@ const ( ) type NonceMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Nonce string `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` unknownFields protoimpl.UnknownFields - - Nonce string `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` + sizeCache protoimpl.SizeCache } func (x *NonceMessage) Reset() { *x = NonceMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_nonce_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_nonce_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NonceMessage) String() string { @@ -46,7 +44,7 @@ func (*NonceMessage) ProtoMessage() {} func (x *NonceMessage) ProtoReflect() protoreflect.Message { mi := &file_nonce_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -69,20 +67,17 @@ func (x *NonceMessage) GetNonce() string { } type ValidMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` unknownFields protoimpl.UnknownFields - - Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ValidMessage) Reset() { *x = ValidMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_nonce_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_nonce_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ValidMessage) String() string { @@ -93,7 +88,7 @@ func (*ValidMessage) ProtoMessage() {} func (x *ValidMessage) ProtoReflect() protoreflect.Message { mi := &file_nonce_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -117,7 +112,7 @@ func (x *ValidMessage) GetValid() bool { var File_nonce_proto protoreflect.FileDescriptor -var file_nonce_proto_rawDesc = []byte{ +var file_nonce_proto_rawDesc = string([]byte{ 0x0a, 0x0b, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, @@ -138,22 +133,22 @@ var file_nonce_proto_rawDesc = []byte{ 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_nonce_proto_rawDescOnce sync.Once - file_nonce_proto_rawDescData = file_nonce_proto_rawDesc + file_nonce_proto_rawDescData []byte ) func file_nonce_proto_rawDescGZIP() []byte { 
file_nonce_proto_rawDescOnce.Do(func() { - file_nonce_proto_rawDescData = protoimpl.X.CompressGZIP(file_nonce_proto_rawDescData) + file_nonce_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_nonce_proto_rawDesc), len(file_nonce_proto_rawDesc))) }) return file_nonce_proto_rawDescData } var file_nonce_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_nonce_proto_goTypes = []interface{}{ +var file_nonce_proto_goTypes = []any{ (*NonceMessage)(nil), // 0: nonce.NonceMessage (*ValidMessage)(nil), // 1: nonce.ValidMessage (*emptypb.Empty)(nil), // 2: google.protobuf.Empty @@ -175,37 +170,11 @@ func file_nonce_proto_init() { if File_nonce_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_nonce_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NonceMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_nonce_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_nonce_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_nonce_proto_rawDesc), len(file_nonce_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -216,7 +185,6 @@ func file_nonce_proto_init() { MessageInfos: file_nonce_proto_msgTypes, }.Build() File_nonce_proto = out.File - file_nonce_proto_rawDesc = nil file_nonce_proto_goTypes = nil file_nonce_proto_depIdxs = nil } diff --git a/nonce/proto/nonce_grpc.pb.go b/nonce/proto/nonce_grpc.pb.go index 6e8e182c884..d0525e8795e 100644 --- a/nonce/proto/nonce_grpc.pb.go +++ b/nonce/proto/nonce_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: nonce.proto package proto @@ -12,8 +16,13 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + NonceService_Nonce_FullMethodName = "/nonce.NonceService/Nonce" + NonceService_Redeem_FullMethodName = "/nonce.NonceService/Redeem" +) // NonceServiceClient is the client API for NonceService service. // @@ -32,8 +41,9 @@ func NewNonceServiceClient(cc grpc.ClientConnInterface) NonceServiceClient { } func (c *nonceServiceClient) Nonce(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*NonceMessage, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(NonceMessage) - err := c.cc.Invoke(ctx, "/nonce.NonceService/Nonce", in, out, opts...) + err := c.cc.Invoke(ctx, NonceService_Nonce_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -41,8 +51,9 @@ func (c *nonceServiceClient) Nonce(ctx context.Context, in *emptypb.Empty, opts } func (c *nonceServiceClient) Redeem(ctx context.Context, in *NonceMessage, opts ...grpc.CallOption) (*ValidMessage, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(ValidMessage) - err := c.cc.Invoke(ctx, "/nonce.NonceService/Redeem", in, out, opts...) + err := c.cc.Invoke(ctx, NonceService_Redeem_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -51,16 +62,19 @@ func (c *nonceServiceClient) Redeem(ctx context.Context, in *NonceMessage, opts // NonceServiceServer is the server API for NonceService service. // All implementations must embed UnimplementedNonceServiceServer -// for forward compatibility +// for forward compatibility. type NonceServiceServer interface { Nonce(context.Context, *emptypb.Empty) (*NonceMessage, error) Redeem(context.Context, *NonceMessage) (*ValidMessage, error) mustEmbedUnimplementedNonceServiceServer() } -// UnimplementedNonceServiceServer must be embedded to have forward compatible implementations. -type UnimplementedNonceServiceServer struct { -} +// UnimplementedNonceServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedNonceServiceServer struct{} func (UnimplementedNonceServiceServer) Nonce(context.Context, *emptypb.Empty) (*NonceMessage, error) { return nil, status.Errorf(codes.Unimplemented, "method Nonce not implemented") @@ -69,6 +83,7 @@ func (UnimplementedNonceServiceServer) Redeem(context.Context, *NonceMessage) (* return nil, status.Errorf(codes.Unimplemented, "method Redeem not implemented") } func (UnimplementedNonceServiceServer) mustEmbedUnimplementedNonceServiceServer() {} +func (UnimplementedNonceServiceServer) testEmbeddedByValue() {} // UnsafeNonceServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to NonceServiceServer will @@ -78,6 +93,13 @@ type UnsafeNonceServiceServer interface { } func RegisterNonceServiceServer(s grpc.ServiceRegistrar, srv NonceServiceServer) { + // If the following call panics, it indicates UnimplementedNonceServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&NonceService_ServiceDesc, srv) } @@ -91,7 +113,7 @@ func _NonceService_Nonce_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/nonce.NonceService/Nonce", + FullMethod: NonceService_Nonce_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NonceServiceServer).Nonce(ctx, req.(*emptypb.Empty)) @@ -109,7 +131,7 @@ func _NonceService_Redeem_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/nonce.NonceService/Redeem", + FullMethod: NonceService_Redeem_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NonceServiceServer).Redeem(ctx, req.(*NonceMessage)) diff --git a/observer/mon_conf.go b/observer/mon_conf.go index f04535929db..44ecb1a5719 100644 --- a/observer/mon_conf.go +++ b/observer/mon_conf.go @@ -2,19 +2,20 @@ package observer import ( "errors" - "strings" "time" - "github.com/letsencrypt/boulder/cmd" + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v3" + + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/observer/probers" - "gopkg.in/yaml.v2" ) // MonConf is exported to receive YAML configuration in `ObsConf`. type MonConf struct { - Period cmd.ConfigDuration `yaml:"period"` - Kind string `yaml:"kind"` - Settings probers.Settings `yaml:"settings"` + Period config.Duration `yaml:"period"` + Kind string `yaml:"kind" validate:"required,oneof=DNS HTTP CRL TLS TCP"` + Settings probers.Settings `yaml:"settings" validate:"min=1,dive"` } // validatePeriod ensures the received `Period` field is at least 1µs. @@ -30,8 +31,7 @@ func (c *MonConf) validatePeriod() error { // `UnmarshalSettings` method of the `Configurer` type specified by the // `Kind` field. func (c MonConf) unmarshalConfigurer() (probers.Configurer, error) { - kind := strings.Trim(strings.ToLower(c.Kind), " ") - configurer, err := probers.GetConfigurer(kind) + configurer, err := probers.GetConfigurer(c.Kind) if err != nil { return nil, err } @@ -46,7 +46,7 @@ func (c MonConf) unmarshalConfigurer() (probers.Configurer, error) { // makeMonitor constructs a `monitor` object from the contents of the // bound `MonConf`. If the `MonConf` cannot be validated, an error // appropriate for end-user consumption is returned instead. 
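For orientation, one YAML `monitors` entry unmarshals into MonConf roughly like the sketch below (not repository code: the HTTP settings keys are invented for illustration, and it assumes config.Duration implements yaml.Unmarshaler for duration strings such as "5s"):

	// Inside package observer; yaml is gopkg.in/yaml.v3, as imported above.
	raw := []byte("period: 5s\nkind: HTTP\nsettings:\n  url: https://example.com\n")
	var mc MonConf
	err := yaml.Unmarshal(raw, &mc)
	if err != nil {
		// Malformed YAML; semantic problems are left to the validate tags above.
	}
	// On success: mc.Kind == "HTTP", mc.Period.Duration == 5*time.Second.

makeMonitor, just below, turns one such entry into a runnable monitor: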
-func (c MonConf) makeMonitor() (*monitor, error) { +func (c MonConf) makeMonitor(collectors map[string]prometheus.Collector) (*monitor, error) { err := c.validatePeriod() if err != nil { return nil, err @@ -55,7 +55,7 @@ func (c MonConf) makeMonitor() (*monitor, error) { if err != nil { return nil, err } - prober, err := probeConf.MakeProber() + prober, err := probeConf.MakeProber(collectors) if err != nil { return nil, err } diff --git a/observer/mon_conf_test.go b/observer/mon_conf_test.go index 7bcd1be217c..24c5b711065 100644 --- a/observer/mon_conf_test.go +++ b/observer/mon_conf_test.go @@ -4,22 +4,22 @@ import ( "testing" "time" - "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/test" ) func TestMonConf_validatePeriod(t *testing.T) { type fields struct { - Period cmd.ConfigDuration + Period config.Duration } tests := []struct { name string fields fields wantErr bool }{ - {"valid", fields{cmd.ConfigDuration{Duration: 1 * time.Microsecond}}, false}, - {"1 nanosecond", fields{cmd.ConfigDuration{Duration: 1 * time.Nanosecond}}, true}, - {"none supplied", fields{cmd.ConfigDuration{}}, true}, + {"valid", fields{config.Duration{Duration: 1 * time.Microsecond}}, false}, + {"1 nanosecond", fields{config.Duration{Duration: 1 * time.Nanosecond}}, true}, + {"none supplied", fields{config.Duration{}}, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/observer/monitor.go b/observer/monitor.go index 37c2bc97752..c3073a86034 100644 --- a/observer/monitor.go +++ b/observer/monitor.go @@ -18,8 +18,8 @@ type monitor struct { func (m monitor) start(logger blog.Logger) { ticker := time.NewTicker(m.period) timeout := m.period / 2 - go func() { - for range ticker.C { + for { + go func() { // Attempt to probe the configured target. success, dur := m.prober.Probe(timeout) @@ -32,6 +32,7 @@ func (m monitor) start(logger blog.Logger) { logger.Infof( "kind=[%s] success=[%v] duration=[%f] name=[%s]", m.prober.Kind(), success, dur.Seconds(), m.prober.Name()) - } - }() + }() + <-ticker.C + } } diff --git a/observer/obs_conf.go b/observer/obs_conf.go index b1c423d4871..f7d66a1b51b 100644 --- a/observer/obs_conf.go +++ b/observer/obs_conf.go @@ -3,11 +3,12 @@ package observer import ( "errors" "fmt" - "net" "strconv" - "github.com/letsencrypt/boulder/cmd" "github.com/prometheus/client_golang/prometheus" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/observer/probers" ) var ( @@ -23,59 +24,59 @@ var ( // ObsConf is exported to receive YAML configuration. type ObsConf struct { - DebugAddr string `yaml:"debugaddr"` - Buckets []float64 `yaml:"buckets"` - Syslog cmd.SyslogConfig `yaml:"syslog"` - MonConfs []*MonConf `yaml:"monitors"` -} - -// validateSyslog ensures the the `Syslog` field received by `ObsConf` -// contains valid log levels. -func (c *ObsConf) validateSyslog() error { - syslog, stdout := c.Syslog.SyslogLevel, c.Syslog.StdoutLevel - if stdout < 0 || stdout > 7 || syslog < 0 || syslog > 7 { - return fmt.Errorf( - "invalid 'syslog', '%+v', valid log levels are 0-7", c.Syslog) - } - return nil -} - -// validateDebugAddr ensures the `debugAddr` received by `ObsConf` is -// properly formatted and a valid port. 
-func (c *ObsConf) validateDebugAddr() error { - _, p, err := net.SplitHostPort(c.DebugAddr) - if err != nil { - return fmt.Errorf( - "invalid 'debugaddr', %q, not expected format", c.DebugAddr) - } - port, _ := strconv.Atoi(p) - if port <= 0 || port > 65535 { - return fmt.Errorf( - "invalid 'debugaddr','%d' is not a valid port", port) - } - return nil + DebugAddr string `yaml:"debugaddr" validate:"omitempty,hostname_port"` + Buckets []float64 `yaml:"buckets" validate:"min=1,dive"` + Syslog cmd.SyslogConfig `yaml:"syslog"` + OpenTelemetry cmd.OpenTelemetryConfig + MonConfs []*MonConf `yaml:"monitors" validate:"min=1,dive"` } -func (c *ObsConf) makeMonitors() ([]*monitor, []error, error) { +func (c *ObsConf) makeMonitors(metrics prometheus.Registerer) ([]*monitor, []error, error) { var errs []error var monitors []*monitor + proberSpecificMetrics := make(map[string]map[string]prometheus.Collector) for e, m := range c.MonConfs { entry := strconv.Itoa(e + 1) - monitor, err := m.makeMonitor() + proberConf, err := probers.GetConfigurer(m.Kind) + if err != nil { + // append error to errs + errs = append(errs, fmt.Errorf("'monitors' entry #%s couldn't be validated: %w", entry, err)) + // increment metrics + countMonitors.WithLabelValues(m.Kind, "false").Inc() + // bail out before constructing the monitor. with no configurer, it will fail + continue + } + kind := proberConf.Kind() + + // set up custom metrics internal to each prober kind + _, exist := proberSpecificMetrics[kind] + if !exist { + // we haven't seen this prober kind before, so we need to request + // any custom metrics it may have and register them with the + // prometheus registry + proberSpecificMetrics[kind] = make(map[string]prometheus.Collector) + for name, collector := range proberConf.Instrument() { + // register the collector with the prometheus registry + metrics.MustRegister(collector) + // store the registered collector so we can pass it to every + // monitor that will construct this kind of prober + proberSpecificMetrics[kind][name] = collector + } + } + + monitor, err := m.makeMonitor(proberSpecificMetrics[kind]) if err != nil { // append validation error to errs - errs = append( - errs, fmt.Errorf( - "'monitors' entry #%s couldn't be validated: %v", entry, err)) + errs = append(errs, fmt.Errorf("'monitors' entry #%s couldn't be validated: %w", entry, err)) // increment metrics - countMonitors.WithLabelValues(m.Kind, "false").Inc() + countMonitors.WithLabelValues(kind, "false").Inc() } else { // append monitor to monitors monitors = append(monitors, monitor) // increment metrics - countMonitors.WithLabelValues(m.Kind, "true").Inc() + countMonitors.WithLabelValues(kind, "true").Inc() } } if len(c.MonConfs) == len(errs) { @@ -88,26 +89,8 @@ func (c *ObsConf) makeMonitors() ([]*monitor, []error, error) { // bound `ObsConf`. If the `ObsConf` cannot be validated, an error // appropriate for end-user consumption is returned instead. func (c *ObsConf) MakeObserver() (*Observer, error) { - err := c.validateSyslog() - if err != nil { - return nil, err - } - - err = c.validateDebugAddr() - if err != nil { - return nil, err - } - - if len(c.MonConfs) == 0 { - return nil, errors.New("no monitors provided") - } - - if len(c.Buckets) == 0 { - return nil, errors.New("no histogram buckets provided") - } - // Start monitoring and logging. 
- metrics, logger := cmd.StatsAndLogging(c.Syslog, c.DebugAddr) + metrics, logger, shutdown := cmd.StatsAndLogging(c.Syslog, c.OpenTelemetry, c.DebugAddr) histObservations = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "obs_observations", @@ -116,12 +99,12 @@ func (c *ObsConf) MakeObserver() (*Observer, error) { }, []string{"name", "kind", "success"}) metrics.MustRegister(countMonitors) metrics.MustRegister(histObservations) - defer logger.AuditPanic() - logger.Info(cmd.VersionString()) + defer cmd.AuditPanic() + cmd.LogStartup(logger) logger.Infof("Initializing boulder-observer daemon") logger.Debugf("Using config: %+v", c) - monitors, errs, err := c.makeMonitors() + monitors, errs, err := c.makeMonitors(metrics) if len(errs) != 0 { logger.Errf("%d of %d monitors failed validation", len(errs), len(c.MonConfs)) for _, err := range errs { @@ -133,5 +116,5 @@ func (c *ObsConf) MakeObserver() (*Observer, error) { if err != nil { return nil, err } - return &Observer{logger, monitors}, nil + return &Observer{logger, monitors, shutdown}, nil } diff --git a/observer/obs_conf_test.go b/observer/obs_conf_test.go index 2a41a25b441..dc2269e2e57 100644 --- a/observer/obs_conf_test.go +++ b/observer/obs_conf_test.go @@ -6,21 +6,22 @@ import ( "time" "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/metrics" "github.com/letsencrypt/boulder/observer/probers" _ "github.com/letsencrypt/boulder/observer/probers/mock" - "github.com/letsencrypt/boulder/test" ) const ( debugAddr = ":8040" errDBZMsg = "over 9000" - mockConf = "MockConf" + mockConf = "Mock" ) func TestObsConf_makeMonitors(t *testing.T) { var errDBZ = errors.New(errDBZMsg) var cfgSyslog = cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 6} - var cfgDur = cmd.ConfigDuration{Duration: time.Second * 5} + var cfgDur = config.Duration{Duration: time.Second * 5} var cfgBuckets = []float64{.001} var validMonConf = &MonConf{ cfgDur, mockConf, probers.Settings{"valid": true, "pname": "foo", "pkind": "bar"}} @@ -58,7 +59,7 @@ func TestObsConf_makeMonitors(t *testing.T) { DebugAddr: tt.fields.DebugAddr, MonConfs: tt.fields.MonConfs, } - _, errs, err := c.makeMonitors() + _, errs, err := c.makeMonitors(metrics.NoopRegisterer) if len(errs) != len(tt.errs) { t.Errorf("ObsConf.validateMonConfs() errs = %d, want %d", len(errs), len(tt.errs)) t.Logf("%v", errs) @@ -69,72 +70,3 @@ func TestObsConf_makeMonitors(t *testing.T) { }) } } - -func TestObsConf_ValidateDebugAddr(t *testing.T) { - type fields struct { - DebugAddr string - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - // valid - {"max len and range", fields{":65535"}, false}, - {"min len and range", fields{":1"}, false}, - {"2 digits", fields{":80"}, false}, - // invalid - {"out of range high", fields{":65536"}, true}, - {"out of range low", fields{":0"}, true}, - {"not even a port", fields{":foo"}, true}, - {"missing :", fields{"foo"}, true}, - {"missing port", fields{"foo:"}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &ObsConf{ - DebugAddr: tt.fields.DebugAddr, - } - err := c.validateDebugAddr() - if tt.wantErr { - test.AssertError(t, err, "ObsConf.ValidateDebugAddr() should have errored") - } else { - test.AssertNotError(t, err, "ObsConf.ValidateDebugAddr() shouldn't have errored") - } - }) - } -} - -func TestObsConf_validateSyslog(t *testing.T) { - type fields struct { - Syslog cmd.SyslogConfig - } - tests := []struct { - name string - fields fields - wantErr 
bool - }{ - // valid - {"valid", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 6}}, false}, - // invalid - {"both too high", fields{cmd.SyslogConfig{StdoutLevel: 9, SyslogLevel: 9}}, true}, - {"stdout too high", fields{cmd.SyslogConfig{StdoutLevel: 9, SyslogLevel: 6}}, true}, - {"syslog too high", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: 9}}, true}, - {"both too low", fields{cmd.SyslogConfig{StdoutLevel: -1, SyslogLevel: -1}}, true}, - {"stdout too low", fields{cmd.SyslogConfig{StdoutLevel: -1, SyslogLevel: 6}}, true}, - {"syslog too low", fields{cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1}}, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &ObsConf{ - Syslog: tt.fields.Syslog, - } - err := c.validateSyslog() - if tt.wantErr { - test.AssertError(t, err, "ObsConf.validateSyslog() should have errored") - } else { - test.AssertNotError(t, err, "ObsConf.validateSyslog() shouldn't have errored") - } - }) - } -} diff --git a/observer/obsdialer/obsdialer.go b/observer/obsdialer/obsdialer.go new file mode 100644 index 00000000000..222f44308a0 --- /dev/null +++ b/observer/obsdialer/obsdialer.go @@ -0,0 +1,10 @@ +// package obsdialer contains a custom dialer for use in observers. +package obsdialer + +import "net" + +// Dialer is a custom dialer for use in observers. It disables IPv6-to-IPv4 +// fallback so we don't mask failures of IPv6 connectivity. +var Dialer = net.Dialer{ + FallbackDelay: -1, // Disable IPv6-to-IPv4 fallback +} diff --git a/observer/observer.go b/observer/observer.go index 6ef7075a1f4..d42b28d07ee 100644 --- a/observer/observer.go +++ b/observer/observer.go @@ -1,21 +1,30 @@ package observer import ( + "context" + + "github.com/letsencrypt/boulder/cmd" blog "github.com/letsencrypt/boulder/log" + _ "github.com/letsencrypt/boulder/observer/probers/crl" _ "github.com/letsencrypt/boulder/observer/probers/dns" _ "github.com/letsencrypt/boulder/observer/probers/http" + _ "github.com/letsencrypt/boulder/observer/probers/tcp" + _ "github.com/letsencrypt/boulder/observer/probers/tls" ) // Observer is the steward of goroutines started for each `monitor`. type Observer struct { logger blog.Logger monitors []*monitor + shutdown func(ctx context.Context) } -// Start spins off a goroutine for each monitor and then runs forever. +// Start spins off a goroutine for each monitor, and waits for a signal to exit func (o Observer) Start() { for _, mon := range o.monitors { go mon.start(o.logger) } - select {} + + defer o.shutdown(context.Background()) + cmd.WaitForSignal() } diff --git a/observer/probers/crl/crl.go b/observer/probers/crl/crl.go new file mode 100644 index 00000000000..2f3c2de1056 --- /dev/null +++ b/observer/probers/crl/crl.go @@ -0,0 +1,73 @@ +package probers + +import ( + "crypto/x509" + "io" + "net/http" + "slices" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/letsencrypt/boulder/crl/idp" +) + +// CRLProbe is the exported 'Prober' object for monitors configured to +// monitor CRL availability & characteristics. +type CRLProbe struct { + url string + partitioned bool + cNextUpdate *prometheus.GaugeVec + cThisUpdate *prometheus.GaugeVec + cCertCount *prometheus.GaugeVec +} + +// Name returns a string that uniquely identifies the monitor. +func (p CRLProbe) Name() string { + return p.url +} + +// Kind returns a name that uniquely identifies the `Kind` of `Prober`. 
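The new observer/obsdialer package above leans on a documented net.Dialer behavior: a negative FallbackDelay disables RFC 6555 "Happy Eyeballs" fallback, so an IPv6 connection failure is reported rather than quietly retried over IPv4. A sketch of using the shared dialer (the target host is illustrative):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/letsencrypt/boulder/observer/obsdialer"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    // With FallbackDelay < 0, a v6-only failure surfaces here instead of
    // being masked by a fallback dial over IPv4.
    conn, err := obsdialer.Dialer.DialContext(ctx, "tcp", "example.com:443")
    if err != nil {
        fmt.Println("dial failed:", err)
        return
    }
    fmt.Println("connected via", conn.RemoteAddr())
    conn.Close()
}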
+func (p CRLProbe) Kind() string { + return "CRL" +} + +// Probe requests the configured CRL and publishes metrics about it if found. +func (p CRLProbe) Probe(timeout time.Duration) (bool, time.Duration) { + start := time.Now() + resp, err := http.Get(p.url) + if err != nil { + return false, time.Since(start) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return false, time.Since(start) + } + dur := time.Since(start) + + crl, err := x509.ParseRevocationList(body) + if err != nil { + return false, dur + } + + // Partitioned CRLs MUST contain an issuingDistributionPoint extension, which + // MUST contain the URL from which they were fetched, to prevent substitution + // attacks. + if p.partitioned { + idps, err := idp.GetIDPURIs(crl.Extensions) + if err != nil { + return false, dur + } + if !slices.Contains(idps, p.url) { + return false, dur + } + } + + // Report metrics for this CRL + p.cThisUpdate.WithLabelValues(p.url).Set(float64(crl.ThisUpdate.Unix())) + p.cNextUpdate.WithLabelValues(p.url).Set(float64(crl.NextUpdate.Unix())) + p.cCertCount.WithLabelValues(p.url).Set(float64(len(crl.RevokedCertificateEntries))) + + return true, dur +} diff --git a/observer/probers/crl/crl_conf.go b/observer/probers/crl/crl_conf.go new file mode 100644 index 00000000000..b414d3072da --- /dev/null +++ b/observer/probers/crl/crl_conf.go @@ -0,0 +1,129 @@ +package probers + +import ( + "fmt" + "net/url" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/letsencrypt/boulder/observer/probers" + "github.com/letsencrypt/boulder/strictyaml" +) + +const ( + nextUpdateName = "obs_crl_next_update" + thisUpdateName = "obs_crl_this_update" + certCountName = "obs_crl_revoked_cert_count" +) + +// CRLConf is exported to receive YAML configuration +type CRLConf struct { + URL string `yaml:"url"` + Partitioned bool `yaml:"partitioned"` +} + +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. +func (c CRLConf) Kind() string { + return "CRL" +} + +// UnmarshalSettings constructs a CRLConf object from YAML as bytes. +func (c CRLConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) { + var conf CRLConf + err := strictyaml.Unmarshal(settings, &conf) + + if err != nil { + return nil, err + } + return conf, nil +} + +func (c CRLConf) validateURL() error { + url, err := url.Parse(c.URL) + if err != nil { + return fmt.Errorf( + "invalid 'url', got: %q, expected a valid url", c.URL) + } + if url.Scheme == "" { + return fmt.Errorf( + "invalid 'url', got: %q, missing scheme", c.URL) + } + return nil +} + +// MakeProber constructs a `CRLProbe` object from the contents of the +// bound `CRLConf` object. If the `CRLConf` cannot be validated, an +// error appropriate for end-user consumption is returned instead. 
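Probe above is a plain HTTP fetch followed by x509.ParseRevocationList, plus the issuingDistributionPoint cross-check for partitioned CRLs. A standalone sketch of the fetch-and-parse half (the CRL URL is illustrative; unlike Probe, the sketch closes the response body explicitly):

package main

import (
    "crypto/x509"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Any URL serving a DER-encoded CRL works the same way.
    resp, err := http.Get("http://x1.c.lencr.org/")
    if err != nil {
        fmt.Println("fetch failed:", err)
        return
    }
    defer resp.Body.Close()
    der, err := io.ReadAll(resp.Body)
    if err != nil {
        fmt.Println("read failed:", err)
        return
    }
    crl, err := x509.ParseRevocationList(der)
    if err != nil {
        fmt.Println("parse failed:", err)
        return
    }
    fmt.Println("thisUpdate:", crl.ThisUpdate)
    fmt.Println("nextUpdate:", crl.NextUpdate)
    fmt.Println("revoked entries:", len(crl.RevokedCertificateEntries))
}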
+func (c CRLConf) MakeProber(collectors map[string]prometheus.Collector) (probers.Prober, error) {
+    // validate `url`
+    err := c.validateURL()
+    if err != nil {
+        return nil, err
+    }
+
+    // validate the prometheus collectors that were passed in
+    coll, ok := collectors[nextUpdateName]
+    if !ok {
+        return nil, fmt.Errorf("crl prober did not receive collector %q", nextUpdateName)
+    }
+    nextUpdateColl, ok := coll.(*prometheus.GaugeVec)
+    if !ok {
+        return nil, fmt.Errorf("crl prober received collector %q of wrong type, got: %T, expected *prometheus.GaugeVec", nextUpdateName, coll)
+    }
+
+    coll, ok = collectors[thisUpdateName]
+    if !ok {
+        return nil, fmt.Errorf("crl prober did not receive collector %q", thisUpdateName)
+    }
+    thisUpdateColl, ok := coll.(*prometheus.GaugeVec)
+    if !ok {
+        return nil, fmt.Errorf("crl prober received collector %q of wrong type, got: %T, expected *prometheus.GaugeVec", thisUpdateName, coll)
+    }
+
+    coll, ok = collectors[certCountName]
+    if !ok {
+        return nil, fmt.Errorf("crl prober did not receive collector %q", certCountName)
+    }
+    certCountColl, ok := coll.(*prometheus.GaugeVec)
+    if !ok {
+        return nil, fmt.Errorf("crl prober received collector %q of wrong type, got: %T, expected *prometheus.GaugeVec", certCountName, coll)
+    }
+
+    return CRLProbe{c.URL, c.Partitioned, nextUpdateColl, thisUpdateColl, certCountColl}, nil
+}
+
+// Instrument constructs any `prometheus.Collector` objects the `CRLProbe` will
+// need to report its own metrics. A map is returned containing the constructed
+// objects, indexed by the name of the prometheus metric. If no objects were
+// constructed, nil is returned.
+func (c CRLConf) Instrument() map[string]prometheus.Collector {
+    nextUpdate := prometheus.Collector(prometheus.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: nextUpdateName,
+            Help: "CRL nextUpdate Unix timestamp in seconds",
+        }, []string{"url"},
+    ))
+    thisUpdate := prometheus.Collector(prometheus.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: thisUpdateName,
+            Help: "CRL thisUpdate Unix timestamp in seconds",
+        }, []string{"url"},
+    ))
+    certCount := prometheus.Collector(prometheus.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Name: certCountName,
+            Help: "number of certificates revoked in CRL",
+        }, []string{"url"},
+    ))
+    return map[string]prometheus.Collector{
+        nextUpdateName: nextUpdate,
+        thisUpdateName: thisUpdate,
+        certCountName:  certCount,
+    }
+}
+
+// init is called at runtime and registers `CRLConf`, a `Prober`
+// `Configurer` type, as "CRL".
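The split between Instrument and MakeProber above exists so that collectors are constructed and registered once per kind, then shared by every monitor of that kind (obs_conf.go's makeMonitors enforces this). A sketch of the full wiring for the CRL kind (the settings URL is illustrative):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "gopkg.in/yaml.v3"

    "github.com/letsencrypt/boulder/observer/probers"
    _ "github.com/letsencrypt/boulder/observer/probers/crl"
)

func main() {
    configurer, err := probers.GetConfigurer("CRL")
    if err != nil {
        panic(err)
    }

    // Instrument is called once per kind; the registered collectors are then
    // handed to every MakeProber call for that kind.
    colls := configurer.Instrument()
    for name, coll := range colls {
        prometheus.MustRegister(coll)
        fmt.Println("registered", name)
    }

    settings, err := yaml.Marshal(map[string]any{"url": "http://example.com/crl.der"})
    if err != nil {
        panic(err)
    }
    bound, err := configurer.UnmarshalSettings(settings)
    if err != nil {
        panic(err)
    }
    prober, err := bound.MakeProber(colls)
    if err != nil {
        panic(err)
    }
    fmt.Println("constructed prober:", prober.Name())
}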
+func init() { + probers.Register(CRLConf{}) +} diff --git a/observer/probers/crl/crl_conf_test.go b/observer/probers/crl/crl_conf_test.go new file mode 100644 index 00000000000..2c998fb3daf --- /dev/null +++ b/observer/probers/crl/crl_conf_test.go @@ -0,0 +1,99 @@ +package probers + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v3" + + "github.com/letsencrypt/boulder/observer/probers" + "github.com/letsencrypt/boulder/test" +) + +func TestCRLConf_MakeProber(t *testing.T) { + conf := CRLConf{} + colls := conf.Instrument() + badColl := prometheus.Collector(prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "obs_crl_foo", + Help: "Hmmm, this shouldn't be here...", + }, + []string{}, + )) + type fields struct { + URL string + } + tests := []struct { + name string + fields fields + colls map[string]prometheus.Collector + wantErr bool + }{ + // valid + {"valid fqdn", fields{"http://example.com"}, colls, false}, + {"valid fqdn with path", fields{"http://example.com/foo/bar"}, colls, false}, + {"valid hostname", fields{"http://example"}, colls, false}, + // invalid + {"bad fqdn", fields{":::::"}, colls, true}, + {"missing scheme", fields{"example.com"}, colls, true}, + { + "unexpected collector", + fields{"http://example.com"}, + map[string]prometheus.Collector{"obs_crl_foo": badColl}, + true, + }, + { + "missing collectors", + fields{"http://example.com"}, + map[string]prometheus.Collector{}, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := CRLConf{ + URL: tt.fields.URL, + } + p, err := c.MakeProber(tt.colls) + if tt.wantErr { + test.AssertError(t, err, "CRLConf.MakeProber()") + } else { + test.AssertNotError(t, err, "CRLConf.MakeProber()") + + test.AssertNotNil(t, p, "CRLConf.MakeProber(): nil prober") + prober := p.(CRLProbe) + test.AssertNotNil(t, prober.cThisUpdate, "CRLConf.MakeProber(): nil cThisUpdate") + test.AssertNotNil(t, prober.cNextUpdate, "CRLConf.MakeProber(): nil cNextUpdate") + test.AssertNotNil(t, prober.cCertCount, "CRLConf.MakeProber(): nil cCertCount") + } + }) + } +} + +func TestCRLConf_UnmarshalSettings(t *testing.T) { + tests := []struct { + name string + fields probers.Settings + want probers.Configurer + wantErr bool + }{ + {"valid", probers.Settings{"url": "google.com"}, CRLConf{"google.com", false}, false}, + {"valid with partitioned", probers.Settings{"url": "google.com", "partitioned": true}, CRLConf{"google.com", true}, false}, + {"invalid (map)", probers.Settings{"url": make(map[string]any)}, nil, true}, + {"invalid (list)", probers.Settings{"url": make([]string, 0)}, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + settingsBytes, _ := yaml.Marshal(tt.fields) + t.Log(string(settingsBytes)) + c := CRLConf{} + got, err := c.UnmarshalSettings(settingsBytes) + if tt.wantErr { + test.AssertError(t, err, "CRLConf.UnmarshalSettings()") + } else { + test.AssertNotError(t, err, "CRLConf.UnmarshalSettings()") + } + test.AssertDeepEquals(t, got, tt.want) + }) + } +} diff --git a/observer/probers/dns/dns_conf.go b/observer/probers/dns/dns_conf.go index 76a7d543a5d..836780ee8b5 100644 --- a/observer/probers/dns/dns_conf.go +++ b/observer/probers/dns/dns_conf.go @@ -3,12 +3,16 @@ package probers import ( "fmt" "net" + "net/netip" + "slices" "strconv" "strings" - "github.com/letsencrypt/boulder/observer/probers" "github.com/miekg/dns" - "gopkg.in/yaml.v2" + "github.com/prometheus/client_golang/prometheus" + + 
"github.com/letsencrypt/boulder/observer/probers" + "github.com/letsencrypt/boulder/strictyaml" ) var ( @@ -24,10 +28,15 @@ type DNSConf struct { QType string `yaml:"query_type"` } +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. +func (c DNSConf) Kind() string { + return "DNS" +} + // UnmarshalSettings constructs a DNSConf object from YAML as bytes. func (c DNSConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) { var conf DNSConf - err := yaml.Unmarshal(settings, &conf) + err := strictyaml.Unmarshal(settings, &conf) if err != nil { return nil, err } @@ -52,13 +61,12 @@ func (c DNSConf) validateServer() error { return fmt.Errorf( "invalid `server`, %q, port number must be one in [1-65535]", c.Server) } - // Ensure `server` is a valid FQDN or IPv4 / IPv6 address. - IPv6 := net.ParseIP(host).To16() - IPv4 := net.ParseIP(host).To4() + // Ensure `server` is a valid FQDN or IP address. + _, err = netip.ParseAddr(host) FQDN := dns.IsFqdn(dns.Fqdn(host)) - if IPv6 == nil && IPv4 == nil && !FQDN { + if err != nil && !FQDN { return fmt.Errorf( - "invalid `server`, %q, is not an FQDN or IPv4 / IPv6 address", c.Server) + "invalid `server`, %q, is not an FQDN or IP address", c.Server) } return nil } @@ -66,10 +74,8 @@ func (c DNSConf) validateServer() error { func (c DNSConf) validateProto() error { validProtos := []string{"udp", "tcp"} proto := strings.Trim(strings.ToLower(c.Proto), " ") - for _, i := range validProtos { - if proto == i { - return nil - } + if slices.Contains(validProtos, proto) { + return nil } return fmt.Errorf( "invalid `protocol`, got: %q, expected one in: %s", c.Proto, validProtos) @@ -92,7 +98,7 @@ func (c DNSConf) validateQType() error { // MakeProber constructs a `DNSProbe` object from the contents of the // bound `DNSConf` object. If the `DNSConf` cannot be validated, an // error appropriate for end-user consumption is returned instead. -func (c DNSConf) MakeProber() (probers.Prober, error) { +func (c DNSConf) MakeProber(_ map[string]prometheus.Collector) (probers.Prober, error) { // validate `query_name` if !dns.IsFqdn(dns.Fqdn(c.QName)) { return nil, fmt.Errorf( @@ -126,8 +132,13 @@ func (c DNSConf) MakeProber() (probers.Prober, error) { }, nil } +// Instrument is a no-op to implement the `Configurer` interface. +func (c DNSConf) Instrument() map[string]prometheus.Collector { + return nil +} + // init is called at runtime and registers `DNSConf`, a `Prober` // `Configurer` type, as "DNS". 
func init() { - probers.Register("DNS", DNSConf{}) + probers.Register(DNSConf{}) } diff --git a/observer/probers/dns/dns_conf_test.go b/observer/probers/dns/dns_conf_test.go index f3e7bf63dda..906a81746a6 100644 --- a/observer/probers/dns/dns_conf_test.go +++ b/observer/probers/dns/dns_conf_test.go @@ -6,7 +6,7 @@ import ( "github.com/letsencrypt/boulder/observer/probers" "github.com/letsencrypt/boulder/test" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) func TestDNSConf_validateServer(t *testing.T) { @@ -157,7 +157,7 @@ func TestDNSConf_MakeProber(t *testing.T) { QName: tt.fields.QName, QType: tt.fields.QType, } - _, err := c.MakeProber() + _, err := c.MakeProber(nil) if tt.wantErr { test.AssertError(t, err, "DNSConf.MakeProber() should have errored") } else { @@ -169,11 +169,11 @@ func TestDNSConf_MakeProber(t *testing.T) { func TestDNSConf_UnmarshalSettings(t *testing.T) { type fields struct { - protocol interface{} - server interface{} - recurse interface{} - query_name interface{} - query_type interface{} + protocol any + server any + recurse any + query_name any + query_type any } tests := []struct { name string diff --git a/observer/probers/http/http.go b/observer/probers/http/http.go index 3b187a35a10..1a6e93bcc09 100644 --- a/observer/probers/http/http.go +++ b/observer/probers/http/http.go @@ -1,9 +1,14 @@ package probers import ( + "context" + "crypto/tls" "fmt" "net/http" + "slices" "time" + + "github.com/letsencrypt/boulder/observer/obsdialer" ) // HTTPProbe is the exported 'Prober' object for monitors configured to @@ -12,11 +17,17 @@ type HTTPProbe struct { url string rcodes []int useragent string + insecure bool } // Name returns a string that uniquely identifies the monitor. + func (p HTTPProbe) Name() string { - return fmt.Sprintf("%s-%d-%s", p.url, p.rcodes, p.useragent) + insecure := "" + if p.insecure { + insecure = "-insecure" + } + return fmt.Sprintf("%s-%d-%s%s", p.url, p.rcodes, p.useragent, insecure) } // Kind returns a name that uniquely identifies the `Kind` of `Prober`. @@ -27,18 +38,19 @@ func (p HTTPProbe) Kind() string { // isExpected ensures that the received HTTP response code matches one // that's expected. func (p HTTPProbe) isExpected(received int) bool { - for _, c := range p.rcodes { - if received == c { - return true - } - } - return false + return slices.Contains(p.rcodes, received) } // Probe performs the configured HTTP request. func (p HTTPProbe) Probe(timeout time.Duration) (bool, time.Duration) { - client := http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", p.url, nil) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + client := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: p.insecure}, + DialContext: obsdialer.Dialer.DialContext, + }} + req, err := http.NewRequestWithContext(ctx, "GET", p.url, nil) if err != nil { return false, 0 } diff --git a/observer/probers/http/http_conf.go b/observer/probers/http/http_conf.go index 648511d1e72..b40065be4fc 100644 --- a/observer/probers/http/http_conf.go +++ b/observer/probers/http/http_conf.go @@ -5,7 +5,8 @@ import ( "net/url" "github.com/letsencrypt/boulder/observer/probers" - "gopkg.in/yaml.v2" + "github.com/letsencrypt/boulder/strictyaml" + "github.com/prometheus/client_golang/prometheus" ) // HTTPConf is exported to receive YAML configuration. 
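Probe above moves the deadline from http.Client.Timeout onto a request context and routes dialing through obsdialer, with the new insecure setting mapped onto TLSClientConfig. A sketch of an equivalent one-off request (the URL is illustrative; the User-Agent is the prober's documented default):

package main

import (
    "context"
    "crypto/tls"
    "fmt"
    "net/http"
    "time"

    "github.com/letsencrypt/boulder/observer/obsdialer"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    client := http.Client{
        Transport: &http.Transport{
            // insecure: true in the monitor settings maps to InsecureSkipVerify.
            TLSClientConfig: &tls.Config{InsecureSkipVerify: false},
            DialContext:     obsdialer.Dialer.DialContext,
        },
    }
    req, err := http.NewRequestWithContext(ctx, "GET", "https://example.com", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("User-Agent", "letsencrypt/boulder-observer-http-client")
    resp, err := client.Do(req)
    if err != nil {
        fmt.Println("probe failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.StatusCode)
}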
@@ -13,13 +14,19 @@ type HTTPConf struct { URL string `yaml:"url"` RCodes []int `yaml:"rcodes"` UserAgent string `yaml:"useragent"` + Insecure bool `yaml:"insecure"` +} + +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. +func (c HTTPConf) Kind() string { + return "HTTP" } // UnmarshalSettings takes YAML as bytes and unmarshals it to the to an // HTTPConf object. func (c HTTPConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) { var conf HTTPConf - err := yaml.Unmarshal(settings, &conf) + err := strictyaml.Unmarshal(settings, &conf) if err != nil { return nil, err } @@ -57,7 +64,7 @@ func (c HTTPConf) validateRCodes() error { // MakeProber constructs a `HTTPProbe` object from the contents of the // bound `HTTPConf` object. If the `HTTPConf` cannot be validated, an // error appropriate for end-user consumption is returned instead. -func (c HTTPConf) MakeProber() (probers.Prober, error) { +func (c HTTPConf) MakeProber(_ map[string]prometheus.Collector) (probers.Prober, error) { // validate `url` err := c.validateURL() if err != nil { @@ -74,11 +81,16 @@ func (c HTTPConf) MakeProber() (probers.Prober, error) { if c.UserAgent == "" { c.UserAgent = "letsencrypt/boulder-observer-http-client" } - return HTTPProbe{c.URL, c.RCodes, c.UserAgent}, nil + return HTTPProbe{c.URL, c.RCodes, c.UserAgent, c.Insecure}, nil +} + +// Instrument is a no-op to implement the `Configurer` interface. +func (c HTTPConf) Instrument() map[string]prometheus.Collector { + return nil } // init is called at runtime and registers `HTTPConf`, a `Prober` // `Configurer` type, as "HTTP". func init() { - probers.Register("HTTP", HTTPConf{}) + probers.Register(HTTPConf{}) } diff --git a/observer/probers/http/http_conf_test.go b/observer/probers/http/http_conf_test.go index 6eb2bd287e2..d53d5211725 100644 --- a/observer/probers/http/http_conf_test.go +++ b/observer/probers/http/http_conf_test.go @@ -6,7 +6,7 @@ import ( "github.com/letsencrypt/boulder/observer/probers" "github.com/letsencrypt/boulder/test" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) func TestHTTPConf_MakeProber(t *testing.T) { @@ -35,7 +35,7 @@ func TestHTTPConf_MakeProber(t *testing.T) { URL: tt.fields.URL, RCodes: tt.fields.RCodes, } - if _, err := c.MakeProber(); (err != nil) != tt.wantErr { + if _, err := c.MakeProber(nil); (err != nil) != tt.wantErr { t.Errorf("HTTPConf.Validate() error = %v, wantErr %v", err, tt.wantErr) } }) @@ -44,9 +44,10 @@ func TestHTTPConf_MakeProber(t *testing.T) { func TestHTTPConf_UnmarshalSettings(t *testing.T) { type fields struct { - url interface{} - rcodes interface{} - useragent interface{} + url any + rcodes any + useragent any + insecure any } tests := []struct { name string @@ -54,8 +55,8 @@ func TestHTTPConf_UnmarshalSettings(t *testing.T) { want probers.Configurer wantErr bool }{ - {"valid", fields{"google.com", []int{200}, "boulder_observer"}, HTTPConf{"google.com", []int{200}, "boulder_observer"}, false}, - {"invalid", fields{42, 42, 42}, nil, true}, + {"valid", fields{"google.com", []int{200}, "boulder_observer", false}, HTTPConf{"google.com", []int{200}, "boulder_observer", false}, false}, + {"invalid", fields{42, 42, 42, 42}, nil, true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -63,6 +64,7 @@ func TestHTTPConf_UnmarshalSettings(t *testing.T) { "url": tt.fields.url, "rcodes": tt.fields.rcodes, "useragent": tt.fields.useragent, + "insecure": tt.fields.insecure, } settingsBytes, _ := yaml.Marshal(settings) c := HTTPConf{} @@ -84,13 +86,14 @@ func 
TestHTTPProberName(t *testing.T) { url: https://www.google.com rcodes: [ 200 ] useragent: "" +insecure: true ` c := HTTPConf{} configurer, err := c.UnmarshalSettings([]byte(proberYAML)) test.AssertNotError(t, err, "Got error for valid prober config") - prober, err := configurer.MakeProber() + prober, err := configurer.MakeProber(nil) test.AssertNotError(t, err, "Got error for valid prober config") - test.AssertEquals(t, prober.Name(), "https://www.google.com-[200]-letsencrypt/boulder-observer-http-client") + test.AssertEquals(t, prober.Name(), "https://www.google.com-[200]-letsencrypt/boulder-observer-http-client-insecure") // Test with custom `useragent` proberYAML = ` @@ -101,7 +104,7 @@ useragent: fancy-custom-http-client c = HTTPConf{} configurer, err = c.UnmarshalSettings([]byte(proberYAML)) test.AssertNotError(t, err, "Got error for valid prober config") - prober, err = configurer.MakeProber() + prober, err = configurer.MakeProber(nil) test.AssertNotError(t, err, "Got error for valid prober config") test.AssertEquals(t, prober.Name(), "https://www.google.com-[200]-fancy-custom-http-client") diff --git a/observer/probers/mock/mock_conf.go b/observer/probers/mock/mock_conf.go index 7840a1949fb..3640cb7fcf5 100644 --- a/observer/probers/mock/mock_conf.go +++ b/observer/probers/mock/mock_conf.go @@ -3,36 +3,47 @@ package probers import ( "errors" - "github.com/letsencrypt/boulder/cmd" + "github.com/prometheus/client_golang/prometheus" + + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/observer/probers" - "gopkg.in/yaml.v2" + "github.com/letsencrypt/boulder/strictyaml" ) type MockConfigurer struct { - Valid bool `yaml:"valid"` - ErrMsg string `yaml:"errmsg"` - PName string `yaml:"pname"` - PKind string `yaml:"pkind"` - PTook cmd.ConfigDuration `yaml:"ptook"` - PSuccess bool `yaml:"psuccess"` + Valid bool `yaml:"valid"` + ErrMsg string `yaml:"errmsg"` + PName string `yaml:"pname"` + PKind string `yaml:"pkind"` + PTook config.Duration `yaml:"ptook"` + PSuccess bool `yaml:"psuccess"` +} + +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. 
+func (c MockConfigurer) Kind() string { + return "Mock" } func (c MockConfigurer) UnmarshalSettings(settings []byte) (probers.Configurer, error) { var conf MockConfigurer - err := yaml.Unmarshal(settings, &conf) + err := strictyaml.Unmarshal(settings, &conf) if err != nil { return nil, err } return conf, nil } -func (c MockConfigurer) MakeProber() (probers.Prober, error) { +func (c MockConfigurer) MakeProber(_ map[string]prometheus.Collector) (probers.Prober, error) { if !c.Valid { return nil, errors.New("could not be validated") } return MockProber{c.PName, c.PKind, c.PTook, c.PSuccess}, nil } +func (c MockConfigurer) Instrument() map[string]prometheus.Collector { + return nil +} + func init() { - probers.Register("MockConf", MockConfigurer{}) + probers.Register(MockConfigurer{}) } diff --git a/observer/probers/mock/mock_prober.go b/observer/probers/mock/mock_prober.go index 446dc3a527f..2446da75095 100644 --- a/observer/probers/mock/mock_prober.go +++ b/observer/probers/mock/mock_prober.go @@ -3,13 +3,13 @@ package probers import ( "time" - "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" ) type MockProber struct { name string kind string - took cmd.ConfigDuration + took config.Duration success bool } diff --git a/observer/probers/prober.go b/observer/probers/prober.go index 76c6aafb35b..3bf1ec2b6ac 100644 --- a/observer/probers/prober.go +++ b/observer/probers/prober.go @@ -6,6 +6,7 @@ import ( "time" "github.com/letsencrypt/boulder/cmd" + "github.com/prometheus/client_golang/prometheus" ) var ( @@ -32,6 +33,10 @@ type Prober interface { // Configurer is the interface for `Configurer` types. type Configurer interface { + // Kind returns a name that uniquely identifies the `Kind` of + // `Configurer`. + Kind() string + // UnmarshalSettings unmarshals YAML as bytes to a `Configurer` // object. UnmarshalSettings([]byte) (Configurer, error) @@ -39,21 +44,33 @@ type Configurer interface { // MakeProber constructs a `Prober` object from the contents of the // bound `Configurer` object. If the `Configurer` cannot be // validated, an error appropriate for end-user consumption is - // returned instead. - MakeProber() (Prober, error) + // returned instead. The map of `prometheus.Collector` objects passed to + // MakeProber should be the same as the return value from Instrument() + MakeProber(map[string]prometheus.Collector) (Prober, error) + + // Instrument constructs any `prometheus.Collector` objects that a prober of + // the configured type will need to report its own metrics. A map is + // returned containing the constructed objects, indexed by the name of the + // prometheus metric. If no objects were constructed, nil is returned. + Instrument() map[string]prometheus.Collector } // Settings is exported as a temporary receiver for the `settings` field // of `MonConf`. `Settings` is always marshaled back to bytes and then // unmarshalled into the `Configurer` specified by the `Kind` field of // the `MonConf`. -type Settings map[string]interface{} +type Settings map[string]any + +// normalizeKind normalizes the input string by stripping spaces and +// transforming it into lowercase +func normalizeKind(kind string) string { + return strings.Trim(strings.ToLower(kind), " ") +} // GetConfigurer returns the probe configurer specified by name from // `Registry`. 
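With Kind() on the interface, Register no longer takes a name argument: a kind describes itself and registers in its package init. A hypothetical minimal kind showing every method the expanded Configurer and Prober interfaces require (package pingprobe, PingConf, and PingProbe are invented for illustration, not part of the PR):

package pingprobe

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/letsencrypt/boulder/observer/probers"
    "github.com/letsencrypt/boulder/strictyaml"
)

// PingConf is a hypothetical Configurer for a "Ping" monitor kind.
type PingConf struct {
    Target string `yaml:"target"`
}

// Kind lets Register derive the registry key ("ping" after normalization).
func (c PingConf) Kind() string { return "Ping" }

func (c PingConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) {
    var conf PingConf
    err := strictyaml.Unmarshal(settings, &conf)
    if err != nil {
        return nil, err
    }
    return conf, nil
}

func (c PingConf) MakeProber(_ map[string]prometheus.Collector) (probers.Prober, error) {
    return PingProbe{c.Target}, nil
}

// Instrument is a no-op; kinds without custom metrics return nil.
func (c PingConf) Instrument() map[string]prometheus.Collector { return nil }

// PingProbe is the matching Prober.
type PingProbe struct{ target string }

func (p PingProbe) Name() string { return p.target }
func (p PingProbe) Kind() string { return "Ping" }
func (p PingProbe) Probe(timeout time.Duration) (bool, time.Duration) {
    start := time.Now()
    // A real implementation would dial p.target within the timeout here.
    return true, time.Since(start)
}

func init() { probers.Register(PingConf{}) }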
func GetConfigurer(kind string) (Configurer, error) { - // normalize - name := strings.Trim(strings.ToLower(kind), " ") + name := normalizeKind(kind) // check if exists if _, ok := Registry[name]; ok { return Registry[name], nil @@ -65,13 +82,12 @@ func GetConfigurer(kind string) (Configurer, error) { // add the caller to the global `Registry` map. If the caller attempts // to add a `Configurer` to the registry using the same name as a prior // `Configurer` Observer will exit after logging an error. -func Register(kind string, c Configurer) { - // normalize - name := strings.Trim(strings.ToLower(kind), " ") +func Register(c Configurer) { + name := normalizeKind(c.Kind()) // check for name collision if _, exists := Registry[name]; exists { cmd.Fail(fmt.Sprintf( - "problem registering configurer %s: name collision", kind)) + "problem registering configurer %s: name collision", c.Kind())) } Registry[name] = c } diff --git a/observer/probers/tcp/tcp.go b/observer/probers/tcp/tcp.go new file mode 100644 index 00000000000..b978892fda0 --- /dev/null +++ b/observer/probers/tcp/tcp.go @@ -0,0 +1,36 @@ +package tcp + +import ( + "context" + "time" + + "github.com/letsencrypt/boulder/observer/obsdialer" +) + +type TCPProbe struct { + hostport string +} + +// Name returns a string that uniquely identifies the monitor. + +func (p TCPProbe) Name() string { + return p.hostport +} + +// Kind returns a name that uniquely identifies the `Kind` of `Prober`. +func (p TCPProbe) Kind() string { + return "TCP" +} + +// Probe performs the configured TCP dial. +func (p TCPProbe) Probe(timeout time.Duration) (bool, time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + start := time.Now() + c, err := obsdialer.Dialer.DialContext(ctx, "tcp", p.hostport) + if err != nil { + return false, time.Since(start) + } + c.Close() + return true, time.Since(start) +} diff --git a/observer/probers/tcp/tcp_conf.go b/observer/probers/tcp/tcp_conf.go new file mode 100644 index 00000000000..17576ecd78a --- /dev/null +++ b/observer/probers/tcp/tcp_conf.go @@ -0,0 +1,45 @@ +package tcp + +import ( + "github.com/letsencrypt/boulder/observer/probers" + "github.com/letsencrypt/boulder/strictyaml" + "github.com/prometheus/client_golang/prometheus" +) + +// TCPConf is exported to receive YAML configuration. +type TCPConf struct { + Hostport string `yaml:"hostport"` +} + +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. +func (c TCPConf) Kind() string { + return "TCP" +} + +// UnmarshalSettings takes YAML as bytes and unmarshals it to the to an +// TCPConf object. +func (c TCPConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) { + var conf TCPConf + err := strictyaml.Unmarshal(settings, &conf) + if err != nil { + return nil, err + } + return conf, nil +} + +// MakeProber constructs a `TCPPProbe` object from the contents of the +// bound `TCPPConf` object. +func (c TCPConf) MakeProber(_ map[string]prometheus.Collector) (probers.Prober, error) { + return TCPProbe{c.Hostport}, nil +} + +// Instrument is a no-op to implement the `Configurer` interface. +func (c TCPConf) Instrument() map[string]prometheus.Collector { + return nil +} + +// init is called at runtime and registers `TCPConf`, a `Prober` +// `Configurer` type, as "TCP". 
+func init() { + probers.Register(TCPConf{}) +} diff --git a/observer/probers/tls/tls.go b/observer/probers/tls/tls.go new file mode 100644 index 00000000000..070eceadf10 --- /dev/null +++ b/observer/probers/tls/tls.go @@ -0,0 +1,315 @@ +package probers + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/crypto/ocsp" + + "github.com/letsencrypt/boulder/observer/obsdialer" +) + +type reason int + +const ( + none reason = iota + internalError + revocationStatusError + rootDidNotMatch + statusDidNotMatch +) + +var reasonToString = map[reason]string{ + none: "nil", + internalError: "internalError", + revocationStatusError: "revocationStatusError", + rootDidNotMatch: "rootDidNotMatch", + statusDidNotMatch: "statusDidNotMatch", +} + +func getReasons() []string { + var allReasons []string + for _, v := range reasonToString { + allReasons = append(allReasons, v) + } + return allReasons +} + +// TLSProbe is the exported `Prober` object for monitors configured to perform +// TLS protocols. +type TLSProbe struct { + hostname string + rootOrg string + rootCN string + response string + notAfter *prometheus.GaugeVec + notBefore *prometheus.GaugeVec + reason *prometheus.CounterVec +} + +// Name returns a string that uniquely identifies the monitor. +func (p TLSProbe) Name() string { + return p.hostname +} + +// Kind returns a name that uniquely identifies the `Kind` of `Prober`. +func (p TLSProbe) Kind() string { + return "TLS" +} + +// Get OCSP status (good, revoked or unknown) of certificate +func checkOCSP(ctx context.Context, cert, issuer *x509.Certificate, want int) (bool, error) { + req, err := ocsp.CreateRequest(cert, issuer, nil) + if err != nil { + return false, err + } + + url := fmt.Sprintf("%s/%s", cert.OCSPServer[0], base64.StdEncoding.EncodeToString(req)) + r, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return false, err + } + + res, err := http.DefaultClient.Do(r) + if err != nil { + return false, err + } + + output, err := io.ReadAll(res.Body) + if err != nil { + return false, err + } + + ocspRes, err := ocsp.ParseResponseForCert(output, cert, issuer) + if err != nil { + return false, err + } + + return ocspRes.Status == want, nil +} + +func checkCRL(ctx context.Context, cert, issuer *x509.Certificate, want int) (bool, error) { + if len(cert.CRLDistributionPoints) != 1 { + return false, errors.New("cert does not contain CRLDP URI") + } + + req, err := http.NewRequestWithContext(ctx, "GET", cert.CRLDistributionPoints[0], nil) + if err != nil { + return false, fmt.Errorf("creating HTTP request: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false, fmt.Errorf("downloading CRL: %w", err) + } + defer resp.Body.Close() + + der, err := io.ReadAll(resp.Body) + if err != nil { + return false, fmt.Errorf("reading CRL: %w", err) + } + + crl, err := x509.ParseRevocationList(der) + if err != nil { + return false, fmt.Errorf("parsing CRL: %w", err) + } + + err = crl.CheckSignatureFrom(issuer) + if err != nil { + return false, fmt.Errorf("validating CRL: %w", err) + } + + for _, entry := range crl.RevokedCertificateEntries { + if entry.SerialNumber.Cmp(cert.SerialNumber) == 0 { + return want == ocsp.Revoked, nil + } + } + return want == ocsp.Good, nil +} + +// Return an error if the root settings are nonempty and do not match the +// expected root. 
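checkOCSP above uses the OCSP GET form from RFC 6960 appendix A.1: the DER-encoded request is base64-encoded and appended to the responder URL (like the PR, this sketch omits the additional URL-escaping the RFC also calls for). ocspGetURL is a hypothetical helper extracted for illustration:

package example

import (
    "crypto/x509"
    "encoding/base64"
    "errors"
    "fmt"

    "golang.org/x/crypto/ocsp"
)

// ocspGetURL builds the GET URL for an OCSP query about cert, signed
// information about which lives in issuer.
func ocspGetURL(cert, issuer *x509.Certificate) (string, error) {
    der, err := ocsp.CreateRequest(cert, issuer, nil)
    if err != nil {
        return "", err
    }
    if len(cert.OCSPServer) == 0 {
        return "", errors.New("certificate carries no OCSP responder URL")
    }
    return fmt.Sprintf("%s/%s", cert.OCSPServer[0], base64.StdEncoding.EncodeToString(der)), nil
}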
+func (p TLSProbe) checkRoot(rootOrg, rootCN string) error { + if (p.rootCN == "" && p.rootOrg == "") || (rootOrg == p.rootOrg && rootCN == p.rootCN) { + return nil + } + return fmt.Errorf("Expected root does not match.") +} + +// Export expiration timestamp and reason to Prometheus. +func (p TLSProbe) exportMetrics(cert *x509.Certificate, reason reason) { + if cert != nil { + p.notAfter.WithLabelValues(p.hostname).Set(float64(cert.NotAfter.Unix())) + p.notBefore.WithLabelValues(p.hostname).Set(float64(cert.NotBefore.Unix())) + } + p.reason.WithLabelValues(p.hostname, reasonToString[reason]).Inc() +} + +func (p TLSProbe) probeExpired(timeout time.Duration) bool { + addr := p.hostname + _, _, err := net.SplitHostPort(addr) + if err != nil { + addr = net.JoinHostPort(addr, "443") + } + + tlsDialer := tls.Dialer{ + NetDialer: &obsdialer.Dialer, + Config: &tls.Config{ + // Set InsecureSkipVerify to skip the default validation we are + // replacing. This will not disable VerifyConnection. + InsecureSkipVerify: true, + VerifyConnection: func(cs tls.ConnectionState) error { + issuers := x509.NewCertPool() + for _, cert := range cs.PeerCertificates[1:] { + issuers.AddCert(cert) + } + opts := x509.VerifyOptions{ + // We set the current time to be the cert's expiration date so that + // the validation routine doesn't complain that the cert is expired. + CurrentTime: cs.PeerCertificates[0].NotAfter, + // By settings roots and intermediates to be whatever was presented + // in the handshake, we're saying that we don't care about the cert + // chaining up to the system trust store. This is safe because we + // check the root ourselves in checkRoot(). + Intermediates: issuers, + Roots: issuers, + } + _, err := cs.PeerCertificates[0].Verify(opts) + return err + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + conn, err := tlsDialer.DialContext(ctx, "tcp", addr) + if err != nil { + p.exportMetrics(nil, internalError) + return false + } + defer conn.Close() + + // tls.Dialer.DialContext is documented to always return *tls.Conn + peers := conn.(*tls.Conn).ConnectionState().PeerCertificates + if time.Until(peers[0].NotAfter) > 0 { + p.exportMetrics(peers[0], statusDidNotMatch) + return false + } + + root := peers[len(peers)-1].Issuer + err = p.checkRoot(root.Organization[0], root.CommonName) + if err != nil { + p.exportMetrics(peers[0], rootDidNotMatch) + return false + } + + p.exportMetrics(peers[0], none) + return true +} + +func (p TLSProbe) probeUnexpired(timeout time.Duration) bool { + addr := p.hostname + _, _, err := net.SplitHostPort(addr) + if err != nil { + addr = net.JoinHostPort(addr, "443") + } + + tlsDialer := tls.Dialer{ + NetDialer: &obsdialer.Dialer, + Config: &tls.Config{ + // Set InsecureSkipVerify to skip the default validation we are + // replacing. This will not disable VerifyConnection. + InsecureSkipVerify: true, + VerifyConnection: func(cs tls.ConnectionState) error { + issuers := x509.NewCertPool() + for _, cert := range cs.PeerCertificates[1:] { + issuers.AddCert(cert) + } + opts := x509.VerifyOptions{ + // By settings roots and intermediates to be whatever was presented + // in the handshake, we're saying that we don't care about the cert + // chaining up to the system trust store. This is safe because we + // check the root ourselves in checkRoot(). 
+ Intermediates: issuers, + Roots: issuers, + } + _, err := cs.PeerCertificates[0].Verify(opts) + return err + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + conn, err := tlsDialer.DialContext(ctx, "tcp", addr) + if err != nil { + p.exportMetrics(nil, internalError) + return false + } + defer conn.Close() + + // tls.Dialer.DialContext is documented to always return *tls.Conn + peers := conn.(*tls.Conn).ConnectionState().PeerCertificates + root := peers[len(peers)-1].Issuer + err = p.checkRoot(root.Organization[0], root.CommonName) + if err != nil { + p.exportMetrics(peers[0], rootDidNotMatch) + return false + } + + var wantStatus int + switch p.response { + case "valid": + wantStatus = ocsp.Good + case "revoked": + wantStatus = ocsp.Revoked + } + + var statusMatch bool + if len(peers[0].OCSPServer) != 0 { + statusMatch, err = checkOCSP(ctx, peers[0], peers[1], wantStatus) + } else { + statusMatch, err = checkCRL(ctx, peers[0], peers[1], wantStatus) + } + if err != nil { + p.exportMetrics(peers[0], revocationStatusError) + return false + } + + if !statusMatch { + p.exportMetrics(peers[0], statusDidNotMatch) + return false + } + + p.exportMetrics(peers[0], none) + return true +} + +// Probe performs the configured TLS probe. Return true if the root has the +// expected Subject (or if no root is provided for comparison in settings), and +// the end entity certificate has the correct expiration status (either expired +// or unexpired, depending on what is configured). Exports metrics for the +// NotAfter timestamp of the end entity certificate and the reason for the Probe +// returning false ("none" if returns true). +func (p TLSProbe) Probe(timeout time.Duration) (bool, time.Duration) { + start := time.Now() + var success bool + if p.response == "expired" { + success = p.probeExpired(timeout) + } else { + success = p.probeUnexpired(timeout) + } + + return success, time.Since(start) +} diff --git a/observer/probers/tls/tls_conf.go b/observer/probers/tls/tls_conf.go new file mode 100644 index 00000000000..f09c284d161 --- /dev/null +++ b/observer/probers/tls/tls_conf.go @@ -0,0 +1,170 @@ +package probers + +import ( + "fmt" + "net" + "net/url" + "slices" + "strconv" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/letsencrypt/boulder/observer/probers" + "github.com/letsencrypt/boulder/strictyaml" +) + +const ( + notAfterName = "obs_tls_not_after" + notBeforeName = "obs_tls_not_before" + reasonName = "obs_tls_reason" +) + +// TLSConf is exported to receive YAML configuration. +type TLSConf struct { + Hostname string `yaml:"hostname"` + RootOrg string `yaml:"rootOrg"` + RootCN string `yaml:"rootCN"` + Response string `yaml:"response"` +} + +// Kind returns a name that uniquely identifies the `Kind` of `Configurer`. +func (c TLSConf) Kind() string { + return "TLS" +} + +// UnmarshalSettings takes YAML as bytes and unmarshals it to the to an TLSConf +// object. 
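Both probe variants above rely on the fact that InsecureSkipVerify disables only crypto/tls's built-in validation while a VerifyConnection callback still runs, letting the probe substitute its own chain check. A trimmed standalone sketch of that pattern (the target host is illustrative):

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
)

func main() {
    conf := &tls.Config{
        InsecureSkipVerify: true, // skips only the default validation
        VerifyConnection: func(cs tls.ConnectionState) error {
            // Trust exactly the chain the server presented; a caller like
            // TLSProbe then inspects the root's Subject itself.
            pool := x509.NewCertPool()
            for _, cert := range cs.PeerCertificates[1:] {
                pool.AddCert(cert)
            }
            _, err := cs.PeerCertificates[0].Verify(x509.VerifyOptions{
                Intermediates: pool,
                Roots:         pool,
            })
            return err
        },
    }
    conn, err := tls.Dial("tcp", "example.com:443", conf)
    if err != nil {
        fmt.Println("handshake failed:", err)
        return
    }
    defer conn.Close()
    fmt.Println("negotiated", tls.VersionName(conn.ConnectionState().Version))
}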
+func (c TLSConf) UnmarshalSettings(settings []byte) (probers.Configurer, error) { + var conf TLSConf + err := strictyaml.Unmarshal(settings, &conf) + if err != nil { + return nil, err + } + + return conf, nil +} + +func (c TLSConf) validateHostname() error { + hostname := c.Hostname + + if strings.Contains(c.Hostname, ":") { + host, port, err := net.SplitHostPort(c.Hostname) + if err != nil { + return fmt.Errorf("invalid 'hostname', got %q, expected a valid hostport: %s", c.Hostname, err) + } + + _, err = strconv.Atoi(port) + if err != nil { + return fmt.Errorf("invalid 'hostname', got %q, expected a valid hostport: %s", c.Hostname, err) + } + hostname = host + } + + url, err := url.Parse(hostname) + if err != nil { + return fmt.Errorf("invalid 'hostname', got %q, expected a valid hostname: %s", c.Hostname, err) + } + + if url.Scheme != "" { + return fmt.Errorf("invalid 'hostname', got: %q, should not include scheme", c.Hostname) + } + + return nil +} + +func (c TLSConf) validateResponse() error { + acceptable := []string{"valid", "expired", "revoked"} + if slices.Contains(acceptable, strings.ToLower(c.Response)) { + return nil + } + + return fmt.Errorf( + "invalid `response`, got %q. Must be one of %s", c.Response, acceptable) +} + +// MakeProber constructs a `TLSProbe` object from the contents of the bound +// `TLSConf` object. If the `TLSConf` cannot be validated, an error appropriate +// for end-user consumption is returned instead. +func (c TLSConf) MakeProber(collectors map[string]prometheus.Collector) (probers.Prober, error) { + // Validate `hostname` + err := c.validateHostname() + if err != nil { + return nil, err + } + + // Valid `response` + err = c.validateResponse() + if err != nil { + return nil, err + } + + // Validate the Prometheus collectors that were passed in + coll, ok := collectors[notAfterName] + if !ok { + return nil, fmt.Errorf("tls prober did not receive collector %q", notAfterName) + } + + notAfterColl, ok := coll.(*prometheus.GaugeVec) + if !ok { + return nil, fmt.Errorf("tls prober received collector %q of wrong type, got: %T, expected *prometheus.GaugeVec", notAfterName, coll) + } + + coll, ok = collectors[notBeforeName] + if !ok { + return nil, fmt.Errorf("tls prober did not receive collector %q", notBeforeName) + } + + notBeforeColl, ok := coll.(*prometheus.GaugeVec) + if !ok { + return nil, fmt.Errorf("tls prober received collector %q of wrong type, got: %T, expected *prometheus.GaugeVec", notBeforeName, coll) + } + + coll, ok = collectors[reasonName] + if !ok { + return nil, fmt.Errorf("tls prober did not receive collector %q", reasonName) + } + + reasonColl, ok := coll.(*prometheus.CounterVec) + if !ok { + return nil, fmt.Errorf("tls prober received collector %q of wrong type, got: %T, expected *prometheus.CounterVec", reasonName, coll) + } + + return TLSProbe{c.Hostname, c.RootOrg, c.RootCN, strings.ToLower(c.Response), notAfterColl, notBeforeColl, reasonColl}, nil +} + +// Instrument constructs any `prometheus.Collector` objects the `TLSProbe` will +// need to report its own metrics. A map is returned containing the constructed +// objects, indexed by the name of the Prometheus metric. If no objects were +// constructed, nil is returned. 
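UnmarshalSettings above goes through Boulder's strictyaml wrapper rather than yaml.v3 directly, so an unknown key in a monitor's settings fails loudly instead of being silently dropped. A sketch (tlsSettings is a stand-in struct; the misspelled key is deliberate):

package main

import (
    "fmt"

    "github.com/letsencrypt/boulder/strictyaml"
)

type tlsSettings struct {
    Hostname string `yaml:"hostname"`
    Response string `yaml:"response"`
}

func main() {
    good := []byte("hostname: example.com\nresponse: valid\n")
    bad := []byte("hostname: example.com\nresponce: valid\n") // typo'd key

    var s tlsSettings
    fmt.Println(strictyaml.Unmarshal(good, &s)) // expect <nil>
    fmt.Println(strictyaml.Unmarshal(bad, &s))  // expect an unknown-field error
}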
+func (c TLSConf) Instrument() map[string]prometheus.Collector { + notBefore := prometheus.Collector(prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: notBeforeName, + Help: "Certificate notBefore value as a Unix timestamp in seconds", + }, []string{"hostname"}, + )) + notAfter := prometheus.Collector(prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: notAfterName, + Help: "Certificate notAfter value as a Unix timestamp in seconds", + }, []string{"hostname"}, + )) + reason := prometheus.Collector(prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: reasonName, + Help: fmt.Sprintf("Reason for TLS Prober check failure. Can be one of %s", getReasons()), + }, []string{"hostname", "reason"}, + )) + return map[string]prometheus.Collector{ + notAfterName: notAfter, + notBeforeName: notBefore, + reasonName: reason, + } +} + +// init is called at runtime and registers `TLSConf`, a `Prober` `Configurer` +// type, as "TLS". +func init() { + probers.Register(TLSConf{}) +} diff --git a/observer/probers/tls/tls_conf_test.go b/observer/probers/tls/tls_conf_test.go new file mode 100644 index 00000000000..15984427c5c --- /dev/null +++ b/observer/probers/tls/tls_conf_test.go @@ -0,0 +1,114 @@ +package probers + +import ( + "reflect" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v3" + + "github.com/letsencrypt/boulder/observer/probers" +) + +func TestTLSConf_MakeProber(t *testing.T) { + goodHostname, goodRootCN, goodResponse := "example.com", "ISRG Root X1", "valid" + colls := TLSConf{}.Instrument() + badColl := prometheus.Collector(prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "obs_crl_foo", + Help: "Hmmm, this shouldn't be here...", + }, + []string{}, + )) + type fields struct { + Hostname string + RootCN string + Response string + } + tests := []struct { + name string + fields fields + colls map[string]prometheus.Collector + wantErr bool + }{ + // valid + {"valid hostname", fields{"example.com", goodRootCN, "valid"}, colls, false}, + {"valid hostname with path", fields{"example.com/foo/bar", "ISRG Root X2", "Revoked"}, colls, false}, + {"valid hostname with port", fields{"example.com:8080", goodRootCN, "expired"}, colls, false}, + + // invalid hostname + {"bad hostname", fields{":::::", goodRootCN, goodResponse}, colls, true}, + {"included scheme", fields{"https://example.com", goodRootCN, goodResponse}, colls, true}, + {"included scheme and port", fields{"https://example.com:443", goodRootCN, goodResponse}, colls, true}, + + // invalid response + {"empty response", fields{goodHostname, goodRootCN, ""}, colls, true}, + {"unaccepted response", fields{goodHostname, goodRootCN, "invalid"}, colls, true}, + + // invalid collector + { + "unexpected collector", + fields{"http://example.com", goodRootCN, goodResponse}, + map[string]prometheus.Collector{"obs_crl_foo": badColl}, + true, + }, + { + "missing collectors", + fields{"http://example.com", goodRootCN, goodResponse}, + map[string]prometheus.Collector{}, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := TLSConf{ + Hostname: tt.fields.Hostname, + RootCN: tt.fields.RootCN, + Response: tt.fields.Response, + } + if _, err := c.MakeProber(tt.colls); (err != nil) != tt.wantErr { + t.Errorf("TLSConf.Validate() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestTLSConf_UnmarshalSettings(t *testing.T) { + type fields struct { + hostname any + rootOrg any + rootCN any + response any + } + tests := []struct { + name string + fields fields + want 
probers.Configurer + wantErr bool + }{ + {"valid", fields{"google.com", "", "ISRG Root X1", "valid"}, TLSConf{"google.com", "", "ISRG Root X1", "valid"}, false}, + {"invalid hostname (map)", fields{make(map[string]any), 42, 42, 42}, nil, true}, + {"invalid rootOrg (list)", fields{42, make([]string, 0), 42, 42}, nil, true}, + {"invalid response (list)", fields{42, 42, 42, make([]string, 0)}, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + settings := probers.Settings{ + "hostname": tt.fields.hostname, + "rootOrg": tt.fields.rootOrg, + "rootCN": tt.fields.rootCN, + "response": tt.fields.response, + } + settingsBytes, _ := yaml.Marshal(settings) + c := TLSConf{} + got, err := c.UnmarshalSettings(settingsBytes) + if (err != nil) != tt.wantErr { + t.Errorf("DNSConf.UnmarshalSettings() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("DNSConf.UnmarshalSettings() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/ocsp/responder/db_source.go b/ocsp/responder/db_source.go deleted file mode 100644 index ffcce500bfb..00000000000 --- a/ocsp/responder/db_source.go +++ /dev/null @@ -1,80 +0,0 @@ -package responder - -import ( - "context" - "encoding/hex" - - "github.com/go-gorp/gorp/v3" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/db" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/sa" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -type dbSource struct { - dbMap dbSelector - counter *prometheus.CounterVec - log blog.Logger -} - -// dbSelector is a limited subset of the db.WrappedMap interface to allow for -// easier mocking of mysql operations in tests. -type dbSelector interface { - SelectOne(holder interface{}, query string, args ...interface{}) error - WithContext(ctx context.Context) gorp.SqlExecutor -} - -// NewDbSource returns a dbSource which will look up OCSP responses in a SQL -// database. -func NewDbSource(dbMap dbSelector, stats prometheus.Registerer, log blog.Logger) (*dbSource, error) { - counter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ocsp_db_responses", - Help: "Count of OCSP requests/responses by action taken by the dbSource", - }, []string{"result"}) - return &dbSource{ - dbMap: dbMap, - counter: counter, - log: log, - }, nil -} - -// Response implements the Source interface. It looks up the requested OCSP -// response in the sql database. If the certificate status row that it finds -// indicates that the cert is expired or this cert has never had an OCSP -// response generated for it, it returns an error. 
-func (src *dbSource) Response(ctx context.Context, req *ocsp.Request) (*Response, error) { - serialString := core.SerialToString(req.SerialNumber) - - certStatus, err := sa.SelectCertificateStatus(src.dbMap.WithContext(ctx), serialString) - if err != nil { - if db.IsNoRows(err) { - src.counter.WithLabelValues("not_found").Inc() - return nil, ErrNotFound - } - - src.log.AuditErrf("Looking up OCSP response in DB: %s", err) - src.counter.WithLabelValues("lookup_error").Inc() - return nil, err - } - - if certStatus.IsExpired { - src.log.Infof("OCSP Response not sent (expired) for CA=%s, Serial=%s", hex.EncodeToString(req.IssuerKeyHash), serialString) - src.counter.WithLabelValues("expired").Inc() - return nil, ErrNotFound - } else if certStatus.OCSPLastUpdated.IsZero() { - src.log.Warningf("OCSP Response not sent (ocspLastUpdated is zero) for CA=%s, Serial=%s", hex.EncodeToString(req.IssuerKeyHash), serialString) - src.counter.WithLabelValues("never_updated").Inc() - return nil, ErrNotFound - } - - resp, err := ocsp.ParseResponse(certStatus.OCSPResponse, nil) - if err != nil { - src.counter.WithLabelValues("parse_error").Inc() - return nil, err - } - - src.counter.WithLabelValues("success").Inc() - return &Response{Response: resp, Raw: certStatus.OCSPResponse}, nil -} diff --git a/ocsp/responder/db_source_test.go b/ocsp/responder/db_source_test.go deleted file mode 100644 index 7db7891fe52..00000000000 --- a/ocsp/responder/db_source_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package responder - -import ( - "context" - "database/sql" - "errors" - "fmt" - "io/ioutil" - "testing" - "time" - - "github.com/go-gorp/gorp/v3" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/db" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" - "golang.org/x/crypto/ocsp" -) - -// To mock out WithContext, we need to be able to return objects that satisfy -// gorp.SqlExecutor. That's a pretty big interface, so we specify one no-op mock -// that we can embed everywhere we need to satisfy it. -// Note: mockSqlExecutor does *not* implement WithContext. The expectation is -// that structs that embed mockSqlExecutor will define their own WithContext -// that returns a reference to themselves. That makes it easy for those structs -// to override the specific methods they need to implement (e.g. SelectOne). 
-type mockSqlExecutor struct{} - -func (mse mockSqlExecutor) Get(i interface{}, keys ...interface{}) (interface{}, error) { - return nil, errors.New("unimplemented") -} -func (mse mockSqlExecutor) Insert(list ...interface{}) error { - return errors.New("unimplemented") -} -func (mse mockSqlExecutor) Update(list ...interface{}) (int64, error) { - return 0, errors.New("unimplemented") -} -func (mse mockSqlExecutor) Delete(list ...interface{}) (int64, error) { - return 0, errors.New("unimplemented") -} -func (mse mockSqlExecutor) Exec(query string, args ...interface{}) (sql.Result, error) { - return nil, errors.New("unimplemented") -} -func (mse mockSqlExecutor) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { - return nil, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectInt(query string, args ...interface{}) (int64, error) { - return 0, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { - return sql.NullInt64{}, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectFloat(query string, args ...interface{}) (float64, error) { - return 0, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { - return sql.NullFloat64{}, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectStr(query string, args ...interface{}) (string, error) { - return "", errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { - return sql.NullString{}, errors.New("unimplemented") -} -func (mse mockSqlExecutor) SelectOne(holder interface{}, query string, args ...interface{}) error { - return errors.New("unimplemented") -} -func (mse mockSqlExecutor) Query(query string, args ...interface{}) (*sql.Rows, error) { - return nil, errors.New("unimplemented") -} -func (mse mockSqlExecutor) QueryRow(query string, args ...interface{}) *sql.Row { - return nil -} - -// echoSelector always returns the given certificateStatus. -type echoSelector struct { - mockSqlExecutor - status core.CertificateStatus -} - -func (s echoSelector) WithContext(context.Context) gorp.SqlExecutor { - return s -} - -func (s echoSelector) SelectOne(output interface{}, _ string, _ ...interface{}) error { - outputPtr, ok := output.(*core.CertificateStatus) - if !ok { - return fmt.Errorf("incorrect output type %T", output) - } - *outputPtr = s.status - return nil -} - -// errorSelector always returns the given error. -type errorSelector struct { - mockSqlExecutor - err error -} - -func (s errorSelector) SelectOne(_ interface{}, _ string, _ ...interface{}) error { - return s.err -} - -func (s errorSelector) WithContext(context.Context) gorp.SqlExecutor { - return s -} - -func TestDbSource(t *testing.T) { - reqBytes, err := ioutil.ReadFile("./testdata/ocsp.req") - test.AssertNotError(t, err, "failed to read OCSP request") - req, err := ocsp.ParseRequest(reqBytes) - test.AssertNotError(t, err, "failed to parse OCSP request") - - respBytes, err := ioutil.ReadFile("./testdata/ocsp.resp") - test.AssertNotError(t, err, "failed to read OCSP response") - - // Test for failure when the database lookup fails. 
- dbErr := errors.New("something went wrong") - src, err := NewDbSource(errorSelector{err: dbErr}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertEquals(t, err, dbErr) - - // Test for graceful recovery when the database returns no results. - dbErr = db.ErrDatabaseOp{ - Op: "test", - Table: "certificateStatus", - Err: sql.ErrNoRows, - } - src, err = NewDbSource(errorSelector{err: dbErr}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertErrorIs(t, err, ErrNotFound) - - // Test for converting expired results into no results. - status := core.CertificateStatus{ - IsExpired: true, - } - src, err = NewDbSource(echoSelector{status: status}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertErrorIs(t, err, ErrNotFound) - - // Test for converting never-updated results into no results. - status = core.CertificateStatus{ - IsExpired: false, - OCSPLastUpdated: time.Time{}, - } - src, err = NewDbSource(echoSelector{status: status}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertErrorIs(t, err, ErrNotFound) - - // Test for reporting parse errors. - status = core.CertificateStatus{ - IsExpired: false, - OCSPLastUpdated: time.Now(), - OCSPResponse: respBytes[1:], - } - src, err = NewDbSource(echoSelector{status: status}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertError(t, err, "expected failure") - - // Test the happy path. - status = core.CertificateStatus{ - IsExpired: false, - OCSPLastUpdated: time.Now(), - OCSPResponse: respBytes, - } - src, err = NewDbSource(echoSelector{status: status}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create dbSource") - _, err = src.Response(context.Background(), req) - test.AssertNotError(t, err, "unexpected failure") -} diff --git a/ocsp/responder/filter_source.go b/ocsp/responder/filter_source.go deleted file mode 100644 index 66e35d5edaa..00000000000 --- a/ocsp/responder/filter_source.go +++ /dev/null @@ -1,142 +0,0 @@ -package responder - -import ( - "bytes" - "context" - "crypto" - "encoding/hex" - "errors" - "fmt" - "strings" - - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -type responderID struct { - nameHash []byte - keyHash []byte -} - -type filterSource struct { - wrapped Source - hashAlgorithm crypto.Hash - issuers map[issuance.IssuerNameID]responderID - serialPrefixes []string - counter *prometheus.CounterVec - log blog.Logger -} - -// NewFilterSource returns a filterSource which performs various checks on the -// OCSP requests sent to the wrapped Source, and the OCSP responses returned -// by it. 
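Before the constructor body below, one plausible wiring of the filter: a hedged sketch inside this package, assuming its existing imports plus boulder's metrics package; newFilteredSource is a hypothetical helper, and the file paths and "00" prefix are illustrative values borrowed from the tests:

// newFilteredSource shows how the filter wraps an inner Source, here the
// in-memory source defined in inmem_source.go.
func newFilteredSource(logger blog.Logger) (Source, error) {
	issuer, err := issuance.LoadCertificate("testdata/test-ca.der.pem")
	if err != nil {
		return nil, err
	}
	inner, err := NewMemorySourceFromFile("testdata/resp64.pem", logger)
	if err != nil {
		return nil, err
	}
	return NewFilterSource(
		[]*issuance.Certificate{issuer}, // requests must hash to this issuer
		[]string{"00"},                  // and carry a recognized serial prefix
		inner,
		metrics.NoopRegisterer,
		logger,
	)
}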
-func NewFilterSource(issuerCerts []*issuance.Certificate, serialPrefixes []string, wrapped Source, stats prometheus.Registerer, log blog.Logger) (*filterSource, error) {
-	if len(issuerCerts) < 1 {
-		return nil, errors.New("Filter must include at least 1 issuer cert")
-	}
-	issuersByNameId := make(map[issuance.IssuerNameID]responderID)
-	for _, issuerCert := range issuerCerts {
-		keyHash := issuerCert.KeyHash()
-		nameHash := issuerCert.NameHash()
-		rid := responderID{
-			keyHash:  keyHash[:],
-			nameHash: nameHash[:],
-		}
-		issuersByNameId[issuerCert.NameID()] = rid
-	}
-	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
-		Name: "ocsp_filter_responses",
-		Help: "Count of OCSP requests/responses by action taken by the filter",
-	}, []string{"result"})
-	return &filterSource{
-		wrapped:        wrapped,
-		hashAlgorithm:  crypto.SHA1,
-		issuers:        issuersByNameId,
-		serialPrefixes: serialPrefixes,
-		counter:        counter,
-		log:            log,
-	}, nil
-}
-
-// Response implements the Source interface. It checks the incoming request
-// to ensure that we want to handle it, fetches the response from the wrapped
-// Source, and checks that the response matches the request.
-func (src *filterSource) Response(ctx context.Context, req *ocsp.Request) (*Response, error) {
-	iss, err := src.checkRequest(req)
-	if err != nil {
-		src.log.Debugf("Not responding to filtered OCSP request: %s", err.Error())
-		src.counter.WithLabelValues("request_filtered").Inc()
-		return nil, err
-	}
-
-	resp, err := src.wrapped.Response(ctx, req)
-	if err != nil {
-		src.counter.WithLabelValues("wrapped_error").Inc()
-		return nil, err
-	}
-
-	err = src.checkResponse(iss, resp)
-	if err != nil {
-		src.log.Warningf("OCSP Response not sent (issuer and serial mismatch) for CA=%s, Serial=%s", hex.EncodeToString(req.IssuerKeyHash), core.SerialToString(req.SerialNumber))
-		src.counter.WithLabelValues("response_filtered").Inc()
-		return nil, err
-	}
-
-	src.counter.WithLabelValues("success").Inc()
-	return resp, nil
-}
-
-// checkRequest returns a descriptive error if the request fails any of the
-// requirements of an OCSP request, or nil if the request should be handled.
-// If the request passes all checks, then checkRequest returns the unique id of
-// the issuer cert specified in the request.
-func (src *filterSource) checkRequest(req *ocsp.Request) (issuance.IssuerNameID, error) {
-	if req.HashAlgorithm != src.hashAlgorithm {
-		return 0, fmt.Errorf("unsupported issuer key/name hash algorithm %s: %w", req.HashAlgorithm, ErrNotFound)
-	}
-
-	if len(src.serialPrefixes) > 0 {
-		serialString := core.SerialToString(req.SerialNumber)
-		match := false
-		for _, prefix := range src.serialPrefixes {
-			if strings.HasPrefix(serialString, prefix) {
-				match = true
-				break
-			}
-		}
-		if !match {
-			return 0, fmt.Errorf("unrecognized serial prefix: %w", ErrNotFound)
-		}
-	}
-
-	for nameID, rid := range src.issuers {
-		if bytes.Equal(req.IssuerNameHash, rid.nameHash) && bytes.Equal(req.IssuerKeyHash, rid.keyHash) {
-			return nameID, nil
-		}
-	}
-	return 0, fmt.Errorf("unrecognized issuer key hash %s: %w", hex.EncodeToString(req.IssuerKeyHash), ErrNotFound)
-}
-
-// checkResponse returns nil if the ocsp response was generated by the same
-// issuer as was identified in the request, or an error otherwise. This filters
-// out, for example, responses which are for a serial that we issued, but from a
-// different issuer than that contained in the request.
-func (src *filterSource) checkResponse(reqIssuerID issuance.IssuerNameID, resp *Response) error { - respIssuerID := issuance.GetOCSPIssuerNameID(resp.Response) - if reqIssuerID != respIssuerID { - // This would be allowed if we used delegated responders, but we don't. - return fmt.Errorf("responder name does not match requested issuer name") - } - - // In an ideal world, we'd also compare the Issuer Key Hash from the request's - // CertID (equivalent to looking up the key hash in src.issuers) against the - // Issuer Key Hash contained in the response's CertID. However, the Go OCSP - // library does not provide access to the response's CertID, so we can't. - // Specifically, we want to compare `src.issuers[reqIssuerID].keyHash` against - // something like resp.CertID.IssuerKeyHash, but the latter does not exist. - - return nil -} diff --git a/ocsp/responder/filter_source_test.go b/ocsp/responder/filter_source_test.go deleted file mode 100644 index 85371f4e704..00000000000 --- a/ocsp/responder/filter_source_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package responder - -import ( - "context" - "crypto" - "encoding/hex" - "io/ioutil" - "testing" - - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/issuance" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" - "golang.org/x/crypto/ocsp" -) - -func TestNewFilter(t *testing.T) { - _, err := NewFilterSource([]*issuance.Certificate{}, []string{}, nil, metrics.NoopRegisterer, blog.NewMock()) - test.AssertError(t, err, "didn't error when creating empty filter") - - issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem") - test.AssertNotError(t, err, "failed to load issuer cert") - issuerNameId := issuer.NameID() - - f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, nil, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "errored when creating good filter") - test.AssertEquals(t, len(f.issuers), 1) - test.AssertEquals(t, len(f.serialPrefixes), 1) - test.AssertEquals(t, hex.EncodeToString(f.issuers[issuerNameId].keyHash), "fb784f12f96015832c9f177f3419b32e36ea4189") -} - -func TestCheckRequest(t *testing.T) { - issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem") - test.AssertNotError(t, err, "failed to load issuer cert") - - f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, nil, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "errored when creating good filter") - - reqBytes, err := ioutil.ReadFile("./testdata/ocsp.req") - test.AssertNotError(t, err, "failed to read OCSP request") - - // Select a bad hash algorithm. - ocspReq, err := ocsp.ParseRequest(reqBytes) - test.AssertNotError(t, err, "failed to prepare fake ocsp request") - ocspReq.HashAlgorithm = crypto.MD5 - _, err = f.Response(context.Background(), ocspReq) - test.AssertError(t, err, "accepted ocsp request with bad hash algorithm") - - // Make the hash invalid. - ocspReq, err = ocsp.ParseRequest(reqBytes) - test.AssertNotError(t, err, "failed to prepare fake ocsp request") - ocspReq.IssuerKeyHash[0]++ - _, err = f.Response(context.Background(), ocspReq) - test.AssertError(t, err, "accepted ocsp request with bad issuer key hash") - - // Make the serial prefix wrong by incrementing the first byte by 1. 
-	ocspReq, err = ocsp.ParseRequest(reqBytes)
-	test.AssertNotError(t, err, "failed to prepare fake ocsp request")
-	serialStr := []byte(core.SerialToString(ocspReq.SerialNumber))
-	serialStr[0] = serialStr[0] + 1
-	ocspReq.SerialNumber.SetString(string(serialStr), 16)
-	_, err = f.Response(context.Background(), ocspReq)
-	test.AssertError(t, err, "accepted ocsp request with bad serial prefix")
-}
-
-type echoSource struct {
-	resp *Response
-}
-
-func (src *echoSource) Response(context.Context, *ocsp.Request) (*Response, error) {
-	return src.resp, nil
-}
-
-func TestCheckResponse(t *testing.T) {
-	issuer, err := issuance.LoadCertificate("./testdata/test-ca.der.pem")
-	test.AssertNotError(t, err, "failed to load issuer cert")
-
-	reqBytes, err := ioutil.ReadFile("./testdata/ocsp.req")
-	test.AssertNotError(t, err, "failed to read OCSP request")
-	req, err := ocsp.ParseRequest(reqBytes)
-	test.AssertNotError(t, err, "failed to prepare fake ocsp request")
-
-	respBytes, err := ioutil.ReadFile("./testdata/ocsp.resp")
-	test.AssertNotError(t, err, "failed to read OCSP response")
-	resp, err := ocsp.ParseResponse(respBytes, nil)
-	test.AssertNotError(t, err, "failed to parse OCSP response")
-
-	source := &echoSource{&Response{resp, respBytes}}
-	f, err := NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, source, metrics.NoopRegisterer, blog.NewMock())
-	test.AssertNotError(t, err, "errored when creating good filter")
-
-	actual, err := f.Response(context.Background(), req)
-	test.AssertNotError(t, err, "unexpected error")
-	test.AssertEquals(t, actual.Response, resp)
-
-	// Overwrite the Responder Name in the stored response to cause a disagreement.
-	resp.RawResponderName = []byte("C = US, O = Foo, DN = Bar")
-	source = &echoSource{&Response{resp, respBytes}}
-	f, err = NewFilterSource([]*issuance.Certificate{issuer}, []string{"00"}, source, metrics.NoopRegisterer, blog.NewMock())
-	test.AssertNotError(t, err, "errored when creating good filter")
-
-	_, err = f.Response(context.Background(), req)
-	test.AssertError(t, err, "expected error")
-}
diff --git a/ocsp/responder/inmem_source.go b/ocsp/responder/inmem_source.go
deleted file mode 100644
index ba2fca0a9f5..00000000000
--- a/ocsp/responder/inmem_source.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package responder
-
-import (
-	"context"
-	"encoding/base64"
-	"io/ioutil"
-	"regexp"
-
-	blog "github.com/letsencrypt/boulder/log"
-	"golang.org/x/crypto/ocsp"
-)
-
-// inMemorySource wraps a map from serialNumber to Response and just looks up
-// Responses from that map with no safety checks. Useful for testing.
-type inMemorySource struct {
-	responses map[string]*Response
-	log       blog.Logger
-}
-
-// NewMemorySource returns an initialized InMemorySource which simply looks up
-// responses from an in-memory map based on the serial number in the request.
-func NewMemorySource(responses map[string]*Response, logger blog.Logger) (*inMemorySource, error) {
-	return &inMemorySource{
-		responses: responses,
-		log:       logger,
-	}, nil
-}
-
-// NewMemorySourceFromFile reads the named file into an InMemorySource.
-// The file read by this function must contain whitespace-separated OCSP
-// responses. Each OCSP response must be in base64-encoded DER form (i.e.,
-// PEM without headers or whitespace). Invalid responses are ignored.
-// This function pulls the entire file into an InMemorySource.
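To make the file format just described concrete, a small standalone sketch that produces it; writeResponseFile is a hypothetical helper, and each DER-encoded response becomes one base64 token per line:

package sketch

import (
	"encoding/base64"
	"fmt"
	"os"
)

// writeResponseFile writes raw DER OCSP responses in the format that
// NewMemorySourceFromFile expects: whitespace-separated base64 blobs.
func writeResponseFile(path string, derResponses [][]byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	for _, der := range derResponses {
		if _, err := fmt.Fprintln(f, base64.StdEncoding.EncodeToString(der)); err != nil {
			return err
		}
	}
	return nil
}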
-func NewMemorySourceFromFile(responseFile string, logger blog.Logger) (*inMemorySource, error) { - fileContents, err := ioutil.ReadFile(responseFile) - if err != nil { - return nil, err - } - - responsesB64 := regexp.MustCompile(`\s`).Split(string(fileContents), -1) - responses := make(map[string]*Response, len(responsesB64)) - for _, b64 := range responsesB64 { - // if the line/space is empty just skip - if b64 == "" { - continue - } - der, tmpErr := base64.StdEncoding.DecodeString(b64) - if tmpErr != nil { - logger.Errf("Base64 decode error %s on: %s", tmpErr, b64) - continue - } - - response, tmpErr := ocsp.ParseResponse(der, nil) - if tmpErr != nil { - logger.Errf("OCSP decode error %s on: %s", tmpErr, b64) - continue - } - - responses[response.SerialNumber.String()] = &Response{ - Response: response, - Raw: der, - } - } - - logger.Infof("Read %d OCSP responses", len(responses)) - return NewMemorySource(responses, logger) -} - -// Response looks up an OCSP response to provide for a given request. -// InMemorySource looks up a response purely based on serial number, -// without regard to what issuer the request is asking for. -func (src inMemorySource) Response(_ context.Context, request *ocsp.Request) (*Response, error) { - response, present := src.responses[request.SerialNumber.String()] - if !present { - return nil, ErrNotFound - } - return response, nil -} diff --git a/ocsp/responder/multi_source.go b/ocsp/responder/multi_source.go deleted file mode 100644 index a59ee20022f..00000000000 --- a/ocsp/responder/multi_source.go +++ /dev/null @@ -1,120 +0,0 @@ -package responder - -import ( - "context" - "errors" - "fmt" - - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -type multiSource struct { - primary Source - secondary Source - counter *prometheus.CounterVec - log blog.Logger -} - -func NewMultiSource(primary, secondary Source, stats prometheus.Registerer, log blog.Logger) (*multiSource, error) { - if primary == nil || secondary == nil { - return nil, errors.New("must provide both primary and secondary sources") - } - counter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ocsp_multiplex_responses", - Help: "Count of OCSP requests/responses by action taken by the multiSource", - }, []string{"result"}) - return &multiSource{ - primary: primary, - secondary: secondary, - counter: counter, - log: log, - }, nil -} - -// Response implements the Source interface. It performs lookups using both the -// primary and secondary wrapped Sources. It returns whichever response arrives -// first, with the caveat that if the secondary Source responds quicker, it will -// wait for the result from the primary to ensure that they agree. -func (src *multiSource) Response(ctx context.Context, req *ocsp.Request) (*Response, error) { - serialString := core.SerialToString(req.SerialNumber) - - primaryChan := getResponse(ctx, src.primary, req) - secondaryChan := getResponse(ctx, src.secondary, req) - - // If the primary source returns first, check the output and return - // it. If the secondary source wins, then wait for the primary so the - // results from the secondary can be verified. It is important that we - // never return a response from the secondary source that is good if the - // primary has a revoked status. If the secondary source wins the race and - // passes these checks, return its response instead. 
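Restating the policy in that comment in isolation, before the select below: a condensed standalone sketch, where result and pick are illustrative stand-ins for responseResult and the real selection logic, omitting the metrics counters:

package sketch

import "context"

// result stands in for this file's responseResult.
type result struct {
	status int
	err    error
}

// pick returns the primary result if it arrives first; a faster secondary
// result is used only after the primary confirms it (no errors, same status).
func pick(ctx context.Context, primary, secondary <-chan result) (result, error) {
	select {
	case <-ctx.Done():
		return result{}, ctx.Err()
	case p := <-primary:
		return p, p.err
	case s := <-secondary:
		var p result
		select {
		case <-ctx.Done():
			return result{}, ctx.Err()
		case p = <-primary:
		}
		if p.err != nil {
			return result{}, p.err
		}
		if s.err != nil || s.status != p.status {
			return p, nil // fall back to the verified primary result
		}
		return s, nil
	}
}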
- select { - case <-ctx.Done(): - src.counter.WithLabelValues("timed_out").Inc() - return nil, fmt.Errorf("looking up OCSP response for serial: %s err: %w", serialString, ctx.Err()) - - case primaryResult := <-primaryChan: - src.counter.WithLabelValues("primary_result").Inc() - return primaryResult.resp, primaryResult.err - - case secondaryResult := <-secondaryChan: - // If secondary returns first, wait for primary to return for - // comparison. - var primaryResult responseResult - - // Listen for cancellation or timeout waiting for primary result. - select { - case <-ctx.Done(): - src.counter.WithLabelValues("timed_out").Inc() - return nil, fmt.Errorf("looking up OCSP response for serial: %s err: %w", serialString, ctx.Err()) - - case primaryResult = <-primaryChan: - } - - // Check for error returned from the primary lookup, return on error. - if primaryResult.err != nil { - src.counter.WithLabelValues("primary_error").Inc() - return nil, primaryResult.err - } - - // Check for error returned from the secondary lookup. If error return - // primary lookup result. - if secondaryResult.err != nil { - src.counter.WithLabelValues("secondary_error").Inc() - return primaryResult.resp, nil - } - - // If the secondary response status doesn't match primary, return - // primary response. - if secondaryResult.resp.Status != primaryResult.resp.Status { - src.counter.WithLabelValues("mismatch").Inc() - return primaryResult.resp, nil - } - - // The secondary response has passed checks, return it. - src.counter.WithLabelValues("secondary_result").Inc() - return secondaryResult.resp, nil - } -} - -type responseResult struct { - resp *Response - err error -} - -// getResponse provides a thin wrapper around an underlying Source's Response -// method, calling it in a goroutine and passing the result back on a channel. -func getResponse(ctx context.Context, src Source, req *ocsp.Request) chan responseResult { - responseChan := make(chan responseResult) - - go func() { - defer close(responseChan) - - resp, err := src.Response(ctx, req) - responseChan <- responseResult{resp, err} - }() - - return responseChan -} diff --git a/ocsp/responder/multi_source_test.go b/ocsp/responder/multi_source_test.go deleted file mode 100644 index cd87302c9c3..00000000000 --- a/ocsp/responder/multi_source_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package responder - -import ( - "context" - "errors" - "testing" - - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - "github.com/letsencrypt/boulder/test" - "golang.org/x/crypto/ocsp" -) - -type succeedSource struct { - resp *Response -} - -func (src *succeedSource) Response(context.Context, *ocsp.Request) (*Response, error) { - if src.resp != nil { - return src.resp, nil - } - // We can't just return nil, as the multiSource checks the Statuses from each - // Source to ensure they agree. 
- return &Response{&ocsp.Response{Status: ocsp.Good}, []byte{}}, nil -} - -type failSource struct{} - -func (src *failSource) Response(context.Context, *ocsp.Request) (*Response, error) { - return nil, errors.New("failure") -} - -func TestBothGood(t *testing.T) { - src, err := NewMultiSource(&succeedSource{}, &succeedSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - _, err = src.Response(context.Background(), &ocsp.Request{}) - test.AssertNotError(t, err, "unexpected error") -} - -func TestPrimaryGoodSecondaryErr(t *testing.T) { - src, err := NewMultiSource(&succeedSource{}, &failSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - _, err = src.Response(context.Background(), &ocsp.Request{}) - test.AssertNotError(t, err, "unexpected error") -} - -func TestPrimaryErrSecondaryGood(t *testing.T) { - src, err := NewMultiSource(&failSource{}, &succeedSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - _, err = src.Response(context.Background(), &ocsp.Request{}) - test.AssertError(t, err, "expected error") -} - -func TestBothErr(t *testing.T) { - src, err := NewMultiSource(&failSource{}, &failSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - _, err = src.Response(context.Background(), &ocsp.Request{}) - test.AssertError(t, err, "expected error") -} - -func TestBothSucceedButDisagree(t *testing.T) { - otherResp := &Response{&ocsp.Response{Status: ocsp.Revoked}, []byte{}} - src, err := NewMultiSource(&succeedSource{otherResp}, &succeedSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - resp, err := src.Response(context.Background(), &ocsp.Request{}) - test.AssertNotError(t, err, "unexpected error") - test.AssertEquals(t, resp.Status, ocsp.Revoked) -} - -// blockingSource doesn't return until its channel is closed. -// Use `defer close(signal)` to cause it to block until the test is done. -type blockingSource struct { - signal chan struct{} -} - -func (src *blockingSource) Response(context.Context, *ocsp.Request) (*Response, error) { - <-src.signal - return nil, nil -} - -func TestPrimaryGoodSecondaryTimeout(t *testing.T) { - signal := make(chan struct{}) - defer close(signal) - - src, err := NewMultiSource(&succeedSource{}, &blockingSource{signal}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - _, err = src.Response(context.Background(), &ocsp.Request{}) - test.AssertNotError(t, err, "unexpected error") -} - -func TestPrimaryTimeoutSecondaryGood(t *testing.T) { - signal := make(chan struct{}) - defer close(signal) - - src, err := NewMultiSource(&blockingSource{signal}, &succeedSource{}, metrics.NoopRegisterer, blog.NewMock()) - test.AssertNotError(t, err, "failed to create multiSource") - - // We use cancellation instead of timeout so we don't have to wait on real time. 
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
-
-	errChan := make(chan error)
-	go func() {
-		_, err = src.Response(ctx, &ocsp.Request{})
-		errChan <- err
-	}()
-	cancel()
-	err = <-errChan
-
-	test.AssertError(t, err, "expected error")
-}
-
-func TestBothTimeout(t *testing.T) {
-	signal := make(chan struct{})
-	defer close(signal)
-
-	src, err := NewMultiSource(&blockingSource{signal}, &blockingSource{signal}, metrics.NoopRegisterer, blog.NewMock())
-	test.AssertNotError(t, err, "failed to create multiSource")
-
-	// We use cancellation instead of timeout so we don't have to wait on real time.
-	ctx := context.Background()
-	ctx, cancel := context.WithCancel(ctx)
-
-	errChan := make(chan error)
-	go func() {
-		_, err = src.Response(ctx, &ocsp.Request{})
-		errChan <- err
-	}()
-	cancel()
-	err = <-errChan
-
-	test.AssertError(t, err, "expected error")
-}
diff --git a/ocsp/responder/redis_source.go b/ocsp/responder/redis_source.go
deleted file mode 100644
index e546e5fec96..00000000000
--- a/ocsp/responder/redis_source.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package responder
-
-import (
-	"context"
-
-	"github.com/letsencrypt/boulder/core"
-	blog "github.com/letsencrypt/boulder/log"
-	"github.com/letsencrypt/boulder/rocsp"
-	"github.com/prometheus/client_golang/prometheus"
-	"golang.org/x/crypto/ocsp"
-)
-
-type redisSource struct {
-	client  *rocsp.Client
-	counter *prometheus.CounterVec
-	// Note: this logger is not currently used, as all audit log events are from
-	// the dbSource right now, but it should and will be used in the future.
-	log blog.Logger
-}
-
-// NewRedisSource returns a redisSource which will look up OCSP responses in
-// Redis.
-func NewRedisSource(client *rocsp.Client, stats prometheus.Registerer, log blog.Logger) (*redisSource, error) {
-	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
-		Name: "ocsp_redis_responses",
-		Help: "Count of OCSP requests/responses by action taken by the redisSource",
-	}, []string{"result"})
-	return &redisSource{
-		client:  client,
-		counter: counter,
-		log:     log,
-	}, nil
-}
-
-// Response implements the Source interface. It looks up the requested OCSP
-// response in the redis cluster.
-func (src *redisSource) Response(ctx context.Context, req *ocsp.Request) (*Response, error) {
-	serialString := core.SerialToString(req.SerialNumber)
-
-	respBytes, err := src.client.GetResponse(ctx, serialString)
-	if err != nil {
-		src.counter.WithLabelValues("lookup_error").Inc()
-		return nil, err
-	}
-
-	resp, err := ocsp.ParseResponse(respBytes, nil)
-	if err != nil {
-		src.counter.WithLabelValues("parse_error").Inc()
-		return nil, err
-	}
-
-	src.counter.WithLabelValues("success").Inc()
-	return &Response{Response: resp, Raw: respBytes}, nil
-}
diff --git a/ocsp/responder/responder.go b/ocsp/responder/responder.go
deleted file mode 100644
index d458aa1c78d..00000000000
--- a/ocsp/responder/responder.go
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
-This code was originally forked from https://github.com/cloudflare/cfssl/blob/1a911ca1b1d6e899bf97dcfa4a14b38db0d31134/ocsp/responder.go
-
-Copyright (c) 2014 CloudFlare Inc.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-// Package responder implements an OCSP HTTP responder based on a generic
-// storage backend.
-package responder
-
-import (
-	"context"
-	"crypto"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/honeycombio/beeline-go"
-	"github.com/jmhodges/clock"
-	"github.com/prometheus/client_golang/prometheus"
-	"golang.org/x/crypto/ocsp"
-
-	"github.com/letsencrypt/boulder/core"
-	blog "github.com/letsencrypt/boulder/log"
-)
-
-// ErrNotFound indicates the requested OCSP response was not found. It is used
-// to indicate that the responder should reply with unauthorizedErrorResponse.
-var ErrNotFound = errors.New("Request OCSP Response not found")
-
-var responseTypeToString = map[ocsp.ResponseStatus]string{
-	ocsp.Success:           "Success",
-	ocsp.Malformed:         "Malformed",
-	ocsp.InternalError:     "InternalError",
-	ocsp.TryLater:          "TryLater",
-	ocsp.SignatureRequired: "SignatureRequired",
-	ocsp.Unauthorized:      "Unauthorized",
-}
-
-// A Responder object provides an HTTP wrapper around a Source.
-type Responder struct {
-	Source        Source
-	timeout       time.Duration
-	responseTypes *prometheus.CounterVec
-	responseAges  prometheus.Histogram
-	requestSizes  prometheus.Histogram
-	clk           clock.Clock
-	log           blog.Logger
-}
-
-// NewResponder instantiates a Responder with the given Source.
-func NewResponder(source Source, timeout time.Duration, stats prometheus.Registerer, logger blog.Logger) *Responder {
-	requestSizes := prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Name:    "ocsp_request_sizes",
-			Help:    "Size of OCSP requests",
-			Buckets: []float64{1, 100, 200, 400, 800, 1200, 2000, 5000, 10000},
-		},
-	)
-	stats.MustRegister(requestSizes)
-
-	// Set up 12-hour-wide buckets, measured in seconds.
-	buckets := make([]float64, 14)
-	for i := range buckets {
-		buckets[i] = 43200 * float64(i)
-	}
-	responseAges := prometheus.NewHistogram(prometheus.HistogramOpts{
-		Name:    "ocsp_response_ages",
-		Help:    "How old are the OCSP responses when we serve them. Must stay well below 84 hours.",
-		Buckets: buckets,
-	})
-	stats.MustRegister(responseAges)
-
-	responseTypes := prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "ocsp_responses",
-			Help: "Number of OCSP responses returned by type",
-		},
-		[]string{"type"},
-	)
-	stats.MustRegister(responseTypes)
-
-	return &Responder{
-		Source:        source,
-		timeout:       timeout,
-		responseTypes: responseTypes,
-		responseAges:  responseAges,
-		requestSizes:  requestSizes,
-		clk:           clock.New(),
-		log:           logger,
-	}
-}
-
-type logEvent struct {
-	IP       string        `json:"ip,omitempty"`
-	UA       string        `json:"ua,omitempty"`
-	Method   string        `json:"method,omitempty"`
-	Path     string        `json:"path,omitempty"`
-	Body     string        `json:"body,omitempty"`
-	Received time.Time     `json:"received,omitempty"`
-	Took     time.Duration `json:"took,omitempty"`
-	Headers  http.Header   `json:"headers,omitempty"`
-
-	Serial         string `json:"serial,omitempty"`
-	IssuerKeyHash  string `json:"issuerKeyHash,omitempty"`
-	IssuerNameHash string `json:"issuerNameHash,omitempty"`
-	HashAlg        string `json:"hashAlg,omitempty"`
-}
-
-// hashToString contains mappings for the only hash functions
-// x/crypto/ocsp supports
-var hashToString = map[crypto.Hash]string{
-	crypto.SHA1:   "SHA1",
-	crypto.SHA256: "SHA256",
-	crypto.SHA384: "SHA384",
-	crypto.SHA512: "SHA512",
-}
-
-// A Responder can process both GET and POST requests. The mapping from an OCSP
-// request to an OCSP response is done by the Source; the Responder simply
-// decodes the request, and passes back whatever response is provided by the
-// source.
-// The Responder will set these headers:
-//   Cache-Control: "max-age=(response.NextUpdate-now), public, no-transform, must-revalidate",
-//   Last-Modified: response.ThisUpdate,
-//   Expires: response.NextUpdate,
-//   ETag: the SHA256 hash of the response, and
-//   Content-Type: application/ocsp-response.
-// Note: The caller must use http.StripPrefix to strip any path components
-// (including '/') on GET requests.
-// Do not use this responder in conjunction with http.NewServeMux, because the
-// default handler will try to canonicalize path components by changing any
-// strings of repeated '/' into a single '/', which will break the base64
-// encoding.
-func (rs Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) {
-	ctx := request.Context()
-
-	if rs.timeout != 0 {
-		var cancel func()
-		ctx, cancel = context.WithTimeout(ctx, rs.timeout)
-		defer cancel()
-	}
-
-	le := logEvent{
-		IP:       request.RemoteAddr,
-		UA:       request.UserAgent(),
-		Method:   request.Method,
-		Path:     request.URL.Path,
-		Received: time.Now(),
-	}
-	beeline.AddFieldToTrace(ctx, "real_ip", request.RemoteAddr)
-	beeline.AddFieldToTrace(ctx, "method", request.Method)
-	beeline.AddFieldToTrace(ctx, "user_agent", request.UserAgent())
-	beeline.AddFieldToTrace(ctx, "path", request.URL.Path)
-	defer func() {
-		le.Headers = response.Header()
-		le.Took = time.Since(le.Received)
-		jb, err := json.Marshal(le)
-		if err != nil {
-			// We log this error at the debug level because, if we aren't at
-			// that level anyway, we don't really care about failing to marshal
-			// the log event object.
-			rs.log.Debugf("failed to marshal log event object: %s", err)
-			return
-		}
-		rs.log.Debugf("Received request: %s", string(jb))
-	}()
-	// By default we set a 'max-age=0, no-cache' Cache-Control header; it is
-	// only returned to the client if a valid authorized OCSP response is not
-	// found or an error is returned. If a response is found, the header will
-	// be altered to contain the proper max-age and modifiers.
-	response.Header().Add("Cache-Control", "max-age=0, no-cache")
-	// Read response from request
-	var requestBody []byte
-	var err error
-	switch request.Method {
-	case "GET":
-		base64Request, err := url.QueryUnescape(request.URL.Path)
-		if err != nil {
-			rs.log.Debugf("Error decoding URL: %s", request.URL.Path)
-			rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
-			response.WriteHeader(http.StatusBadRequest)
-			return
-		}
-		// url.QueryUnescape not only unescapes %2B escaping, but it additionally
-		// turns the resulting '+' into a space, which makes base64 decoding fail.
-		// So we go back afterwards and turn ' ' back into '+'. This means we
-		// accept some malformed input that includes ' ' or %20, but that's fine.
-		base64RequestBytes := []byte(base64Request)
-		for i := range base64RequestBytes {
-			if base64RequestBytes[i] == ' ' {
-				base64RequestBytes[i] = '+'
-			}
-		}
-		// In certain situations a UA may construct a request that has a double
-		// slash between the host name and the base64 request body due to naively
-		// constructing the request URL. In that case strip the leading slash
-		// so that we can still decode the request.
-		if len(base64RequestBytes) > 0 && base64RequestBytes[0] == '/' {
-			base64RequestBytes = base64RequestBytes[1:]
-		}
-		requestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes))
-		if err != nil {
-			rs.log.Debugf("Error decoding base64 from URL: %s", string(base64RequestBytes))
-			response.WriteHeader(http.StatusBadRequest)
-			rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
-			return
-		}
-	case "POST":
-		requestBody, err = ioutil.ReadAll(http.MaxBytesReader(nil, request.Body, 10000))
-		if err != nil {
-			rs.log.Errf("Problem reading body of POST: %s", err)
-			response.WriteHeader(http.StatusBadRequest)
-			rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc()
-			return
-		}
-		rs.requestSizes.Observe(float64(len(requestBody)))
-	default:
-		response.WriteHeader(http.StatusMethodNotAllowed)
-		return
-	}
-	b64Body := base64.StdEncoding.EncodeToString(requestBody)
-	rs.log.Debugf("Received OCSP request: %s", b64Body)
-	if request.Method == http.MethodPost {
-		le.Body = b64Body
-	}
-
-	// All responses after this point will be OCSP.
-	// We could check for the content type of the request, but that
-	// seems unnecessarily restrictive.
-	response.Header().Add("Content-Type", "application/ocsp-response")
-
-	// Parse the body as an OCSP request.
-	// XXX: This fails if the request contains the nonce extension.
-	//      We don't intend to support nonces anyway, but maybe we
-	//      should return unauthorizedRequest instead of malformed.
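For reference, the inverse of the GET decoding handled above: how a client forms the path this handler parses. Per RFC 6960's GET form, the path is the URL-escaped base64 of the DER-encoded request; ocspGETPath is a hypothetical helper, and the URL and bytes are placeholders:

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// ocspGETPath builds the GET form of an OCSP query. derReq would typically
// come from ocsp.CreateRequest in golang.org/x/crypto/ocsp.
func ocspGETPath(responderURL string, derReq []byte) string {
	return responderURL + "/" + url.QueryEscape(base64.StdEncoding.EncodeToString(derReq))
}

func main() {
	fmt.Println(ocspGETPath("http://ocsp.example.com", []byte{0x30, 0x03}))
}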
- ocspRequest, err := ocsp.ParseRequest(requestBody) - if err != nil { - rs.log.Debugf("Error decoding request body: %s", b64Body) - response.WriteHeader(http.StatusBadRequest) - response.Write(ocsp.MalformedRequestErrorResponse) - rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Malformed]}).Inc() - return - } - le.Serial = fmt.Sprintf("%x", ocspRequest.SerialNumber.Bytes()) - beeline.AddFieldToTrace(ctx, "request.serial", core.SerialToString(ocspRequest.SerialNumber)) - le.IssuerKeyHash = fmt.Sprintf("%x", ocspRequest.IssuerKeyHash) - beeline.AddFieldToTrace(ctx, "ocsp.issuer_key_hash", ocspRequest.IssuerKeyHash) - le.IssuerNameHash = fmt.Sprintf("%x", ocspRequest.IssuerNameHash) - beeline.AddFieldToTrace(ctx, "ocsp.issuer_name_hash", ocspRequest.IssuerNameHash) - le.HashAlg = hashToString[ocspRequest.HashAlgorithm] - beeline.AddFieldToTrace(ctx, "ocsp.hash_alg", hashToString[ocspRequest.HashAlgorithm]) - - // Look up OCSP response from source - ocspResponse, err := rs.Source.Response(ctx, ocspRequest) - if err != nil { - if errors.Is(err, ErrNotFound) { - rs.log.Infof("No response found for request: serial %x, request body %s", - ocspRequest.SerialNumber, b64Body) - response.Write(ocsp.UnauthorizedErrorResponse) - rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Unauthorized]}).Inc() - return - } - rs.log.Infof("Error retrieving response for request: serial %x, request body %s, error: %s", - ocspRequest.SerialNumber, b64Body, err) - response.WriteHeader(http.StatusInternalServerError) - response.Write(ocsp.InternalErrorErrorResponse) - rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.InternalError]}).Inc() - return - } - - // Write OCSP response - response.Header().Add("Last-Modified", ocspResponse.ThisUpdate.Format(time.RFC1123)) - response.Header().Add("Expires", ocspResponse.NextUpdate.Format(time.RFC1123)) - now := rs.clk.Now() - maxAge := 0 - if now.Before(ocspResponse.NextUpdate) { - maxAge = int(ocspResponse.NextUpdate.Sub(now) / time.Second) - } else { - // TODO(#530): we want max-age=0 but this is technically an authorized OCSP response - // (despite being stale) and 5019 forbids attaching no-cache - maxAge = 0 - } - response.Header().Set( - "Cache-Control", - fmt.Sprintf( - "max-age=%d, public, no-transform, must-revalidate", - maxAge, - ), - ) - responseHash := sha256.Sum256(ocspResponse.Raw) - response.Header().Add("ETag", fmt.Sprintf("\"%X\"", responseHash)) - - serialString := core.SerialToString(ocspResponse.SerialNumber) - if len(serialString) > 2 { - // Set a cache tag that is equal to the last two bytes of the serial. - // We expect that to be randomly distributed, so each tag should map to - // about 1/256 of our responses. 
- response.Header().Add("Edge-Cache-Tag", serialString[len(serialString)-2:]) - } - - // RFC 7232 says that a 304 response must contain the above - // headers if they would also be sent for a 200 for the same - // request, so we have to wait until here to do this - if etag := request.Header.Get("If-None-Match"); etag != "" { - if etag == fmt.Sprintf("\"%X\"", responseHash) { - response.WriteHeader(http.StatusNotModified) - return - } - } - response.WriteHeader(http.StatusOK) - response.Write(ocspResponse.Raw) - rs.responseAges.Observe(rs.clk.Now().Sub(ocspResponse.ThisUpdate).Seconds()) - rs.responseTypes.With(prometheus.Labels{"type": responseTypeToString[ocsp.Success]}).Inc() -} diff --git a/ocsp/responder/responder_test.go b/ocsp/responder/responder_test.go deleted file mode 100644 index 08392e58eb2..00000000000 --- a/ocsp/responder/responder_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -This code was originally forked from https://github.com/cloudflare/cfssl/blob/1a911ca1b1d6e899bf97dcfa4a14b38db0d31134/ocsp/responder_test.go - -Copyright (c) 2014 CloudFlare Inc. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -package responder - -import ( - "bytes" - "context" - "encoding/hex" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - "github.com/jmhodges/clock" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" - - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/test" -) - -const ( - responseFile = "testdata/resp64.pem" - binResponseFile = "testdata/response.der" - brokenResponseFile = "testdata/response_broken.pem" - mixResponseFile = "testdata/response_mix.pem" -) - -type testSource struct{} - -func (ts testSource) Response(_ context.Context, r *ocsp.Request) (*Response, error) { - respBytes, err := hex.DecodeString("3082031D0A0100A08203163082031206092B060105050730010104820303308202FF3081E8A1453043310B300906035504061302555331123010060355040A1309676F6F6420677579733120301E06035504031317434120696E7465726D6564696174652028525341292041180F32303230303631393030333730305A30818D30818A304C300906052B0E03021A0500041417779CF67D84CD4449A2FC7EAC431F9823D8575A04149F2970E80CF9C75ECC1F2871D8C390CD19F40108021300FF8B2AEC5293C6B31D0BC0BA329CF594E7BAA116180F32303230303631393030333733305AA0030A0101180F32303230303631393030303030305AA011180F32303230303632333030303030305A300D06092A864886F70D01010B0500038202010011688303203098FC522D2C599A234B136930E3C4680F2F3192188B98D6EE90E8479449968C51335FADD1636584ACEA9D01A30790BD90190FA35A47E793718128B19E9ED156382C1B68245A6887F547B0B86C44C2354B8DBA94D8BFCAA768EB55FA84AEB4026DBEFC687DB280D21C0B3497A11909804A20F402BDD95E4843C02E30435C2570FFC4EB152FE2785B8D268AC996619644AEC9CF50959D46DEB21DFE96B4D2881D61ABBCA9B6BFEC2DB9132801CAE737C862F0AEAB4948B63F35740CE93FCDBC148F5070790D7BBA1A87E15078CD8335F83686142CE8AC3AD21FAE45B87A7B12562D9F245352A83E3901E97E5EC77E9817990712D8BE60860ABA58804DDE4ECDCA6AEFD3D8764FDBABF0AB1902FA9A7C4C3F5814C25C5E78E0754469E087CAED81E50A5873CADFCAC42963AB38CFD11096BE4201DE4589B57EC48B3DA05A65800D654160E022F6748CD93B431A17270C1B27E313734FCF85F22547D060F23F594BD68C6330C2705190A04905FBD2389E2DD21C0188809E03D713F56BF95953C9897DA6D4D074D70F164270C41BFB386B69E86EB3B9192FEA8F43CE5368CC9AF8687DEE567672A8580BA6A9F76E6E6705DD2F76F48C2C180C763CF4C48AF78C25D40EA7278CB2FBC78958B3179301825B420A7CAE7ACE4C41B5BA7D567AABC9C2701EE75A28F9181E044EDAAA55A31538AA9C526D4C324B9AE58D2922") - if err != nil { - return nil, err - } - resp, err := ocsp.ParseResponse(respBytes, nil) - if err != nil { - return nil, err - } - return &Response{resp, respBytes}, nil -} - -type testCase struct { - method, path string - expected int -} - -func TestOCSP(t *testing.T) { - cases := []testCase{ - {"OPTIONS", "/", http.StatusMethodNotAllowed}, - {"GET", "/", http.StatusBadRequest}, - // Bad URL encoding - {"GET", "%ZZFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest}, - // Bad URL encoding - {"GET", "%%FQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest}, - // Bad base64 encoding - {"GET", "==MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest}, - // Bad OCSP DER encoding - {"GET", "AAAMFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusBadRequest}, - // Good encoding all around, including a double slash - {"GET", 
"MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusOK}, - // Good request, leading slash - {"GET", "/MFQwUjBQME4wTDAJBgUrDgMCGgUABBQ55F6w46hhx%2Fo6OXOHa%2BYfe32YhgQU%2B3hPEvlgFYMsnxd%2FNBmzLjbqQYkCEwD6Wh0MaVKu9gJ3By9DI%2F%2Fxsd4%3D", http.StatusOK}, - } - - responder := Responder{ - Source: testSource{}, - responseTypes: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "ocspResponses-test", - }, - []string{"type"}, - ), - responseAges: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "ocspAges-test", - Buckets: []float64{43200}, - }, - ), - clk: clock.NewFake(), - log: blog.NewMock(), - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s %s", tc.method, tc.path), func(t *testing.T) { - rw := httptest.NewRecorder() - responder.responseTypes.Reset() - - responder.ServeHTTP(rw, &http.Request{ - Method: tc.method, - URL: &url.URL{ - Path: tc.path, - }, - }) - if rw.Code != tc.expected { - t.Errorf("Incorrect response code: got %d, wanted %d", rw.Code, tc.expected) - } - if rw.Code == http.StatusOK { - test.AssertMetricWithLabelsEquals( - t, responder.responseTypes, prometheus.Labels{"type": "Success"}, 1) - } else if rw.Code == http.StatusBadRequest { - test.AssertMetricWithLabelsEquals( - t, responder.responseTypes, prometheus.Labels{"type": "Malformed"}, 1) - } - }) - } - // Exactly two of the cases above result in an OCSP response being sent. - test.AssertMetricWithLabelsEquals(t, responder.responseAges, prometheus.Labels{}, 2) -} - -func TestRequestTooBig(t *testing.T) { - responder := Responder{ - Source: testSource{}, - responseTypes: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "ocspResponses-test", - }, - []string{"type"}, - ), - responseAges: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "ocspAges-test", - Buckets: []float64{43200}, - }, - ), - clk: clock.NewFake(), - log: blog.NewMock(), - } - - rw := httptest.NewRecorder() - - responder.ServeHTTP(rw, httptest.NewRequest("POST", "/", - bytes.NewBuffer([]byte(strings.Repeat("a", 10001))))) - expected := 400 - if rw.Code != expected { - t.Errorf("Incorrect response code: got %d, wanted %d", rw.Code, expected) - } -} - -func TestCacheHeaders(t *testing.T) { - source, err := NewMemorySourceFromFile(responseFile, blog.NewMock()) - if err != nil { - t.Fatalf("Error constructing source: %s", err) - } - - fc := clock.NewFake() - fc.Set(time.Date(2015, 11, 12, 0, 0, 0, 0, time.UTC)) - responder := Responder{ - Source: source, - responseTypes: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "ocspResponses-test", - }, - []string{"type"}, - ), - responseAges: prometheus.NewHistogram( - prometheus.HistogramOpts{ - Name: "ocspAges-test", - Buckets: []float64{43200}, - }, - ), - clk: fc, - log: blog.NewMock(), - } - - rw := httptest.NewRecorder() - responder.ServeHTTP(rw, &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: "MEMwQTA/MD0wOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJN", - }, - }) - if rw.Code != http.StatusOK { - t.Errorf("Unexpected HTTP status code %d", rw.Code) - } - testCases := []struct { - header string - value string - }{ - {"Last-Modified", "Tue, 20 Oct 2015 00:00:00 UTC"}, - {"Expires", "Sun, 20 Oct 2030 00:00:00 UTC"}, - {"Cache-Control", "max-age=471398400, public, no-transform, must-revalidate"}, - {"Etag", "\"8169FB0843B081A76E9F6F13FD70C8411597BEACF8B182136FFDD19FBD26140A\""}, - } - for _, tc := range testCases { - 
headers, ok := rw.Result().Header[tc.header] - if !ok { - t.Errorf("Header %s missing from HTTP response", tc.header) - continue - } - if len(headers) != 1 { - t.Errorf("Wrong number of headers in HTTP response. Wanted 1, got %d", len(headers)) - continue - } - actual := headers[0] - if actual != tc.value { - t.Errorf("Got header %s: %s. Expected %s", tc.header, actual, tc.value) - } - } - - rw = httptest.NewRecorder() - headers := http.Header{} - headers.Add("If-None-Match", "\"8169FB0843B081A76E9F6F13FD70C8411597BEACF8B182136FFDD19FBD26140A\"") - responder.ServeHTTP(rw, &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: "MEMwQTA/MD0wOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJN", - }, - Header: headers, - }) - if rw.Code != http.StatusNotModified { - t.Fatalf("Got wrong status code: expected %d, got %d", http.StatusNotModified, rw.Code) - } -} - -func TestNewSourceFromFile(t *testing.T) { - logger := blog.NewMock() - _, err := NewMemorySourceFromFile("", logger) - if err == nil { - t.Fatal("Didn't fail on non-file input") - } - - // expected case - _, err = NewMemorySourceFromFile(responseFile, logger) - if err != nil { - t.Fatal(err) - } - - // binary-formatted file - _, err = NewMemorySourceFromFile(binResponseFile, logger) - if err != nil { - t.Fatal(err) - } - - // the response file from before, with stuff deleted - _, err = NewMemorySourceFromFile(brokenResponseFile, logger) - if err != nil { - t.Fatal(err) - } - - // mix of a correct and malformed responses - _, err = NewMemorySourceFromFile(mixResponseFile, logger) - if err != nil { - t.Fatal(err) - } -} diff --git a/ocsp/responder/source.go b/ocsp/responder/source.go deleted file mode 100644 index d0c39ae8f65..00000000000 --- a/ocsp/responder/source.go +++ /dev/null @@ -1,20 +0,0 @@ -package responder - -import ( - "context" - - "golang.org/x/crypto/ocsp" -) - -// Response is a wrapper around the standard library's *ocsp.Response, but it -// also carries with it the raw bytes of the encoded response. -type Response struct { - *ocsp.Response - Raw []byte -} - -// Source represents the logical source of OCSP responses, i.e., -// the logic that actually chooses a response based on a request. -type Source interface { - Response(context.Context, *ocsp.Request) (*Response, error) -} diff --git a/ocsp/responder/testdata/LICENSE b/ocsp/responder/testdata/LICENSE deleted file mode 100644 index ed930287561..00000000000 --- a/ocsp/responder/testdata/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -These files were originally taken from https://github.com/cloudflare/cfssl/tree/1a911ca1b1d6e899bf97dcfa4a14b38db0d31134/ocsp/testdata - -Copyright (c) 2014 CloudFlare Inc. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ocsp/responder/testdata/ocsp.req b/ocsp/responder/testdata/ocsp.req deleted file mode 100644 index 5878715020d..00000000000 Binary files a/ocsp/responder/testdata/ocsp.req and /dev/null differ diff --git a/ocsp/responder/testdata/ocsp.resp b/ocsp/responder/testdata/ocsp.resp deleted file mode 100644 index a35f0bb9fb8..00000000000 Binary files a/ocsp/responder/testdata/ocsp.resp and /dev/null differ diff --git a/ocsp/responder/testdata/resp64.pem b/ocsp/responder/testdata/resp64.pem deleted file mode 100644 index dea2591d58b..00000000000 --- a/ocsp/responder/testdata/resp64.pem +++ /dev/null @@ -1,2 +0,0 @@ -MIIFCAoBAKCCBQEwggT9BgkrBgEFBQcwAQEEggTuMIIE6jCBrKADAgEAoS0wKzEpMCcGA1UEAwwgY2Fja2xpbmcgY3J5cHRvZ3JhcGhlciBmYWtlIFJPT1QYDzIwMTUxMDIxMjEyNjAwWjBlMGMwOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJNgAAYDzIwMTUwOTAxMDAwMDAwWqARGA8yMDE0MDEwMTAwMDAwMFowDQYJKoZIhvcNAQELBQADggEBAHlFcNKa7mZDJeWzJt1S45kx4gDqOLzyeZzflFbSjsrHRrLA7Y3RKoy0i4Y9Vi6Jfhe7xj6dgDMJy1Z1qayI/Q8QvnaU6V2kFcnaD7pah9uALu2xNYMJPllq8KsQYvDLa1E2PMvQTqDhY2/QrIuxw3jkqtzeI5aG0idFm3aF1z/v3dt6XPWjE8IlAJfXY4CeUorLvA+mK2YHJ3V7MSgymVXZdyth1rg0/0cP9v77Rlb8hmWA/EUMcIPKQqErVQK+gZiVC0SfElaMO25CD9cjY+fd904oC5+ahvhHXxOSEbXVZBT1FY2teFCKEpx86gAVcZWpGmVwJO+dpsrkgwpN786gggMjMIIDHzCCAxswggIDoAMCAQICCQDNMc/iNkPNdTANBgkqhkiG9w0BAQsFADArMSkwJwYDVQQDDCBjYWNrbGluZyBjcnlwdG9ncmFwaGVyIGZha2UgUk9PVDAeFw0xNTEwMjEyMDExNTJaFw0yMDEwMTkyMDExNTJaMCsxKTAnBgNVBAMMIGNhY2tsaW5nIGNyeXB0b2dyYXBoZXIgZmFrZSBST09UMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+TbvalHXQYO6GhJUJZI5mF2k4+nZDIvqWyrjw+2k9+UAcekuLKPpSclu9aBRvUggw3XFHAW95qW6Dv2+5gvinUmTq9Ry7kVTUYAxyZu1ydHt+wDETmFJfeY6/fpBHHIsuGLItqpUGmr8D6LROGEqfFY2B9+08O7Zs+FufDRgLHWEvLTdpPkrzeDJs9Oo6g38jfT9b4+9Ahs+FvvwqneAkbeZgBC2NWKB+drMuNBTPbF/W1a8czAzHeOs6qy0dBlTHNjL62/o9cRKNiKe3IqwHJdd01V1aLSUgIbe2HrP9EC1djnUXWR3jx3ursaKt7PTKsC52UJkRqnai80MzQj0WwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU6aQ/7p6l5vLV13lgPJOmLiSOl6owDQYJKoZIhvcNAQELBQADggEBACuwILDTvaBrdorv2zMsYnZuKvXtknWAf/DTcvF4N5PMOPBNkeHuGfv0VDe6VXpBHiU5G9E2RdU435W7o0kRSn27YcqrxaXGt9m2kArW6e49136+MnFx47jjk0p4T48s6MeaL5JVLJzxYouu1ZOZqlVokwNPO+8bxn6ALumIVUOD1jSBN7Y9pgLUS2rzO5pe5pxS2Ak/eO7Q7M21r1sEuG/uPuWqBFogk+4Z9omKVZdRDbzm9vYUATgEZdlTe2tct3BVBQ2zWbe0R2svIuCs8XzERykvfv1JawxI68I9vN0Dh9vj/xDM6udorfALlhjgQdftmbHovRLpJ1ZSOMIUNGY= 
-MIIFCAoBAKCCBQEwggT9BgkrBgEFBQcwAQEEggTuMIIE6jCBrKADAgEAoS0wKzEpMCcGA1UEAwwgY2Fja2xpbmcgY3J5cHRvZ3JhcGhlciBmYWtlIFJPT1QYDzIwMTUxMDIxMjA1NTAwWjBlMGMwOzAJBgUrDgMCGgUABBSwLsMRhyg1dJUwnXWk++D57lvgagQU6aQ/7p6l5vLV13lgPJOmLiSOl6oCAhJNgAAYDzIwMTUxMDIwMDAwMDAwWqARGA8yMDMwMTAyMDAwMDAwMFowDQYJKoZIhvcNAQELBQADggEBAFgnZ/Ft1LTDYPwPlecOtLykgwS4HZTelUaSi841nq/tgfLM11G3D1AUXAT2V2jxiG+0YTxzkWd5v44KJGB9Mm+qjafPMKR3ULjQkJHJ8goFHpWkUtLrIYurj8N+4HpwZ+RJccieuZIX8SMeSWRq5w83okWZPGoUrl6GRdQDteE7imrNkBa35zrzUWozPqY8k90ttKfhZHRXNCJe8YbVfJRDh0vVZABzlfHeW8V+ie15HPVDx/M341KC3tBMM88e5/bt3sLyUU8SwxGH5nOe/ohVpjhkjk2Pz4TPdwD2ZK5Auc09VBfivdLYRE84BMhd8/yOEt53VWGPIMxWUVtrUyegggMjMIIDHzCCAxswggIDoAMCAQICCQDNMc/iNkPNdTANBgkqhkiG9w0BAQsFADArMSkwJwYDVQQDDCBjYWNrbGluZyBjcnlwdG9ncmFwaGVyIGZha2UgUk9PVDAeFw0xNTEwMjEyMDExNTJaFw0yMDEwMTkyMDExNTJaMCsxKTAnBgNVBAMMIGNhY2tsaW5nIGNyeXB0b2dyYXBoZXIgZmFrZSBST09UMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+TbvalHXQYO6GhJUJZI5mF2k4+nZDIvqWyrjw+2k9+UAcekuLKPpSclu9aBRvUggw3XFHAW95qW6Dv2+5gvinUmTq9Ry7kVTUYAxyZu1ydHt+wDETmFJfeY6/fpBHHIsuGLItqpUGmr8D6LROGEqfFY2B9+08O7Zs+FufDRgLHWEvLTdpPkrzeDJs9Oo6g38jfT9b4+9Ahs+FvvwqneAkbeZgBC2NWKB+drMuNBTPbF/W1a8czAzHeOs6qy0dBlTHNjL62/o9cRKNiKe3IqwHJdd01V1aLSUgIbe2HrP9EC1djnUXWR3jx3ursaKt7PTKsC52UJkRqnai80MzQj0WwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU6aQ/7p6l5vLV13lgPJOmLiSOl6owDQYJKoZIhvcNAQELBQADggEBACuwILDTvaBrdorv2zMsYnZuKvXtknWAf/DTcvF4N5PMOPBNkeHuGfv0VDe6VXpBHiU5G9E2RdU435W7o0kRSn27YcqrxaXGt9m2kArW6e49136+MnFx47jjk0p4T48s6MeaL5JVLJzxYouu1ZOZqlVokwNPO+8bxn6ALumIVUOD1jSBN7Y9pgLUS2rzO5pe5pxS2Ak/eO7Q7M21r1sEuG/uPuWqBFogk+4Z9omKVZdRDbzm9vYUATgEZdlTe2tct3BVBQ2zWbe0R2svIuCs8XzERykvfv1JawxI68I9vN0Dh9vj/xDM6udorfALlhjgQdftmbHovRLpJ1ZSOMIUNGY= diff --git a/ocsp/responder/testdata/response.der b/ocsp/responder/testdata/response.der deleted file mode 100644 index bd43e37bfd1..00000000000 Binary files a/ocsp/responder/testdata/response.der and /dev/null differ diff --git a/ocsp/responder/testdata/response_broken.pem b/ocsp/responder/testdata/response_broken.pem deleted file mode 100644 index 29a64c66661..00000000000 --- a/ocsp/responder/testdata/response_broken.pem +++ /dev/null @@ -1 +0,0 @@ -MIICGAoBAKCCAhEwggINBgkrBgEFBQcwAQEEggH+OZ4ZSKS2J85Kr9UaI2LAEFKvOM8/hjk8uyp7KnqJ12h8GOhGZAgIBdaADAQH/GA8wMDAxMDEwMTAwMDAwMFqgERgPMDAwMTAxMDEwMDAwMDBaMA0GCSqGSIb3DQEBCwUAA4IBAQCBGs+8UNwUdkEBladnajZIV+sHtmao/mMTIvpyPqnmV2Ab9KfNWlSDSDuMtZYKS4VsEwtbZ+4kKWI8DugE6egjP3o64R7VP2aqrh41IORwccLGVsexILBpxg4h602JbhXM0sxgXoh5WAt9f1oy6PsHAt/XAuJGSo7yMNv3nHKNFwjExmZt21sNLYlWlljjtX92rlo/mBTWKO0js4YRNyeNQhchARbn9oL18jW0yAVqB9a8rees+EippbTfoktFf0cIhnmkiknPZSZ+dN2qHkxiXIujWlymZzUZcqRTNtrmmhlOdt35QSg7Vw8eyw2rl8ZU94zaI5DPWn1QYn0dk7l9 \ No newline at end of file diff --git a/ocsp/responder/testdata/response_mix.pem b/ocsp/responder/testdata/response_mix.pem deleted file mode 100644 index 43249fb0aeb..00000000000 Binary files a/ocsp/responder/testdata/response_mix.pem and /dev/null differ diff --git a/ocsp/responder/testdata/test-ca.der.pem b/ocsp/responder/testdata/test-ca.der.pem deleted file mode 100644 index 760417fe943..00000000000 --- a/ocsp/responder/testdata/test-ca.der.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDETCCAfmgAwIBAgIJAJzxkS6o1QkIMA0GCSqGSIb3DQEBCwUAMB8xHTAbBgNV -BAMMFGhhcHB5IGhhY2tlciBmYWtlIENBMB4XDTE1MDQwNzIzNTAzOFoXDTI1MDQw -NDIzNTAzOFowHzEdMBsGA1UEAwwUaGFwcHkgaGFja2VyIGZha2UgQ0EwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCCkd5mgXFErJ3F2M0E9dw+Ta/md5i -8TDId01HberAApqmydG7UZYF3zLTSzNjlNSOmtybvrSGUnZ9r9tSQcL8VM6WUOM8 
-tnIpiIjEA2QkBycMwvRmZ/B2ltPdYs/R9BqNwO1g18GDZrHSzUYtNKNeFI6Glamj -7GK2Vr0SmiEamlNIR5ktAFsEErzf/d4jCF7sosMsJpMCm1p58QkP4LHLShVLXDa8 -BMfVoI+ipYcA08iNUFkgW8VWDclIDxcysa0psDDtMjX3+4aPkE/cefmP+1xOfUuD -HOGV8XFynsP4EpTfVOZr0/g9gYQ7ZArqXX7GTQkFqduwPm/w5qxSPTarAgMBAAGj -UDBOMB0GA1UdDgQWBBT7eE8S+WAVgyyfF380GbMuNupBiTAfBgNVHSMEGDAWgBT7 -eE8S+WAVgyyfF380GbMuNupBiTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA -A4IBAQAd9Da+Zv+TjMv7NTAmliqnWHY6d3UxEZN3hFEJ58IQVHbBZVZdW7zhRktB -vR05Kweac0HJeK91TKmzvXl21IXLvh0gcNLU/uweD3no/snfdB4OoFompljThmgl -zBqiqWoKBJQrLCA8w5UB+ReomRYd/EYXF/6TAfzm6hr//Xt5mPiUHPdvYt75lMAo -vRxLSbF8TSQ6b7BYxISWjPgFASNNqJNHEItWsmQMtAjjwzb9cs01XH9pChVAWn9L -oeMKa+SlHSYrWG93+EcrIH/dGU76uNOiaDzBSKvaehG53h25MHuO1anNICJvZovW -rFo4Uv1EnkKJm3vJFe50eJGhEKlx ------END CERTIFICATE----- diff --git a/ocsp/updater/testdata/test-cert-b.pem b/ocsp/updater/testdata/test-cert-b.pem deleted file mode 100644 index 3ad010fb188..00000000000 --- a/ocsp/updater/testdata/test-cert-b.pem +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEQzCCAyugAwIBAgITAP945y6GOlWTJzEwwYCxxvyiNDANBgkqhkiG9w0BAQsF -ADAfMR0wGwYDVQQDDBRoMnBweSBoMmNrZXIgZmFrZSBDQTAeFw0xODA1MjQxNzQ5 -MThaFw0yMDEwMTgxNzQ5MThaMBAxDjAMBgNVBAMTBXMuY29tMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtmgMPWXMet57pY8Usb9sVOsg7A3hpWch7VAE -LlyBw/g7SXJt6arRn2w4UChS/oIw6s+VI9YO8AeKaBN4jBG9VF7q5vYGrOkHxEye -ZVh/cjuRzMf8siexyjRrRDrqqZmg4/t3O/FiC5qrHabKoEfERGosXUq7mwm/XuW7 -GAUovbrY9CjQYu+4yWqRlTSlcnSdAY4EYfNE1akowAsLi1iCfkasmy0PlXEenskh -GkU6mYeRubeqVzIVp28MjqXd2zQ5ybzdNNG4OUURu5z4ZeuAAvbZYHGNt27kqQmW -GlBnqewrhRAVFWIxiSC4Xb1AgQdgoQD0O9mwW7kDfLTKn4UyyQIDAQABo4IBhTCC -AYEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD -AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBQHimy1vewsMOONppMya/B90IYUbjAf -BgNVHSMEGDAWgBT7eE8S+WAVgyyfF380GbMuNupBiTBkBggrBgEFBQcBAQRYMFYw -IgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6NDAwMi8wMAYIKwYBBQUHMAKG -JGh0dHA6Ly9ib3VsZGVyOjQ0MzAvYWNtZS9pc3N1ZXItY2VydDAQBgNVHREECTAH -ggVzLmNvbTAnBgNVHR8EIDAeMBygGqAYhhZodHRwOi8vZXhhbXBsZS5jb20vY3Js -MGEGA1UdIARaMFgwCAYGZ4EMAQIBMEwGAyoDBDBFMCIGCCsGAQUFBwIBFhZodHRw -Oi8vZXhhbXBsZS5jb20vY3BzMB8GCCsGAQUFBwICMBMMEURvIFdoYXQgVGhvdSBX -aWx0MA0GCSqGSIb3DQEBCwUAA4IBAQCSdKzs3Rav+P+8fa7x6SW4OU6NTVBdPF8O -+tQChZuVOBB6kBE9Zi04urKJ/qoK0N+QFukRjO+O1dLs2eGfQL07cFUfMclopVna -cHoTtOzuWvMHn4lgx+QeobuNgc/pBnFelGyFp2M3RRgdYZk/JVAF3OBtkSbgIGCS -1/8iO4qhW4OffJGTvqb+YWIPen2lWX7mNXNZYblOlqJXEanrBRjED4rFr0tgtQdz -YAjEoM9sGHFLy9PAcs4qP5tYfqD4B3sJ0hjSHn4+mcBHtQpkT6/fsqP9fTz3u0q7 -v7CcXf5XYaU1On5y0wYNgg6RsFOoOvsqRacP2v8iT4QULxfGJ3yN ------END CERTIFICATE----- diff --git a/ocsp/updater/testdata/test-cert.pem b/ocsp/updater/testdata/test-cert.pem deleted file mode 100644 index 3e603cb111a..00000000000 --- a/ocsp/updater/testdata/test-cert.pem +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEQzCCAyugAwIBAgITAP9qPF6Ypd4636BCjXm7LRhxCzANBgkqhkiG9w0BAQsF -ADAfMR0wGwYDVQQDDBRoMnBweSBoMmNrZXIgZmFrZSBDQTAeFw0xODA1MjQxNzQ4 -MDRaFw0yMDEwMTgxNzQ4MDRaMBAxDjAMBgNVBAMTBWsuY29tMIIBIjANBgkqhkiG -9w0BAQEFAAOCAQ8AMIIBCgKCAQEAso7yw8mzhIRyHb4HIvdF1oAz1+58trJcw6ig -bmYSauDbjVWFosJ0PiL4obTWNftmGxYDqRR3ssjlSUaSos+hHaI30qIT2pauQKc/ -oKxJqlWBKxeIco0DJ7SUieuKVIUWbqV9QJGlPzLoGPzUCQEyYCCE+GZv7IbbOLDc -RfZLidkb3nTVkogmWhin2WNlicHxdrSaAsSj/Sw+UIPeLnDbrFO+aMivvJdjkoct -F4iLSRsFxk+jadBWJlZo0GcPoGaCe6eZiYxDupTmioIqdwScbwUTFtv0kQsuGSsE -UA3fvCl9hQ/fQfsQB987o+o+iINVMpNOUptMBxdK2PGul5zuMwIDAQABo4IBhTCC -AYEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD -AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBS7G5+r83EGZokhTAwcy9XufuLJxjAf 
-BgNVHSMEGDAWgBT7eE8S+WAVgyyfF380GbMuNupBiTBkBggrBgEFBQcBAQRYMFYw -IgYIKwYBBQUHMAGGFmh0dHA6Ly8xMjcuMC4wLjE6NDAwMi8wMAYIKwYBBQUHMAKG -JGh0dHA6Ly9ib3VsZGVyOjQ0MzAvYWNtZS9pc3N1ZXItY2VydDAQBgNVHREECTAH -ggVrLmNvbTAnBgNVHR8EIDAeMBygGqAYhhZodHRwOi8vZXhhbXBsZS5jb20vY3Js -MGEGA1UdIARaMFgwCAYGZ4EMAQIBMEwGAyoDBDBFMCIGCCsGAQUFBwIBFhZodHRw -Oi8vZXhhbXBsZS5jb20vY3BzMB8GCCsGAQUFBwICMBMMEURvIFdoYXQgVGhvdSBX -aWx0MA0GCSqGSIb3DQEBCwUAA4IBAQA2IQr6zV+ptlO+6wXjVctBRpgbrwDZA+kn -dnCCYYTfyPkPGk4pCzC3qPNB0Hat9CR75TBCEYh0QBRIENPyVJyAFln5Kc4tzmC7 -9oX8n+MbaAh26yUcTp9t4ngpVOkrhCQYi6raFv/rE8LP52+p7YazaoYSL4LYqv9L -/nYPx70fXz5/D1r0+Kdd/mrznOROUpGxzIo+VJalv2DeGwLDtXsbA0YdTZRJJ2bE -pb2s63UWaoDUlsYTm6oeAAJYIxpXR2E3B9PKGLsCuQxvDgmpHD3oTg1Yd5Tv18qc -xXpiKJvw9iBVyYj8ncNb8vn1VzLWMdYPBt7rOV91pon3yUcx0pGy ------END CERTIFICATE----- diff --git a/ocsp/updater/updater.go b/ocsp/updater/updater.go deleted file mode 100644 index 925ad9d5b86..00000000000 --- a/ocsp/updater/updater.go +++ /dev/null @@ -1,527 +0,0 @@ -package updater - -import ( - "context" - "database/sql" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/jmhodges/clock" - "github.com/prometheus/client_golang/prometheus" - - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - blog "github.com/letsencrypt/boulder/log" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" -) - -// ocspDB and ocspReadOnlyDB are interfaces collecting the `sql.DB` methods that -// the various parts of OCSPUpdater rely on. Using this adapter shim allows tests to -// swap out the `sql.DB` implementation. - -// ocspReadOnlyDb provides only read-only portions of the `sql.DB` interface. -type ocspReadOnlyDb interface { - Query(query string, args ...interface{}) (*sql.Rows, error) -} - -// ocspDb provides read-write portions of the `sql.DB` interface. -type ocspDb interface { - ocspReadOnlyDb - Exec(query string, args ...interface{}) (sql.Result, error) -} - -type rocspClientInterface interface { - StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error -} - -// failCounter provides a concurrent safe counter. -type failCounter struct { - mu sync.Mutex - count int -} - -func (c *failCounter) Add(i int) { - c.mu.Lock() - defer c.mu.Unlock() - c.count += i -} - -func (c *failCounter) Reset() { - c.mu.Lock() - defer c.mu.Unlock() - c.count = 0 -} - -func (c *failCounter) Value() int { - c.mu.Lock() - defer c.mu.Unlock() - return c.count -} - -// OCSPUpdater contains the useful objects for the Updater -type OCSPUpdater struct { - log blog.Logger - clk clock.Clock - - db ocspDb - readOnlyDb ocspReadOnlyDb - rocspClient rocspClientInterface - - issuers []rocsp_config.ShortIDIssuer - - ogc capb.OCSPGeneratorClient - - batchSize int - - tickWindow time.Duration - maxBackoff time.Duration - backoffFactor float64 - readFailures failCounter - - serialSuffixes []string - queryBody string - - // Used to calculate how far back stale OCSP responses should be looked for - ocspMinTimeToExpiry time.Duration - // Maximum number of individual OCSP updates to attempt in parallel. Making - // these requests in parallel allows us to get higher total throughput. 
- parallelGenerateOCSPRequests int - - redisTimeout time.Duration - - tickHistogram *prometheus.HistogramVec - stalenessHistogram prometheus.Histogram - genStoreHistogram prometheus.Histogram - generatedCounter *prometheus.CounterVec - storedCounter *prometheus.CounterVec - storedRedisCounter *prometheus.CounterVec - markExpiredCounter *prometheus.CounterVec - findStaleOCSPCounter *prometheus.CounterVec -} - -func New( - stats prometheus.Registerer, - clk clock.Clock, - db ocspDb, - readOnlyDb ocspReadOnlyDb, - rocspClient rocspClientInterface, - issuers []rocsp_config.ShortIDIssuer, - serialSuffixes []string, - ogc capb.OCSPGeneratorClient, - batchSize int, - windowSize time.Duration, - retryBackoffMax time.Duration, - retryBackoffFactor float64, - ocspMinTimeToExpiry time.Duration, - parallelGenerateOCSPRequests int, - redisTimeout time.Duration, - log blog.Logger, -) (*OCSPUpdater, error) { - if batchSize == 0 { - return nil, errors.New("loop batch sizes must be non-zero") - } - if windowSize == 0 { - return nil, errors.New("loop window sizes must be non-zero") - } - if parallelGenerateOCSPRequests == 0 { - // Default to 1 - parallelGenerateOCSPRequests = 1 - } - for _, s := range serialSuffixes { - if len(s) != 1 || strings.ToLower(s) != s { - return nil, fmt.Errorf("serial suffixes must all be one lowercase character, got %q, expected %q", s, strings.ToLower(s)) - } - c := s[0] - if !(c >= '0' && c <= '9' || c >= 'a' && c <= 'f') { - return nil, errors.New("valid range for suffixes is [0-9a-f]") - } - } - - var queryBody strings.Builder - queryBody.WriteString("WHERE ocspLastUpdated < ? AND NOT isExpired ") - if len(serialSuffixes) > 0 { - fmt.Fprintf(&queryBody, "AND RIGHT(serial, 1) IN ( %s ) ", - getQuestionsForShardList(len(serialSuffixes)), - ) - } - queryBody.WriteString("ORDER BY ocspLastUpdated ASC LIMIT ?") - - genStoreHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "ocsp_updater_generate_and_store", - Help: "A histogram of latencies of OCSP generation and storage latencies", - }) - stats.MustRegister(genStoreHistogram) - generatedCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ocsp_updater_generated", - Help: "A counter of OCSP response generation calls labeled by result", - }, []string{"result"}) - stats.MustRegister(generatedCounter) - storedRedisCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ocsp_updater_stored_redis", - Help: "A counter of OCSP response storage calls labeled by result", - }, []string{"result"}) - stats.MustRegister(storedRedisCounter) - storedCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ocsp_updater_stored", - Help: "A counter of OCSP response storage calls labeled by result", - }, []string{"result"}) - stats.MustRegister(storedCounter) - tickHistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "ocsp_updater_ticks", - Help: "A histogram of ocsp-updater tick latencies labelled by result and whether the tick was considered longer than expected", - Buckets: []float64{0.01, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000}, - }, []string{"result", "long"}) - stats.MustRegister(tickHistogram) - stalenessHistogram := prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "ocsp_status_staleness", - Help: "How long past the refresh time a status is when we try to refresh it. 
Will always be > 0, but must stay well below 12 hours.", - Buckets: []float64{10, 100, 1000, 10000, 21600, 32400, 36000, 39600, 43200, 54000, 64800, 75600, 86400, 108000, 129600, 172800}, - }) - stats.MustRegister(stalenessHistogram) - markExpiredCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "mark_expired", - Help: "A counter of mark expired calls labeled by result", - }, []string{"result"}) - stats.MustRegister(markExpiredCounter) - findStaleOCSPCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "find_stale_ocsp", - Help: "A counter of query for stale OCSP responses labeled by result", - }, []string{"result"}) - stats.MustRegister(findStaleOCSPCounter) - - var rocspClientInterface rocspClientInterface - if rocspClient != nil { - rocspClientInterface = rocspClient - } - updater := OCSPUpdater{ - log: log, - clk: clk, - db: db, - readOnlyDb: readOnlyDb, - rocspClient: rocspClientInterface, - issuers: issuers, - ogc: ogc, - batchSize: batchSize, - tickWindow: windowSize, - maxBackoff: retryBackoffMax, - backoffFactor: retryBackoffFactor, - readFailures: failCounter{}, - serialSuffixes: serialSuffixes, - queryBody: queryBody.String(), - ocspMinTimeToExpiry: ocspMinTimeToExpiry, - parallelGenerateOCSPRequests: parallelGenerateOCSPRequests, - redisTimeout: redisTimeout, - tickHistogram: tickHistogram, - stalenessHistogram: stalenessHistogram, - genStoreHistogram: genStoreHistogram, - generatedCounter: generatedCounter, - storedCounter: storedCounter, - storedRedisCounter: storedRedisCounter, - markExpiredCounter: markExpiredCounter, - findStaleOCSPCounter: findStaleOCSPCounter, - } - - return &updater, nil -} - -func getQuestionsForShardList(count int) string { - return strings.TrimRight(strings.Repeat("?,", count), ",") -} - -// findStaleOCSPResponses sends a goroutine to fetch rows of stale OCSP -// responses from the database and returns results on a channel. -func (updater *OCSPUpdater) findStaleOCSPResponses(ctx context.Context, oldestLastUpdatedTime time.Time, batchSize int) <-chan sa.CertStatusMetadata { - // staleStatusesOut channel contains all stale ocsp responses that need - // updating. - staleStatusesOut := make(chan sa.CertStatusMetadata) - - args := make([]interface{}, 0) - args = append(args, oldestLastUpdatedTime) - - // If serialSuffixes is unset, this will be deliberately a no-op. - for _, c := range updater.serialSuffixes { - args = append(args, c) - } - args = append(args, batchSize) - - go func() { - defer close(staleStatusesOut) - - rows, err := updater.readOnlyDb.Query( - fmt.Sprintf( - "SELECT %s FROM certificateStatus %s", - strings.Join(sa.CertStatusMetadataFields(), ","), - updater.queryBody, - ), - args..., - ) - - // If error, log and increment retries for backoff. Else no - // error, proceed to push statuses to channel. 
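To make the serial-suffix sharding in New and getQuestionsForShardList above concrete: the helper emits one `?` placeholder per configured suffix, and New splices that list into the WHERE clause so the suffixes can be bound as ordinary query arguments. A minimal standalone sketch, reusing the table and column names from the query above:

```go
package main

import (
	"fmt"
	"strings"
)

// placeholders returns "?,?,...,?" with count entries, matching
// getQuestionsForShardList above.
func placeholders(count int) string {
	return strings.TrimRight(strings.Repeat("?,", count), ",")
}

func main() {
	suffixes := []string{"0", "1", "a", "b"}

	var q strings.Builder
	q.WriteString("WHERE ocspLastUpdated < ? AND NOT isExpired ")
	if len(suffixes) > 0 {
		fmt.Fprintf(&q, "AND RIGHT(serial, 1) IN ( %s ) ", placeholders(len(suffixes)))
	}
	q.WriteString("ORDER BY ocspLastUpdated ASC LIMIT ?")

	// Prints the clause with one placeholder per suffix, e.g.:
	// WHERE ocspLastUpdated < ? AND NOT isExpired AND RIGHT(serial, 1) IN ( ?,?,?,? ) ORDER BY ocspLastUpdated ASC LIMIT ?
	fmt.Println(q.String())
}
```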
- if err != nil { - updater.log.AuditErrf("failed to find stale OCSP responses: %s", err) - updater.findStaleOCSPCounter.WithLabelValues("failed").Inc() - updater.readFailures.Add(1) - return - } - defer func() { - err := rows.Close() - if err != nil { - updater.log.AuditErrf("closing query rows: %s", err) - } - }() - - for rows.Next() { - var status sa.CertStatusMetadata - err := sa.ScanCertStatusMetadataRow(rows, &status) - if err != nil { - updater.log.AuditErrf("failed to scan metadata status row: %s", err) - updater.findStaleOCSPCounter.WithLabelValues("failed").Inc() - updater.readFailures.Add(1) - return - } - staleness := oldestLastUpdatedTime.Sub(status.OCSPLastUpdated).Seconds() - updater.stalenessHistogram.Observe(staleness) - select { - case <-ctx.Done(): - err := ctx.Err() - if err != nil { - updater.log.AuditErrf("context done reading rows: %s", err) - } - return - case staleStatusesOut <- status: - } - } - - // Ensure the query wasn't interrupted before it could complete. - err = rows.Err() - if err != nil { - updater.log.AuditErrf("finishing row scan: %s", err) - updater.findStaleOCSPCounter.WithLabelValues("failed").Inc() - updater.readFailures.Add(1) - return - } - - updater.findStaleOCSPCounter.WithLabelValues("success").Inc() - updater.readFailures.Reset() - }() - - return staleStatusesOut -} - -// generateResponse signs an new OCSP response for a given certStatus row. -// Takes its argument by value to force a copy, then returns a reference to that copy. -func (updater *OCSPUpdater) generateResponse(ctx context.Context, status sa.CertStatusMetadata) (*sa.CertStatusMetadata, error) { - if status.IssuerID == 0 { - return nil, errors.New("cert status has 0 IssuerID") - } - ocspReq := capb.GenerateOCSPRequest{ - Serial: status.Serial, - IssuerID: status.IssuerID, - Status: string(status.Status), - Reason: int32(status.RevokedReason), - RevokedAt: status.RevokedDate.UnixNano(), - } - - ocspResponse, err := updater.ogc.GenerateOCSP(ctx, &ocspReq) - if err != nil { - return nil, err - } - - status.OCSPLastUpdated = updater.clk.Now() - status.OCSPResponse = ocspResponse.Response - - return &status, nil -} - -// storeResponse stores a given CertificateStatus in the database. -func (updater *OCSPUpdater) storeResponse(ctx context.Context, status *sa.CertStatusMetadata) error { - // If a redis client is configured, try to store the response in redis. - if updater.rocspClient != nil { - // Create a context to set a deadline for the goroutine that stores - // the response in redis. Set the timeout to one second longer than - // the configured redis timeout to give redis a chance to return a - // timeout error first. This context is necessary because we don't - // want to wait to confirm a write to redis (best effort), which - // causes a race with the Tick() context cancellation if the parent - // context is used. When writing to redis is the primary storage - // source we can change to use the parent context. 
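The comment above describes a deliberately fire-and-forget write: the goroutine's deadline is derived from context.Background() rather than the caller's context, so canceling the tick cannot abort an in-flight Redis write, while the detached timeout still bounds it. A stripped-down sketch of that pattern, where store is a hypothetical stand-in for a call like rocspClient.StoreResponse:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// bestEffortStore launches a write whose deadline is independent of any
// caller context: only the detached timeout can cut it short.
func bestEffortStore(timeout time.Duration, store func(context.Context) error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	go func() {
		defer cancel()
		if err := store(ctx); err != nil {
			fmt.Println("best-effort store failed:", err)
		}
	}()
}

func main() {
	bestEffortStore(50*time.Millisecond, func(ctx context.Context) error {
		select {
		case <-time.After(10 * time.Millisecond): // simulated write latency
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	time.Sleep(100 * time.Millisecond) // let the goroutine finish in this demo
}
```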
- ctx2, cancel := context.WithTimeout(context.Background(), updater.redisTimeout+time.Second) - go func() { - defer cancel() - ttl := status.NotAfter.Sub(updater.clk.Now()) - shortIssuerID, err := rocsp_config.FindIssuerByID(status.IssuerID, updater.issuers) - if err != nil { - updater.storedRedisCounter.WithLabelValues("missing issuer").Inc() - return - } - err = updater.rocspClient.StoreResponse(ctx2, status.OCSPResponse, shortIssuerID.ShortID(), ttl) - if err != nil { - if errors.Is(err, context.Canceled) { - updater.storedRedisCounter.WithLabelValues("canceled").Inc() - } else if errors.Is(err, context.DeadlineExceeded) { - updater.storedRedisCounter.WithLabelValues("deadlineExceeded").Inc() - } else { - updater.storedRedisCounter.WithLabelValues("failed").Inc() - } - } else { - updater.storedRedisCounter.WithLabelValues("success").Inc() - } - }() - } - - // Update the certificateStatus table with the new OCSP response, the status - // WHERE is used make sure we don't overwrite a revoked response with a one - // containing a 'good' status. - _, err := updater.db.Exec( - `UPDATE certificateStatus - SET ocspResponse=?,ocspLastUpdated=? - WHERE id=? - AND status=?`, - status.OCSPResponse, - status.OCSPLastUpdated, - status.ID, - string(status.Status), - ) - - if err != nil { - updater.storedCounter.WithLabelValues("failed").Inc() - } else { - updater.storedCounter.WithLabelValues("success").Inc() - } - return err -} - -// markExpired updates a given CertificateStatus to have `isExpired` set. -func (updater *OCSPUpdater) markExpired(status sa.CertStatusMetadata) error { - _, err := updater.db.Exec( - `UPDATE certificateStatus - SET isExpired = TRUE - WHERE id = ?`, - status.ID, - ) - return err -} - -// processExpired is a pipeline step to process a channel of -// `core.CertificateStatus` and set `isExpired` in the database. -func (updater *OCSPUpdater) processExpired(ctx context.Context, staleStatusesIn <-chan sa.CertStatusMetadata) <-chan sa.CertStatusMetadata { - tickStart := updater.clk.Now() - staleStatusesOut := make(chan sa.CertStatusMetadata) - go func() { - defer close(staleStatusesOut) - for status := range staleStatusesIn { - if !status.IsExpired && tickStart.After(status.NotAfter) { - err := updater.markExpired(status) - if err != nil { - // Update error counters and log - updater.log.AuditErrf("Failed to set certificate expired: %s", err) - updater.markExpiredCounter.WithLabelValues("failed").Inc() - } else { - updater.markExpiredCounter.WithLabelValues("success").Inc() - } - } - select { - case <-ctx.Done(): - return - case staleStatusesOut <- status: - } - } - }() - - return staleStatusesOut -} - -// generateOCSPResponses is the final stage of a pipeline. It takes a -// channel of `core.CertificateStatus` and sends a goroutine for each to -// obtain a new OCSP response and update the status in the database. -func (updater *OCSPUpdater) generateOCSPResponses(ctx context.Context, staleStatusesIn <-chan sa.CertStatusMetadata) { - // Use the semaphore pattern from - // https://github.com/golang/go/wiki/BoundingResourceUse to send a number of - // GenerateOCSP / storeResponse requests in parallel, while limiting the total number of - // outstanding requests. The number of outstanding requests equals the - // capacity of the channel. - sem := make(chan int, updater.parallelGenerateOCSPRequests) - wait := func() { - sem <- 1 // Block until there's capacity. - } - done := func(start time.Time) { - <-sem // Indicate there's more capacity. 
- updater.genStoreHistogram.Observe(time.Since(start).Seconds()) - } - - // Work runs as a goroutine per ocsp response to obtain a new ocsp - // response and store it in the database. - work := func(status sa.CertStatusMetadata) { - defer done(updater.clk.Now()) - - meta, err := updater.generateResponse(ctx, status) - if err != nil { - updater.log.AuditErrf("Failed to generate OCSP response: %s", err) - updater.generatedCounter.WithLabelValues("failed").Inc() - return - } - updater.generatedCounter.WithLabelValues("success").Inc() - - err = updater.storeResponse(ctx, meta) - if err != nil { - updater.log.AuditErrf("Failed to store OCSP response: %s", err) - updater.storedCounter.WithLabelValues("failed").Inc() - return - } - updater.storedCounter.WithLabelValues("success").Inc() - } - - // Consume the stale statuses channel and send off a sign/store request - // for each stale response. - for status := range staleStatusesIn { - wait() - go work(status) - } - - // Block until the sem channel reaches its full capacity again, - // indicating each goroutine has completed. - for i := 0; i < updater.parallelGenerateOCSPRequests; i++ { - wait() - } -} - -func (updater *OCSPUpdater) Tick() { - start := updater.clk.Now() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - oldestLastUpdatedTime := updater.clk.Now().Add(-updater.ocspMinTimeToExpiry) - - // Run pipeline - updater.generateOCSPResponses(ctx, updater.processExpired(ctx, updater.findStaleOCSPResponses(ctx, oldestLastUpdatedTime, updater.batchSize))) - - end := updater.clk.Now() - took := end.Sub(start) - long, state := "false", "success" - if took > updater.tickWindow { - long = "true" - } - - // Set sleep duration to the configured tickWindow. - sleepDur := start.Add(updater.tickWindow).Sub(end) - - // Set sleep duration higher to backoff starting the next tick and - // reading from the database if the last read failed. 
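The channel-as-semaphore idiom used by generateOCSPResponses above is worth seeing in isolation: acquire a slot by sending, release by receiving, and join all workers by filling the channel back up to capacity. A self-contained sketch with a simulated unit of work standing in for the GenerateOCSP/storeResponse pair:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const parallel = 3 // cap on in-flight workers, like parallelGenerateOCSPRequests
	sem := make(chan struct{}, parallel)

	jobs := []int{1, 2, 3, 4, 5, 6}
	for _, j := range jobs {
		sem <- struct{}{} // acquire: blocks once `parallel` workers are in flight
		go func(j int) {
			defer func() { <-sem }() // release the slot when done
			time.Sleep(10 * time.Millisecond) // simulated sign + store
			fmt.Println("done", j)
		}(j)
	}

	// Join: once the semaphore can be filled to capacity, every worker has
	// released its slot, i.e. all goroutines have completed.
	for i := 0; i < parallel; i++ {
		sem <- struct{}{}
	}
}
```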
- readFails := updater.readFailures.Value() - if readFails > 0 { - sleepDur = core.RetryBackoff( - readFails, - updater.tickWindow, - updater.maxBackoff, - updater.backoffFactor, - ) - } - updater.tickHistogram.WithLabelValues(state, long).Observe(took.Seconds()) - updater.clk.Sleep(sleepDur) -} diff --git a/ocsp/updater/updater_test.go b/ocsp/updater/updater_test.go deleted file mode 100644 index 73f13de3429..00000000000 --- a/ocsp/updater/updater_test.go +++ /dev/null @@ -1,753 +0,0 @@ -package updater - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "database/sql" - "errors" - "math/big" - "strings" - "sync" - "testing" - "time" - - "github.com/jmhodges/clock" - capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/db" - bgrpc "github.com/letsencrypt/boulder/grpc" - blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/metrics" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "github.com/letsencrypt/boulder/sa" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/sa/satest" - "github.com/letsencrypt/boulder/test" - isa "github.com/letsencrypt/boulder/test/inmem/sa" - "github.com/letsencrypt/boulder/test/vars" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" -) - -var ctx = context.Background() - -type mockOCSP struct { - sleepTime time.Duration -} - -func (ca *mockOCSP) GenerateOCSP(_ context.Context, req *capb.GenerateOCSPRequest, _ ...grpc.CallOption) (*capb.OCSPResponse, error) { - time.Sleep(ca.sleepTime) - return &capb.OCSPResponse{Response: []byte{1, 2, 3}}, nil -} - -type noopROCSP struct { -} - -func (noopROCSP) StoreResponse(_ context.Context, _ []byte, _ byte, _ time.Duration) error { - return nil -} - -var log = blog.UseMock() - -func setup(t *testing.T) (*OCSPUpdater, sapb.StorageAuthorityClient, *db.WrappedMap, clock.FakeClock, func()) { - dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) - test.AssertNotError(t, err, "Failed to create dbMap") - readOnlyDb, err := sa.NewDbMap(vars.DBConnSAOcspUpdateRO, sa.DbSettings{}) - test.AssertNotError(t, err, "Failed to create dbMap") - cleanUp := test.ResetSATestDatabase(t) - sa.SetSQLDebug(dbMap, log) - - fc := clock.NewFake() - fc.Add(1 * time.Hour) - - sa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, nil, fc, log, metrics.NoopRegisterer, 1) - test.AssertNotError(t, err, "Failed to create SA") - - updater, err := New( - metrics.NoopRegisterer, - fc, - dbMap, - readOnlyDb, - noopROCSP{}, - nil, - strings.Fields("0 1 2 3 4 5 6 7 8 9 a b c d e f"), - &mockOCSP{}, - 1, - time.Second, - time.Minute, - 1.5, - 0, - 0, - 0, - blog.NewMock(), - ) - test.AssertNotError(t, err, "Failed to create newUpdater") - - return updater, isa.SA{Impl: sa}, dbMap, fc, cleanUp -} - -func nowNano(fc clock.Clock) int64 { - return fc.Now().UnixNano() -} - -func TestStalenessHistogram(t *testing.T) { - updater, sac, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sac) - parsedCertA, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sac.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCertA.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - parsedCertB, err := core.LoadCert("testdata/test-cert-b.pem") - test.AssertNotError(t, err, "Couldn't read 
test certificate") - _, err = sac.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCertB.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert-b.pem") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - // We should have 2 stale responses now. - statuses := updater.findStaleOCSPResponses(ctx, earliest, 10) - var statusSlice []sa.CertStatusMetadata - for status := range statuses { - statusSlice = append(statusSlice, status) - } - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statusSlice), 2) - - test.AssertMetricWithLabelsEquals(t, updater.stalenessHistogram, prometheus.Labels{}, 2) -} - -func TestGenerateAndStoreOCSPResponse(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - statuses := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - status := <-statuses - - meta, err := updater.generateResponse(ctx, status) - test.AssertNotError(t, err, "Couldn't generate OCSP response") - err = updater.storeResponse(context.Background(), meta) - test.AssertNotError(t, err, "Couldn't store certificate status") -} - -type rocspStorage struct { - shortIDIssuer byte - response []byte - ttl time.Duration -} - -type recordingROCSP struct { - sync.Mutex - storage []rocspStorage -} - -func (rr *recordingROCSP) get() []rocspStorage { - rr.Lock() - defer rr.Unlock() - var ret []rocspStorage - return append(ret, rr.storage...) -} - -func (rr *recordingROCSP) StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error { - rr.Lock() - defer rr.Unlock() - rr.storage = append(rr.storage, rocspStorage{ - shortIDIssuer: shortIssuerID, - response: respBytes, - ttl: ttl, - }) - return nil -} - -// A mock ocspDb that sleeps for 50ms when Exec is called. 
-type mockDBBlocksOnExec struct{} - -func (mdboe *mockDBBlocksOnExec) Query(query string, args ...interface{}) (*sql.Rows, error) { - return nil, nil -} - -func (mdboe *mockDBBlocksOnExec) Exec(query string, args ...interface{}) (sql.Result, error) { - time.Sleep(500 * time.Millisecond) - return nil, nil -} - -func TestROCSP(t *testing.T) { - updater, sac, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sac) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sac.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 66283756913588288, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - recorder := &recordingROCSP{} - updater.rocspClient = recorder - updater.issuers, err = rocsp_config.LoadIssuers( - map[string]int{ - "../../test/hierarchy/int-e1.cert.pem": 23, - }, - ) - test.AssertNotError(t, err, "loading issuers") - updater.db = &mockDBBlocksOnExec{} - - err = updater.storeResponse(context.Background(), &sa.CertStatusMetadata{ - CertificateStatus: core.CertificateStatus{ - OCSPResponse: []byte("fake response"), - Serial: "fake serial", - IssuerID: 66283756913588288, - }, - }) - test.AssertNotError(t, err, "Couldn't store certificate status") - storage := recorder.get() - test.AssertEquals(t, len(storage), 1) - - test.AssertByteEquals(t, storage[0].response, []byte("fake response")) -} - -// findStaleOCSPResponsesBuffered runs findStaleOCSPResponses and returns -// it as a buffered channel. This is helpful for tests that want to test -// the length of the channel. -func findStaleOCSPResponsesBuffered(ctx context.Context, updater *OCSPUpdater, earliest time.Time, batchSize int) <-chan sa.CertStatusMetadata { - statuses := make(chan sa.CertStatusMetadata, batchSize) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - defer close(statuses) - s := updater.findStaleOCSPResponses(ctx, earliest, 10) - for status := range s { - statuses <- status - } - }() - wg.Wait() - return statuses -} - -func TestGenerateOCSPResponses(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCertA, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCertA.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - parsedCertB, err := core.LoadCert("testdata/test-cert-b.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCertB.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert-b.pem") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - // We should have 2 stale responses now. 
- statuses := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 2) - - // Hacky test of parallelism: Make each request to the CA take 1 second, and - // produce 2 requests to the CA. If the pair of requests complete in about a - // second, they were made in parallel. - // Note that this test also tests the basic functionality of - // generateOCSPResponses. - start := time.Now() - updater.ogc = &mockOCSP{time.Second} - updater.parallelGenerateOCSPRequests = 10 - updater.generateOCSPResponses(ctx, statuses) - elapsed := time.Since(start) - if elapsed > 1500*time.Millisecond { - t.Errorf("generateOCSPResponses took too long, expected it to make calls in parallel.") - } - - // generateOCSPResponses should have updated the ocspLastUpdate for each - // cert, so there shouldn't be any stale responses anymore. - statuses = findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 0) -} - -func TestFindStaleOCSPResponses(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - // With no rows in the CertificateStatus table we shouldn't get an error. - statuses := findStaleOCSPResponsesBuffered(ctx, updater, fc.Now(), 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 0) - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - // We should have 1 stale response now. - statuses = findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - status := <-statuses - - // Generate and store an updated response, which will update the - // ocspLastUpdate field for this cert. - meta, err := updater.generateResponse(ctx, status) - test.AssertNotError(t, err, "Couldn't generate OCSP response") - err = updater.storeResponse(context.Background(), meta) - test.AssertNotError(t, err, "Couldn't store OCSP response") - - // We should have 0 stale responses now. - statuses = findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 0) -} - -func TestFindStaleOCSPResponsesRevokedReason(t *testing.T) { - updater, sa, dbMap, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - // Set a revokedReason to ensure it gets written into the OCSPResponse. 
- _, err = dbMap.Exec( - "UPDATE certificateStatus SET revokedReason = 1 WHERE serial = ?", - core.SerialToString(parsedCert.SerialNumber)) - test.AssertNotError(t, err, "Couldn't update revokedReason") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - statuses := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - status := <-statuses - test.AssertEquals(t, int(status.RevokedReason), 1) -} - -func TestPipelineTick(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - updater.ocspMinTimeToExpiry = 1 * time.Hour - earliest := fc.Now().Add(-time.Hour) - updater.generateOCSPResponses(ctx, updater.processExpired(ctx, updater.findStaleOCSPResponses(ctx, earliest, 10))) - test.AssertEquals(t, updater.readFailures.Value(), 0) - - certs := findStaleOCSPResponsesBuffered(ctx, updater, fc.Now().Add(-updater.ocspMinTimeToExpiry), 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(certs), 0) -} - -// TestProcessExpired checks that the `processExpired` pipeline step -// updates the `IsExpired` field opportunistically as it encounters -// certificates that are expired but whose certificate status rows do not -// have `IsExpired` set, and that expired certs don't show up as having -// stale responses. -func TestProcessExpired(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - serial := core.SerialToString(parsedCert.SerialNumber) - - // Add a new test certificate - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - // The certificate isn't expired, so the certificate status should have - // a false `IsExpired` and it should show up as stale. 
- statusPB, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) - test.AssertNotError(t, err, "Couldn't get the certificateStatus from the database") - cs, err := bgrpc.PBToCertStatus(statusPB) - test.AssertNotError(t, err, "Count't convert the certificateStatus from a PB") - - test.AssertEquals(t, cs.IsExpired, false) - statuses := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - - // Advance the clock to the point that the certificate we added is now expired - fc.Set(parsedCert.NotAfter.Add(2 * time.Hour)) - earliest = fc.Now().Add(-time.Hour) - updater.ocspMinTimeToExpiry = 1 * time.Hour - - // Run pipeline to find stale responses, mark expired, and generate new response. - updater.generateOCSPResponses(ctx, updater.processExpired(ctx, updater.findStaleOCSPResponses(ctx, earliest, 10))) - - // Since we advanced the fakeclock beyond our test certificate's NotAfter we - // expect the certificate status has been updated to have a true `IsExpired` - statusPB, err = sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) - test.AssertNotError(t, err, "Couldn't get the certificateStatus from the database") - cs, err = bgrpc.PBToCertStatus(statusPB) - test.AssertNotError(t, err, "Count't convert the certificateStatus from a PB") - - test.AssertEquals(t, cs.IsExpired, true) - statuses = findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 0) -} - -func TestStoreResponseGuard(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - parsedCert, err := core.LoadCert("testdata/test-cert.pem") - test.AssertNotError(t, err, "Couldn't read test certificate") - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: parsedCert.Raw, - RegID: reg.Id, - Ocsp: nil, - Issued: nowNano(fc), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert.pem") - - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - statuses := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - status := <-statuses - - serialStr := core.SerialToString(parsedCert.SerialNumber) - reason := int64(0) - revokedDate := fc.Now().UnixNano() - _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ - Serial: serialStr, - Reason: reason, - Date: revokedDate, - Response: []byte("fakeocspbytes"), - }) - test.AssertNotError(t, err, "Failed to revoked certificate") - - // Attempt to update OCSP response where status.Status is good but stored status - // is revoked, this should fail silently - status.OCSPResponse = []byte("newfakeocspbytes") - err = updater.storeResponse(context.Background(), &status) - test.AssertNotError(t, err, "Failed to update certificate status") - - // Make sure the OCSP response hasn't actually changed - unchangedStatus, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: core.SerialToString(parsedCert.SerialNumber)}) - test.AssertNotError(t, err, "Failed to get certificate status") - test.AssertEquals(t, string(unchangedStatus.OcspResponse), "fakeocspbytes") - - // Changing the status to the stored status should allow the update to occur - status.Status = core.OCSPStatusRevoked - err = updater.storeResponse(context.Background(), &status) 
- test.AssertNotError(t, err, "Failed to updated certificate status") - - // Make sure the OCSP response has been updated - changedStatus, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: core.SerialToString(parsedCert.SerialNumber)}) - test.AssertNotError(t, err, "Failed to get certificate status") - test.AssertEquals(t, string(changedStatus.OcspResponse), "newfakeocspbytes") -} - -func TestGenerateOCSPResponsePrecert(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - - reg := satest.CreateWorkingRegistration(t, sa) - - // Create a throw-away self signed certificate with some names - serial, testCert := test.ThrowAwayCert(t, 5) - - // Use AddPrecertificate to set up a precertificate, serials, and - // certificateStatus row for the testcert. - ocspResp := []byte{0, 0, 1} - regID := reg.Id - issuedTime := fc.Now().UnixNano() - _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: regID, - Ocsp: ocspResp, - Issued: issuedTime, - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test-cert2.der") - - // Jump time forward by 2 hours so the ocspLastUpdate value will be older than - // the earliest lastUpdate time we care about. - fc.Set(fc.Now().Add(2 * time.Hour)) - earliest := fc.Now().Add(-time.Hour) - - // There should be one stale ocsp response found for the precert - certs := findStaleOCSPResponsesBuffered(ctx, updater, earliest, 10) - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(certs), 1) - cert := <-certs - test.AssertEquals(t, cert.Serial, serial) - - // Directly call generateResponse again with the same result. It should not - // error and should instead update the precertificate's OCSP status even - // though no certificate row exists. 
- _, err = updater.generateResponse(ctx, cert) - test.AssertNotError(t, err, "generateResponse for precert errored") -} - -type mockOCSPRecordIssuer struct { - gotIssuer bool -} - -func (ca *mockOCSPRecordIssuer) GenerateOCSP(_ context.Context, req *capb.GenerateOCSPRequest, _ ...grpc.CallOption) (*capb.OCSPResponse, error) { - ca.gotIssuer = req.IssuerID != 0 && req.Serial != "" - return &capb.OCSPResponse{Response: []byte{1, 2, 3}}, nil -} - -func TestIssuerInfo(t *testing.T) { - updater, sa, _, fc, cleanUp := setup(t) - defer cleanUp() - m := mockOCSPRecordIssuer{} - updater.ogc = &m - reg := satest.CreateWorkingRegistration(t, sa) - - k, err := rsa.GenerateKey(rand.Reader, 512) - test.AssertNotError(t, err, "rsa.GenerateKey failed") - template := &x509.Certificate{ - SerialNumber: big.NewInt(1), - DNSNames: []string{"example.com"}, - } - certA, err := x509.CreateCertificate(rand.Reader, template, template, &k.PublicKey, k) - test.AssertNotError(t, err, "x509.CreateCertificate failed") - - now := fc.Now().UnixNano() - id := int64(1234) - _, err = sa.AddPrecertificate(context.Background(), &sapb.AddCertificateRequest{ - Der: certA, - RegID: reg.Id, - Ocsp: []byte{1, 2, 3}, - Issued: now, - IssuerID: id, - }) - test.AssertNotError(t, err, "sa.AddPrecertificate failed") - - fc.Add(time.Hour * 24 * 4) - statuses := findStaleOCSPResponsesBuffered(ctx, updater, fc.Now().Add(-time.Hour), 10) - - test.AssertEquals(t, updater.readFailures.Value(), 0) - test.AssertEquals(t, len(statuses), 1) - status := <-statuses - test.AssertEquals(t, status.IssuerID, id) - - _, err = updater.generateResponse(context.Background(), status) - test.AssertNotError(t, err, "generateResponse failed") - test.Assert(t, m.gotIssuer, "generateResponse didn't send issuer information and serial") -} - -type brokenDB struct{} - -func (bdb *brokenDB) Query(query string, args ...interface{}) (*sql.Rows, error) { - return nil, errors.New("broken") -} -func (bdb *brokenDB) Exec(query string, args ...interface{}) (sql.Result, error) { - return nil, errors.New("broken") -} - -func TestTickSleep(t *testing.T) { - updater, _, dbMap, fc, cleanUp := setup(t) - defer cleanUp() - m := &brokenDB{} - updater.readOnlyDb = m - - // Test that when findStaleResponses fails the failure counter is - // incremented and the clock moved forward by more than - // updater.tickWindow - updater.readFailures.Add(2) - before := fc.Now() - updater.Tick() - test.AssertEquals(t, updater.readFailures.Value(), 3) - took := fc.Since(before) - test.Assert(t, took > updater.tickWindow, "Clock didn't move forward enough") - - // Test when findStaleResponses works the failure counter is reset to - // zero and the clock only moves by updater.tickWindow - updater.readOnlyDb = dbMap - before = fc.Now() - updater.Tick() - test.AssertEquals(t, updater.readFailures.Value(), 0) - took = fc.Since(before) - test.AssertEquals(t, took, updater.tickWindow) - -} - -func TestFindOCSPResponsesSleep(t *testing.T) { - updater, _, dbMap, fc, cleanUp := setup(t) - defer cleanUp() - m := &brokenDB{} - updater.readOnlyDb = m - - // Test when updateOCSPResponses fails the failure counter is incremented - // and the clock moved forward by more than updater.tickWindow - updater.readFailures.Add(2) - before := fc.Now() - updater.Tick() - test.AssertEquals(t, updater.readFailures.Value(), 3) - took := fc.Since(before) - test.Assert(t, took > updater.tickWindow, "Clock didn't move forward enough") - - // Test when updateOCSPResponses works the failure counter is reset to zero - // and the 
clock only moves by updater.tickWindow - updater.readOnlyDb = dbMap - before = fc.Now() - updater.Tick() - test.AssertEquals(t, updater.readFailures.Value(), 0) - took = fc.Since(before) - test.AssertEquals(t, took, updater.tickWindow) - -} - -func mkNewUpdaterWithStrings(t *testing.T, shards []string) (*OCSPUpdater, error) { - dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{}) - test.AssertNotError(t, err, "Failed to create dbMap") - sa.SetSQLDebug(dbMap, log) - - fc := clock.NewFake() - - updater, err := New( - metrics.NoopRegisterer, - fc, - dbMap, - dbMap, - noopROCSP{}, - nil, - shards, - &mockOCSP{}, - 1, - time.Second, - time.Minute, - 1.5, - 0, - 0, - 0, - blog.NewMock(), - ) - return updater, err -} - -func TestUpdaterConfiguration(t *testing.T) { - _, err := mkNewUpdaterWithStrings(t, strings.Fields("0 1 2 3 4 5 6 7 8 9 a B c d e f")) - test.AssertError(t, err, "No uppercase allowed") - - _, err = mkNewUpdaterWithStrings(t, strings.Fields("0 1 g")) - test.AssertError(t, err, "No letters > f allowed") - - _, err = mkNewUpdaterWithStrings(t, strings.Fields("0 *")) - test.AssertError(t, err, "No special chars allowed") - - _, err = mkNewUpdaterWithStrings(t, strings.Fields("0 -1")) - test.AssertError(t, err, "No negative numbers allowed") - - _, err = mkNewUpdaterWithStrings(t, strings.Fields("wazzup 0 a b c")) - test.AssertError(t, err, "No multi-letter shards allowed") - - _, err = mkNewUpdaterWithStrings(t, []string{}) - test.AssertNotError(t, err, "Empty should be valid, meaning use old queries") -} - -func TestGetQuestionsForShardList(t *testing.T) { - test.AssertEquals(t, getQuestionsForShardList(2), "?,?") - test.AssertEquals(t, getQuestionsForShardList(1), "?") - test.AssertEquals(t, getQuestionsForShardList(16), "?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?") -} diff --git a/pkcs11helpers/helpers.go b/pkcs11helpers/helpers.go index 04b88fe2f19..4c02146d8d8 100644 --- a/pkcs11helpers/helpers.go +++ b/pkcs11helpers/helpers.go @@ -80,7 +80,7 @@ func (s *Session) getPublicKeyID(label string, publicKey crypto.PublicKey) ([]by // PKCS#11 v2.20 specified that the CKA_EC_POINT was to be store in a DER-encoded // OCTET STRING. rawValue := asn1.RawValue{ - Tag: 4, // in Go 1.6+ this is asn1.TagOctetString + Tag: asn1.TagOctetString, Bytes: elliptic.Marshal(key.Curve, key.X, key.Y), } marshalledPoint, err := asn1.Marshal(rawValue) @@ -235,7 +235,7 @@ const ( // Hash identifiers required for PKCS#11 RSA signing. 
Only support SHA-256, SHA-384, // and SHA-512 -var hashIdentifiers = map[crypto.Hash][]byte{ +var hashIdents = map[crypto.Hash][]byte{ crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}, crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}, crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}, @@ -250,7 +250,7 @@ func (s *Session) Sign(object pkcs11.ObjectHandle, keyType keyType, digest []byt switch keyType { case RSAKey: mech[0] = pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS, nil) - prefix, ok := hashIdentifiers[hash] + prefix, ok := hashIdents[hash] if !ok { return nil, errors.New("unsupported hash function") } diff --git a/policy/pa.go b/policy/pa.go index d3494495fa0..99ac11ad564 100644 --- a/policy/pa.go +++ b/policy/pa.go @@ -5,10 +5,11 @@ import ( "encoding/hex" "errors" "fmt" - "math/rand" - "net" "net/mail" + "net/netip" + "os" "regexp" + "slices" "strings" "sync" @@ -17,99 +18,91 @@ import ( "github.com/letsencrypt/boulder/core" berrors "github.com/letsencrypt/boulder/errors" + "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/iana" "github.com/letsencrypt/boulder/identifier" blog "github.com/letsencrypt/boulder/log" - "github.com/letsencrypt/boulder/reloader" - "gopkg.in/yaml.v2" + "github.com/letsencrypt/boulder/strictyaml" ) // AuthorityImpl enforces CA policy decisions. type AuthorityImpl struct { log blog.Logger - blocklist map[string]bool - exactBlocklist map[string]bool - wildcardExactBlocklist map[string]bool - blocklistMu sync.RWMutex + domainBlocklist map[string]bool + fqdnBlocklist map[string]bool + wildcardFqdnBlocklist map[string]bool + ipPrefixBlocklist []netip.Prefix + blocklistMu sync.RWMutex - enabledChallenges map[core.AcmeChallenge]bool - pseudoRNG *rand.Rand - rngMu sync.Mutex + enabledChallenges map[core.AcmeChallenge]bool + enabledIdentifiers map[identifier.IdentifierType]bool } // New constructs a Policy Authority. -func New(challengeTypes map[core.AcmeChallenge]bool) (*AuthorityImpl, error) { - - pa := AuthorityImpl{ - log: blog.Get(), - enabledChallenges: challengeTypes, - // We don't need real randomness for this. - pseudoRNG: rand.New(rand.NewSource(99)), - } - - return &pa, nil +func New(identifierTypes map[identifier.IdentifierType]bool, challengeTypes map[core.AcmeChallenge]bool, log blog.Logger) (*AuthorityImpl, error) { + return &AuthorityImpl{ + log: log, + enabledChallenges: challengeTypes, + enabledIdentifiers: identifierTypes, + }, nil } -// blockedNamesPolicy is a struct holding lists of blocked domain names. One for -// exact blocks and one for blocks including all subdomains. -type blockedNamesPolicy struct { - // ExactBlockedNames is a list of domain names. Issuance for names exactly - // matching an entry in the list will be forbidden. (e.g. `ExactBlockedNames` - // containing `www.example.com` will not block `example.com` or - // `mail.example.com`). +// blockedIdentsPolicy is a struct holding lists of blocked identifiers. +type blockedIdentsPolicy struct { + // ExactBlockedNames is a list of Fully Qualified Domain Names (FQDNs). + // Issuance for names exactly matching an entry in the list will be + // forbidden. (e.g. `ExactBlockedNames` containing `www.example.com` will + // not block `example.com`, `mail.example.com`, or `dev.www.example.com`). 
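Returning briefly to the pkcs11helpers hunk above: the hashIdents table holds DER-encoded DigestInfo prefixes because the CKM_RSA_PKCS mechanism only applies raw PKCS#1 v1.5 padding, leaving the caller to supply the full DigestInfo (prefix concatenated with the digest). A hedged sketch of assembling that input, using the SHA-256 entry from the table:

```go
package main

import (
	"crypto"
	"crypto/sha256"
	"fmt"
)

// DER-encoded DigestInfo prefix for SHA-256, as in the hashIdents table above.
var hashIdents = map[crypto.Hash][]byte{
	crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20},
}

func main() {
	digest := sha256.Sum256([]byte("message"))
	// CKM_RSA_PKCS pads but does not hash or encode, so the signing input
	// handed to the token must already be DigestInfo = prefix || digest.
	input := append(append([]byte{}, hashIdents[crypto.SHA256]...), digest[:]...)
	fmt.Printf("DigestInfo is %d bytes (19-byte prefix + 32-byte digest)\n", len(input))
}
```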
ExactBlockedNames []string `yaml:"ExactBlockedNames"` - // HighRiskBlockedNames is like ExactBlockedNames except that issuance is - // blocked for subdomains as well. (e.g. BlockedNames containing `example.com` - // will block `www.example.com`). + + // HighRiskBlockedNames is a list of domain names: like ExactBlockedNames + // except that issuance is blocked for subdomains as well. (e.g. + // BlockedNames containing `example.com` will block `www.example.com`). // // This list typically doesn't change with much regularity. HighRiskBlockedNames []string `yaml:"HighRiskBlockedNames"` - // AdminBlockedNames operates the same as BlockedNames but is changed with more - // frequency based on administrative blocks/revocations that are added over - // time above and beyond the high-risk domains. Managing these entries separately - // from HighRiskBlockedNames makes it easier to vet changes accurately. + // AdminBlockedNames operates the same as HighRiskBlockedNames but is + // changed with more frequency based on administrative blocks/revocations + // that are added over time above and beyond the high-risk domains. Managing + // these entries separately from HighRiskBlockedNames makes it easier to vet + // changes accurately. AdminBlockedNames []string `yaml:"AdminBlockedNames"` + + // AdminBlockedPrefixes is a list of IP address prefixes. All IP addresses + // contained within the prefix are blocked. + AdminBlockedPrefixes []string `yaml:"AdminBlockedPrefixes"` } -// SetHostnamePolicyFile will load the given policy file, returning error if it -// fails. It will also start a reloader in case the file changes -func (pa *AuthorityImpl) SetHostnamePolicyFile(f string) error { - if _, err := reloader.New(f, pa.loadHostnamePolicy, pa.hostnamePolicyLoadError); err != nil { +// LoadIdentPolicyFile will load the given policy file, returning an error if it +// fails. +func (pa *AuthorityImpl) LoadIdentPolicyFile(f string) error { + configBytes, err := os.ReadFile(f) + if err != nil { return err } - return nil -} - -func (pa *AuthorityImpl) hostnamePolicyLoadError(err error) { - pa.log.AuditErrf("error loading hostname policy: %s", err) -} - -// loadHostnamePolicy is a callback suitable for use with reloader.New() that -// will unmarshal a YAML hostname policy. -func (pa *AuthorityImpl) loadHostnamePolicy(contents []byte) error { - hash := sha256.Sum256(contents) - pa.log.Infof("loading hostname policy, sha256: %s", hex.EncodeToString(hash[:])) - var policy blockedNamesPolicy - err := yaml.Unmarshal(contents, &policy) + hash := sha256.Sum256(configBytes) + pa.log.Infof("loading identifier policy, sha256: %s", hex.EncodeToString(hash[:])) + var policy blockedIdentsPolicy + err = strictyaml.Unmarshal(configBytes, &policy) if err != nil { return err } if len(policy.HighRiskBlockedNames) == 0 { - return fmt.Errorf("No entries in HighRiskBlockedNames.") + return fmt.Errorf("no entries in HighRiskBlockedNames") } if len(policy.ExactBlockedNames) == 0 { - return fmt.Errorf("No entries in ExactBlockedNames.") + return fmt.Errorf("no entries in ExactBlockedNames") } - return pa.processHostnamePolicy(policy) + return pa.processIdentPolicy(policy) } -// processHostnamePolicy handles loading a new blockedNamesPolicy into the PA. -// All of the policy.ExactBlockedNames will be added to the -// wildcardExactBlocklist by processHostnamePolicy to ensure that wildcards for -// exact blocked names entries are forbidden. 
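The new AdminBlockedPrefixes entries are parsed with netip.ParsePrefix (as processIdentPolicy below shows), after which checking a candidate address is a linear scan with Prefix.Contains. A small sketch of that check, with illustrative documentation-range prefixes rather than real policy entries:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Prefixes as they might appear in AdminBlockedPrefixes.
	raw := []string{"192.0.2.0/24", "2001:db8::/32"}

	var blocklist []netip.Prefix
	for _, p := range raw {
		prefix, err := netip.ParsePrefix(p)
		if err != nil {
			panic(fmt.Sprintf("malformed prefix %q: %v", p, err))
		}
		blocklist = append(blocklist, prefix)
	}

	// Any address inside a blocked prefix is refused issuance.
	addr := netip.MustParseAddr("192.0.2.17")
	for _, prefix := range blocklist {
		if prefix.Contains(addr) {
			fmt.Println(addr, "is blocked by", prefix)
		}
	}
}
```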
-func (pa *AuthorityImpl) processHostnamePolicy(policy blockedNamesPolicy) error { +// processIdentPolicy handles loading a new blockedIdentsPolicy into the PA. All +// of the policy.ExactBlockedNames will be added to the wildcardFqdnBlocklist +// by processIdentPolicy to ensure that wildcards for exact blocked names +// entries are forbidden. +func (pa *AuthorityImpl) processIdentPolicy(policy blockedIdentsPolicy) error { nameMap := make(map[string]bool) for _, v := range policy.HighRiskBlockedNames { nameMap[v] = true @@ -117,6 +110,7 @@ func (pa *AuthorityImpl) processHostnamePolicy(policy blockedNamesPolicy) error for _, v := range policy.AdminBlockedNames { nameMap[v] = true } + exactNameMap := make(map[string]bool) wildcardNameMap := make(map[string]bool) for _, v := range policy.ExactBlockedNames { @@ -133,16 +127,28 @@ func (pa *AuthorityImpl) processHostnamePolicy(policy blockedNamesPolicy) error // at least be a "something." and a TLD like "com" if len(parts) < 2 { return fmt.Errorf( - "Malformed ExactBlockedNames entry, only one label: %q", v) + "malformed ExactBlockedNames entry, only one label: %q", v) } // Add the second part, the domain minus the first label, to the // wildcardNameMap to block issuance for `*.`+parts[1] wildcardNameMap[parts[1]] = true } + + var prefixes []netip.Prefix + for _, p := range policy.AdminBlockedPrefixes { + prefix, err := netip.ParsePrefix(p) + if err != nil { + return fmt.Errorf( + "malformed AdminBlockedPrefixes entry, not a prefix: %q", p) + } + prefixes = append(prefixes, prefix) + } + pa.blocklistMu.Lock() - pa.blocklist = nameMap - pa.exactBlocklist = exactNameMap - pa.wildcardExactBlocklist = wildcardNameMap + pa.domainBlocklist = nameMap + pa.fqdnBlocklist = exactNameMap + pa.wildcardFqdnBlocklist = wildcardNameMap + pa.ipPrefixBlocklist = prefixes pa.blocklistMu.Unlock() return nil } @@ -178,15 +184,15 @@ func isDNSCharacter(ch byte) bool { // If these values change, the related error messages should be updated. var ( - errInvalidIdentifier = berrors.MalformedError("Invalid identifier type") errNonPublic = berrors.MalformedError("Domain name does not end with a valid public suffix (TLD)") errICANNTLD = berrors.MalformedError("Domain name is an ICANN TLD") errPolicyForbidden = berrors.RejectedIdentifierError("The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy") errInvalidDNSCharacter = berrors.MalformedError("Domain name contains an invalid character") errNameTooLong = berrors.MalformedError("Domain name is longer than 253 bytes") - errIPAddress = berrors.MalformedError("The ACME server can not issue a certificate for an IP address") + errIPAddressInDNS = berrors.MalformedError("Identifier type is DNS but value is an IP address") + errIPInvalid = berrors.MalformedError("IP address is invalid") errTooManyLabels = berrors.MalformedError("Domain name has more than 10 labels (parts)") - errEmptyName = berrors.MalformedError("Domain name is empty") + errEmptyIdentifier = berrors.MalformedError("Identifier value (name) is empty") errNameEndsInDot = berrors.MalformedError("Domain name ends in a dot") errTooFewLabels = berrors.MalformedError("Domain name needs at least one dot") errLabelTooShort = berrors.MalformedError("Domain name can not have two dots in a row") @@ -197,25 +203,25 @@ var ( errMalformedWildcard = berrors.MalformedError("Domain name contains an invalid wildcard.
A wildcard is only permitted before the first dot in a domain name") errICANNTLDWildcard = berrors.MalformedError("Domain name is a wildcard for an ICANN TLD") errWildcardNotSupported = berrors.MalformedError("Wildcard domain names are not supported") + errUnsupportedIdent = berrors.MalformedError("Invalid identifier type") ) -// ValidDomain checks that a domain isn't: -// -// * empty -// * prefixed with the wildcard label `*.` -// * made of invalid DNS characters -// * longer than the maxDNSIdentifierLength -// * an IPv4 or IPv6 address -// * suffixed with just "." -// * made of too many DNS labels -// * made of any invalid DNS labels -// * suffixed with something other than an IANA registered TLD -// * exactly equal to an IANA registered TLD +// validNonWildcardDomain checks that a domain isn't: +// - empty +// - prefixed with the wildcard label `*.` +// - made of invalid DNS characters +// - longer than the maxDNSIdentifierLength +// - an IPv4 or IPv6 address +// - suffixed with just "." +// - made of too many DNS labels +// - made of any invalid DNS labels +// - suffixed with something other than an IANA registered TLD +// - exactly equal to an IANA registered TLD // -// It does _not_ check that the domain isn't on any PA blocked lists. -func ValidDomain(domain string) error { +// It does NOT ensure that the domain is absent from any PA blocked lists. +func validNonWildcardDomain(domain string) error { if domain == "" { - return errEmptyName + return errEmptyIdentifier } if strings.HasPrefix(domain, "*.") { @@ -232,8 +238,9 @@ func ValidDomain(domain string) error { return errNameTooLong } - if ip := net.ParseIP(domain); ip != nil { - return errIPAddress + _, err := netip.ParseAddr(domain) + if err == nil { + return errIPAddressInDNS } if strings.HasSuffix(domain, ".") { @@ -301,6 +308,66 @@ func ValidDomain(domain string) error { return nil } +// ValidDomain checks that a domain is valid and that it doesn't contain any +// invalid wildcard characters. It does NOT ensure that the domain is absent +// from any PA blocked lists. +func ValidDomain(domain string) error { + if strings.Count(domain, "*") <= 0 { + return validNonWildcardDomain(domain) + } + + // Names containing more than one wildcard are invalid. + if strings.Count(domain, "*") > 1 { + return errTooManyWildcards + } + + // If the domain has a wildcard character, but it isn't the leftmost + // label of the domain name, then the wildcard domain is malformed + if !strings.HasPrefix(domain, "*.") { + return errMalformedWildcard + } + + // The base domain is the wildcard request with the `*.` prefix removed + baseDomain := strings.TrimPrefix(domain, "*.") + + // Names must end in an ICANN TLD, but they must not be equal to an ICANN TLD. + icannTLD, err := iana.ExtractSuffix(baseDomain) + if err != nil { + return errNonPublic + } + // Names must have a non-wildcard label immediately adjacent to the ICANN + // TLD. No `*.com`! + if baseDomain == icannTLD { + return errICANNTLDWildcard + } + return validNonWildcardDomain(baseDomain) } + +// ValidIP checks that an IP address: +// - isn't empty +// - is an IPv4 or IPv6 address +// - doesn't contain a scope zone (RFC 4007) +// - isn't in an IANA special-purpose address registry +// +// It does NOT ensure that the IP address is absent from any PA blocked lists. +func ValidIP(ip string) error { + if ip == "" { + return errEmptyIdentifier + } + + // Check the output of netip.Addr.String(), to ensure the input complied + // with RFC 8738, Sec. 3.
("The identifier value MUST contain the textual + // form of the address as defined in RFC 1123, Sec. 2.1 for IPv4 and in RFC + // 5952, Sec. 4 for IPv6.") ParseAddr() will accept a non-compliant but + // otherwise valid string; String() will output a compliant string. + parsedIP, err := netip.ParseAddr(ip) + if err != nil || parsedIP.WithZone("").String() != ip { + return errIPInvalid + } + + return iana.IsReservedAddr(parsedIP) +} + // forbiddenMailDomains is a map of domain names we do not allow after the // @ symbol in contact mailto addresses. These are frequently used when // copy-pasting example configurations and would not result in expiration @@ -319,109 +386,139 @@ var forbiddenMailDomains = map[string]bool{ func ValidEmail(address string) error { email, err := mail.ParseAddress(address) if err != nil { - if len(address) > 254 { - address = address[:254] + "..." - } - return berrors.InvalidEmailError("%q is not a valid e-mail address", address) + return berrors.InvalidEmailError("unable to parse email address") } - splitEmail := strings.SplitN(email.Address, "@", -1) + splitEmail := strings.Split(email.Address, "@") domain := strings.ToLower(splitEmail[len(splitEmail)-1]) - err = ValidDomain(domain) + err = validNonWildcardDomain(domain) if err != nil { - return berrors.InvalidEmailError( - "contact email %q has invalid domain : %s", - email.Address, err) + return berrors.InvalidEmailError("contact email has invalid domain: %s", err) } if forbiddenMailDomains[domain] { - return berrors.InvalidEmailError( - "invalid contact domain. Contact emails @%s are forbidden", - domain) + // We're okay including the domain in the error message here because this + // case occurs only for a small block-list of domains listed above. + return berrors.InvalidEmailError("contact email has forbidden domain %q", domain) } return nil } +// subError returns an appropriately typed error based on the input error +func subError(ident identifier.ACMEIdentifier, err error) berrors.SubBoulderError { + var bErr *berrors.BoulderError + if errors.As(err, &bErr) { + return berrors.SubBoulderError{ + Identifier: ident, + BoulderError: bErr, + } + } else { + return berrors.SubBoulderError{ + Identifier: ident, + BoulderError: &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: err.Error(), + }, + } + } +} + // WillingToIssue determines whether the CA is willing to issue for the provided -// identifier. It expects domains in id to be lowercase to prevent mismatched -// cases breaking queries. -// -// We place several criteria on identifiers we are willing to issue for: +// identifiers. // -// * MUST self-identify as DNS identifiers -// * MUST contain only bytes in the DNS hostname character set -// * MUST NOT have more than maxLabels labels -// * MUST follow the DNS hostname syntax rules in RFC 1035 and RFC 2181 -// In particular: -// * MUST NOT contain underscores -// * MUST NOT match the syntax of an IP address -// * MUST end in a public suffix -// * MUST have at least one label in addition to the public suffix -// * MUST NOT be a label-wise suffix match for a name on the block list, -// where comparison is case-independent (normalized to lower case) +// It checks the criteria checked by `WellFormedIdentifiers`, and additionally +// checks whether any identifier is on a blocklist. 
// -// If WillingToIssue returns an error, it will be of type MalformedRequestError -// or RejectedIdentifierError +// If multiple identifiers are invalid, the error will contain suberrors +// specific to each identifier. // -// TODO(#5816): Consider making this method private, as it has no callers -// outside of this package. -func (pa *AuthorityImpl) WillingToIssue(id identifier.ACMEIdentifier) error { - if id.Type != identifier.DNS { - return errInvalidIdentifier - } - domain := id.Value - - err := ValidDomain(domain) +// Precondition: all input identifier values must be in lowercase. +func (pa *AuthorityImpl) WillingToIssue(idents identifier.ACMEIdentifiers) error { + err := WellFormedIdentifiers(idents) if err != nil { return err } - // Require no match against hostname block lists - err = pa.checkHostLists(domain) - if err != nil { - return err - } + var subErrors []berrors.SubBoulderError + for _, ident := range idents { + if !pa.IdentifierTypeEnabled(ident.Type) { + subErrors = append(subErrors, subError(ident, berrors.RejectedIdentifierError("The ACME server has disabled this identifier type"))) + continue + } - return nil + // Wildcard DNS identifiers are checked against an additional blocklist. + if ident.Type == identifier.TypeDNS && strings.Count(ident.Value, "*") > 0 { + // The base domain is the wildcard request with the `*.` prefix removed + baseDomain := strings.TrimPrefix(ident.Value, "*.") + + // The base domain can't be in the wildcard exact blocklist + err = pa.checkWildcardBlocklist(baseDomain) + if err != nil { + subErrors = append(subErrors, subError(ident, err)) + continue + } + } + + // For all identifier types, check whether the identifier value is + // covered by the regular blocklists. + err := pa.checkBlocklists(ident) + if err != nil { + subErrors = append(subErrors, subError(ident, err)) + continue + } + } + return combineSubErrors(subErrors) } -// WillingToIssueWildcards is an extension of WillingToIssue that accepts DNS -// identifiers for well formed wildcard domains in addition to regular -// identifiers. +// WellFormedIdentifiers returns an error if any of the provided identifiers do +// not meet these criteria: // -// All provided identifiers are run through WillingToIssue and any errors are -// returned. In addition to the regular WillingToIssue checks this function -// also checks each wildcard identifier to enforce that: +// For DNS identifiers: +// - MUST contain only lowercase characters, numbers, hyphens, and dots +// - MUST NOT have more than maxLabels labels +// - MUST follow the DNS hostname syntax rules in RFC 1035 and RFC 2181 // -// * The identifier is a DNS type identifier -// * There is at most one `*` wildcard character -// * That the wildcard character is the leftmost label -// * That the wildcard label is not immediately adjacent to a top level ICANN -// TLD -// * That the wildcard wouldn't cover an exact blocklist entry (e.g. an exact -// blocklist entry for "foo.example.com" should prevent issuance for -// "*.example.com") +// In particular, DNS identifiers: +// - MUST NOT contain underscores +// - MUST NOT match the syntax of an IP address +// - MUST end in a public suffix +// - MUST have at least one label in addition to the public suffix +// - MUST NOT be a label-wise suffix match for a name on the block list, + where comparison is case-independent (normalized to lower case)
-func (pa *AuthorityImpl) WillingToIssueWildcards(idents []identifier.ACMEIdentifier) error { +// If a DNS identifier contains a *, we additionally require: +// - There is at most one `*` wildcard character +// - That the wildcard character is the leftmost label +// - That the wildcard label is not immediately adjacent to a top level ICANN +// TLD +// +// For IP identifiers: +// - MUST match the syntax of an IP address +// - MUST NOT contain a scope zone (RFC 4007) +// - MUST NOT be in an IANA special-purpose address registry +// +// If multiple identifiers are invalid, the error will contain suberrors +// specific to each identifier. +func WellFormedIdentifiers(idents identifier.ACMEIdentifiers) error { var subErrors []berrors.SubBoulderError for _, ident := range idents { - err := pa.willingToIssueWildcard(ident) - if err != nil { - var bErr *berrors.BoulderError - if errors.As(err, &bErr) { - subErrors = append(subErrors, berrors.SubBoulderError{ - Identifier: ident, - BoulderError: bErr}) - } else { - subErrors = append(subErrors, berrors.SubBoulderError{ - Identifier: ident, - BoulderError: &berrors.BoulderError{ - Type: berrors.RejectedIdentifier, - Detail: err.Error(), - }}) + switch ident.Type { + case identifier.TypeDNS: + err := ValidDomain(ident.Value) + if err != nil { + subErrors = append(subErrors, subError(ident, err)) } + case identifier.TypeIP: + err := ValidIP(ident.Value) + if err != nil { + subErrors = append(subErrors, subError(ident, err)) + } + default: + subErrors = append(subErrors, subError(ident, errUnsupportedIdent)) } } + return combineSubErrors(subErrors) +} + +func combineSubErrors(subErrors []berrors.SubBoulderError) error { if len(subErrors) > 0 { // If there was only one error, then use it as the top level error that is // returned. @@ -447,154 +544,140 @@ func (pa *AuthorityImpl) WillingToIssueWildcards(idents []identifier.ACMEIdentif return nil } -// willingToIssueWildcard vets a single identifier. It is used by -// the plural WillingToIssueWildcards when evaluating a list of identifiers. -func (pa *AuthorityImpl) willingToIssueWildcard(ident identifier.ACMEIdentifier) error { - // We're only willing to process DNS identifiers - if ident.Type != identifier.DNS { - return errInvalidIdentifier - } - rawDomain := ident.Value - - // If there is more than one wildcard in the domain the ident is invalid - if strings.Count(rawDomain, "*") > 1 { - return errTooManyWildcards - } - - // If there is exactly one wildcard in the domain we need to do some special - // processing to ensure that it is a well formed wildcard request and to - // translate the identifier to its base domain for use with WillingToIssue - if strings.Count(rawDomain, "*") == 1 { - // If the rawDomain has a wildcard character, but it isn't the first most - // label of the domain name then the wildcard domain is malformed - if !strings.HasPrefix(rawDomain, "*.") { - return errMalformedWildcard - } - // The base domain is the wildcard request with the `*.` prefix removed - baseDomain := strings.TrimPrefix(rawDomain, "*.") - // Names must end in an ICANN TLD, but they must not be equal to an ICANN TLD. - icannTLD, err := iana.ExtractSuffix(baseDomain) - if err != nil { - return errNonPublic - } - // Names must have a non-wildcard label immediately adjacent to the ICANN - // TLD. No `*.com`! 
- if baseDomain == icannTLD { - return errICANNTLDWildcard - } - // The base domain can't be in the wildcard exact blocklist - err = pa.checkWildcardHostList(baseDomain) - if err != nil { - return err - } - // Check that the PA is willing to issue for the base domain - // Since the base domain without the "*." may trip the exact hostname policy - // blocklist when the "*." is removed we replace it with a single "x" - // character to differentiate "*.example.com" from "example.com" for the - // exact hostname check. - // - // NOTE(@cpu): This is pretty hackish! Boulder issue #3323[0] describes - // a better follow-up that we should land to replace this code. - // [0] https://github.com/letsencrypt/boulder/issues/3323 - return pa.WillingToIssue(identifier.ACMEIdentifier{ - Type: identifier.DNS, - Value: "x." + baseDomain, - }) - } - - return pa.WillingToIssue(ident) -} - -// checkWildcardHostList checks the wildcardExactBlocklist for a given domain. +// checkWildcardBlocklist checks the wildcardFqdnBlocklist for a given domain. // If the domain is not present on the list nil is returned, otherwise // errPolicyForbidden is returned. -func (pa *AuthorityImpl) checkWildcardHostList(domain string) error { +func (pa *AuthorityImpl) checkWildcardBlocklist(domain string) error { pa.blocklistMu.RLock() defer pa.blocklistMu.RUnlock() - if pa.blocklist == nil { - return fmt.Errorf("Hostname policy not yet loaded.") + if pa.wildcardFqdnBlocklist == nil { + return fmt.Errorf("identifier policy not yet loaded") } - if pa.wildcardExactBlocklist[domain] { + if pa.wildcardFqdnBlocklist[domain] { return errPolicyForbidden } return nil } -func (pa *AuthorityImpl) checkHostLists(domain string) error { +func (pa *AuthorityImpl) checkBlocklists(ident identifier.ACMEIdentifier) error { pa.blocklistMu.RLock() defer pa.blocklistMu.RUnlock() - if pa.blocklist == nil { - return fmt.Errorf("Hostname policy not yet loaded.") + if pa.domainBlocklist == nil { + return fmt.Errorf("identifier policy not yet loaded") } - labels := strings.Split(domain, ".") - for i := range labels { - joined := strings.Join(labels[i:], ".") - if pa.blocklist[joined] { - return errPolicyForbidden + switch ident.Type { + case identifier.TypeDNS: + labels := strings.Split(ident.Value, ".") + for i := range labels { + joined := strings.Join(labels[i:], ".") + if pa.domainBlocklist[joined] { + return errPolicyForbidden + } } - } - if pa.exactBlocklist[domain] { - return errPolicyForbidden + if pa.fqdnBlocklist[ident.Value] { + return errPolicyForbidden + } + case identifier.TypeIP: + ip, err := netip.ParseAddr(ident.Value) + if err != nil { + return errIPInvalid + } + for _, prefix := range pa.ipPrefixBlocklist { + if prefix.Contains(ip.WithZone("")) { + return errPolicyForbidden + } + } + default: + return errUnsupportedIdent } return nil } -// ChallengesFor makes a decision of what challenges are acceptable for -// the given identifier. -func (pa *AuthorityImpl) ChallengesFor(identifier identifier.ACMEIdentifier) ([]core.Challenge, error) { - challenges := []core.Challenge{} - - token := core.NewToken() - - // If the identifier is for a DNS wildcard name we only - // provide a DNS-01 challenge as a matter of CA policy.
- if !pa.ChallengeTypeEnabled(core.ChallengeTypeDNS01) { - return nil, fmt.Errorf( - "Challenges requested for wildcard identifier but DNS-01 " + - "challenge type is not enabled") - } - // Only provide a DNS-01-Wildcard challenge - challenges = []core.Challenge{core.DNSChallenge01(token)} - } else { - // Otherwise we collect up challenges based on what is enabled. - if pa.ChallengeTypeEnabled(core.ChallengeTypeHTTP01) { - challenges = append(challenges, core.HTTPChallenge01(token)) +// ChallengeTypesFor determines which challenge types are acceptable for the +// given identifier. This determination is made purely based on the identifier, +// and not based on which challenge types are enabled, so that challenge type +// filtering can happen dynamically at request time rather than being set in stone +// at creation time. +func (pa *AuthorityImpl) ChallengeTypesFor(ident identifier.ACMEIdentifier) ([]core.AcmeChallenge, error) { + switch ident.Type { + case identifier.TypeDNS: + // If the identifier is for a DNS wildcard name we only provide DNS-01 + // or DNS-ACCOUNT-01 challenges, to comply with the BRs Sections 3.2.2.4.19 + // and 3.2.2.4.20 stating that ACME HTTP-01 and TLS-ALPN-01 are not + // suitable for validating Wildcard Domains. + if strings.HasPrefix(ident.Value, "*.") { + challenges := []core.AcmeChallenge{core.ChallengeTypeDNS01} + if features.Get().DNSAccount01Enabled { + challenges = append(challenges, core.ChallengeTypeDNSAccount01) + } + return challenges, nil } - if pa.ChallengeTypeEnabled(core.ChallengeTypeTLSALPN01) { - challenges = append(challenges, core.TLSALPNChallenge01(token)) + // Return all challenge types we support for non-wildcard DNS identifiers. + challenges := []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, + core.ChallengeTypeDNS01, + core.ChallengeTypeTLSALPN01, } - - if pa.ChallengeTypeEnabled(core.ChallengeTypeDNS01) { - challenges = append(challenges, core.DNSChallenge01(token)) + if features.Get().DNSAccount01Enabled { + challenges = append(challenges, core.ChallengeTypeDNSAccount01) } + return challenges, nil + case identifier.TypeIP: + // Only HTTP-01 and TLS-ALPN-01 are suitable for IP address identifiers + // per RFC 8738, Sec. 4. + return []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, + core.ChallengeTypeTLSALPN01, + }, nil + default: + // Otherwise return an error because we don't support any challenges for this + // identifier type. + return nil, fmt.Errorf("unrecognized identifier type %q", ident.Type) + } +} + +// ChallengeTypeEnabled returns whether the specified challenge type is enabled +func (pa *AuthorityImpl) ChallengeTypeEnabled(t core.AcmeChallenge) bool { + pa.blocklistMu.RLock() + defer pa.blocklistMu.RUnlock() + return pa.enabledChallenges[t] +} + +// CheckAuthzChallenges determines that an authorization was fulfilled by a +// challenge that is currently enabled and was appropriate for the kind of +// identifier in the authorization. +func (pa *AuthorityImpl) CheckAuthzChallenges(authz *core.Authorization) error { + chall, err := authz.SolvedBy() + if err != nil { + return err + } + + if !pa.ChallengeTypeEnabled(chall) { + return errors.New("authorization fulfilled by disabled challenge type") } - // We shuffle the challenges to prevent ACME clients from relying on the - // specific order that boulder returns them in.
- shuffled := make([]core.Challenge, len(challenges)) + challTypes, err := pa.ChallengeTypesFor(authz.Identifier) + if err != nil { + return err + } - pa.rngMu.Lock() - defer pa.rngMu.Unlock() - for i, challIdx := range pa.pseudoRNG.Perm(len(challenges)) { - shuffled[i] = challenges[challIdx] + if !slices.Contains(challTypes, chall) { + return errors.New("authorization fulfilled by inapplicable challenge type") } - return shuffled, nil + return nil } -// ChallengeTypeEnabled returns whether the specified challenge type is enabled -func (pa *AuthorityImpl) ChallengeTypeEnabled(t core.AcmeChallenge) bool { +// IdentifierTypeEnabled returns whether the specified identifier type is enabled +func (pa *AuthorityImpl) IdentifierTypeEnabled(t identifier.IdentifierType) bool { pa.blocklistMu.RLock() defer pa.blocklistMu.RUnlock() - return pa.enabledChallenges[t] + return pa.enabledIdentifiers[t] } diff --git a/policy/pa_test.go b/policy/pa_test.go index 3b29b7c2d47..891f529fd39 100644 --- a/policy/pa_test.go +++ b/policy/pa_test.go @@ -1,120 +1,192 @@ package policy import ( - "io/ioutil" + "fmt" + "net/netip" "os" + "strings" "testing" + "gopkg.in/yaml.v3" + "github.com/letsencrypt/boulder/core" berrors "github.com/letsencrypt/boulder/errors" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/identifier" + blog "github.com/letsencrypt/boulder/log" "github.com/letsencrypt/boulder/test" - "gopkg.in/yaml.v2" ) -var enabledChallenges = map[core.AcmeChallenge]bool{ - core.ChallengeTypeHTTP01: true, - core.ChallengeTypeDNS01: true, -} - func paImpl(t *testing.T) *AuthorityImpl { - pa, err := New(enabledChallenges) + enabledChallenges := map[core.AcmeChallenge]bool{ + core.ChallengeTypeHTTP01: true, + core.ChallengeTypeDNS01: true, + core.ChallengeTypeTLSALPN01: true, + core.ChallengeTypeDNSAccount01: true, + } + + enabledIdentifiers := map[identifier.IdentifierType]bool{ + identifier.TypeDNS: true, + identifier.TypeIP: true, + } + + pa, err := New(enabledIdentifiers, enabledChallenges, blog.NewMock()) if err != nil { t.Fatalf("Couldn't create policy implementation: %s", err) } return pa } -func TestWillingToIssue(t *testing.T) { +func TestWellFormedIdentifiers(t *testing.T) { testCases := []struct { - domain string - err error + ident identifier.ACMEIdentifier + err error }{ - {``, errEmptyName}, // Empty name - {`zomb!.com`, errInvalidDNSCharacter}, // ASCII character out of range - {`emailaddress@myseriously.present.com`, errInvalidDNSCharacter}, - {`user:pass@myseriously.present.com`, errInvalidDNSCharacter}, - {`zömbo.com`, errInvalidDNSCharacter}, // non-ASCII character - {`127.0.0.1`, errIPAddress}, // IPv4 address - {`fe80::1:1`, errInvalidDNSCharacter}, // IPv6 addresses - {`[2001:db8:85a3:8d3:1319:8a2e:370:7348]`, errInvalidDNSCharacter}, // unexpected IPv6 variants - {`[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443`, errInvalidDNSCharacter}, - {`2001:db8::/32`, errInvalidDNSCharacter}, - {`a.b.c.d.e.f.g.h.i.j.k`, errTooManyLabels}, // Too many labels (>10) - - {`www.0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345.com`, errNameTooLong}, // Too long (254 characters) - - 
{`www.ef0123456789abcdef013456789abcdef012345.789abcdef012345679abcdef0123456789abcdef01234.6789abcdef0123456789abcdef0.23456789abcdef0123456789a.cdef0123456789abcdef0123456789ab.def0123456789abcdef0123456789.bcdef0123456789abcdef012345.com`, nil}, // OK, not too long (240 characters) - - {`www.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz.com`, errLabelTooLong}, // Label too long (>63 characters) - - {`www.-ombo.com`, errInvalidDNSCharacter}, // Label starts with '-' - {`www.zomb-.com`, errInvalidDNSCharacter}, // Label ends with '-' - {`xn--.net`, errInvalidDNSCharacter}, // Label ends with '-' - {`-0b.net`, errInvalidDNSCharacter}, // First label begins with '-' - {`-0.net`, errInvalidDNSCharacter}, // First label begins with '-' - {`-.net`, errInvalidDNSCharacter}, // First label is only '-' - {`---.net`, errInvalidDNSCharacter}, // First label is only hyphens - {`0`, errTooFewLabels}, - {`1`, errTooFewLabels}, - {`*`, errInvalidDNSCharacter}, - {`**`, errInvalidDNSCharacter}, - {`*.*`, errWildcardNotSupported}, - {`zombo*com`, errInvalidDNSCharacter}, - {`*.com`, errWildcardNotSupported}, - {`*.zombo.com`, errWildcardNotSupported}, - {`..a`, errLabelTooShort}, - {`a..a`, errLabelTooShort}, - {`.a..a`, errLabelTooShort}, - {`..foo.com`, errLabelTooShort}, - {`.`, errNameEndsInDot}, - {`..`, errNameEndsInDot}, - {`a..`, errNameEndsInDot}, - {`.....`, errNameEndsInDot}, - {`.a.`, errNameEndsInDot}, - {`www.zombo.com.`, errNameEndsInDot}, - {`www.zombo_com.com`, errInvalidDNSCharacter}, - {`\uFEFF`, errInvalidDNSCharacter}, // Byte order mark - {`\uFEFFwww.zombo.com`, errInvalidDNSCharacter}, - {`www.zom\u202Ebo.com`, errInvalidDNSCharacter}, // Right-to-Left Override - {`\u202Ewww.zombo.com`, errInvalidDNSCharacter}, - {`www.zom\u200Fbo.com`, errInvalidDNSCharacter}, // Right-to-Left Mark - {`\u200Fwww.zombo.com`, errInvalidDNSCharacter}, + // Invalid identifier types + {identifier.ACMEIdentifier{}, errUnsupportedIdent}, // Empty identifier type + {identifier.ACMEIdentifier{Type: "fnord", Value: "uh-oh, Spaghetti-Os[tm]"}, errUnsupportedIdent}, + + // Empty identifier values + {identifier.NewDNS(``), errEmptyIdentifier}, // Empty DNS identifier + {identifier.ACMEIdentifier{Type: "ip"}, errEmptyIdentifier}, // Empty IP identifier + + // DNS follies + + {identifier.NewDNS(`zomb!.com`), errInvalidDNSCharacter}, // ASCII character out of range + {identifier.NewDNS(`emailaddress@myseriously.present.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`user:pass@myseriously.present.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`zömbo.com`), errInvalidDNSCharacter}, // non-ASCII character + {identifier.NewDNS(`127.0.0.1`), errIPAddressInDNS}, // IPv4 address + {identifier.NewDNS(`fe80::1:1`), errInvalidDNSCharacter}, // IPv6 address + {identifier.NewDNS(`[2001:db8:85a3:8d3:1319:8a2e:370:7348]`), errInvalidDNSCharacter}, // unexpected IPv6 variants + {identifier.NewDNS(`[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443`), errInvalidDNSCharacter}, + {identifier.NewDNS(`2001:db8::/32`), errInvalidDNSCharacter}, + {identifier.NewDNS(`a.b.c.d.e.f.g.h.i.j.k`), errTooManyLabels}, // Too many labels (>10) + + {identifier.NewDNS(`www.0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345.com`), 
errNameTooLong}, // Too long (254 characters) + + {identifier.NewDNS(`www.ef0123456789abcdef013456789abcdef012345.789abcdef012345679abcdef0123456789abcdef01234.6789abcdef0123456789abcdef0.23456789abcdef0123456789a.cdef0123456789abcdef0123456789ab.def0123456789abcdef0123456789.bcdef0123456789abcdef012345.com`), nil}, // OK, not too long (240 characters) + + {identifier.NewDNS(`www.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz.com`), errLabelTooLong}, // Label too long (>63 characters) + + {identifier.NewDNS(`www.-ombo.com`), errInvalidDNSCharacter}, // Label starts with '-' + {identifier.NewDNS(`www.zomb-.com`), errInvalidDNSCharacter}, // Label ends with '-' + {identifier.NewDNS(`xn--.net`), errInvalidDNSCharacter}, // Label ends with '-' + {identifier.NewDNS(`-0b.net`), errInvalidDNSCharacter}, // First label begins with '-' + {identifier.NewDNS(`-0.net`), errInvalidDNSCharacter}, // First label begins with '-' + {identifier.NewDNS(`-.net`), errInvalidDNSCharacter}, // First label is only '-' + {identifier.NewDNS(`---.net`), errInvalidDNSCharacter}, // First label is only hyphens + {identifier.NewDNS(`0`), errTooFewLabels}, + {identifier.NewDNS(`1`), errTooFewLabels}, + {identifier.NewDNS(`*`), errMalformedWildcard}, + {identifier.NewDNS(`**`), errTooManyWildcards}, + {identifier.NewDNS(`*.*`), errTooManyWildcards}, + {identifier.NewDNS(`zombo*com`), errMalformedWildcard}, + {identifier.NewDNS(`*.com`), errICANNTLDWildcard}, + {identifier.NewDNS(`..a`), errLabelTooShort}, + {identifier.NewDNS(`a..a`), errLabelTooShort}, + {identifier.NewDNS(`.a..a`), errLabelTooShort}, + {identifier.NewDNS(`..foo.com`), errLabelTooShort}, + {identifier.NewDNS(`.`), errNameEndsInDot}, + {identifier.NewDNS(`..`), errNameEndsInDot}, + {identifier.NewDNS(`a..`), errNameEndsInDot}, + {identifier.NewDNS(`.....`), errNameEndsInDot}, + {identifier.NewDNS(`.a.`), errNameEndsInDot}, + {identifier.NewDNS(`www.zombo.com.`), errNameEndsInDot}, + {identifier.NewDNS(`www.zombo_com.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`\uFEFF`), errInvalidDNSCharacter}, // Byte order mark + {identifier.NewDNS(`\uFEFFwww.zombo.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`www.zom\u202Ebo.com`), errInvalidDNSCharacter}, // Right-to-Left Override + {identifier.NewDNS(`\u202Ewww.zombo.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`www.zom\u200Fbo.com`), errInvalidDNSCharacter}, // Right-to-Left Mark + {identifier.NewDNS(`\u200Fwww.zombo.com`), errInvalidDNSCharacter}, // Underscores are technically disallowed in DNS. Some DNS // implementations accept them but we will be conservative. - {`www.zom_bo.com`, errInvalidDNSCharacter}, - {`zombocom`, errTooFewLabels}, - {`localhost`, errTooFewLabels}, - {`mail`, errTooFewLabels}, + {identifier.NewDNS(`www.zom_bo.com`), errInvalidDNSCharacter}, + {identifier.NewDNS(`zombocom`), errTooFewLabels}, + {identifier.NewDNS(`localhost`), errTooFewLabels}, + {identifier.NewDNS(`mail`), errTooFewLabels}, // disallow capitalized letters for #927 - {`CapitalizedLetters.com`, errInvalidDNSCharacter}, + {identifier.NewDNS(`CapitalizedLetters.com`), errInvalidDNSCharacter}, - {`example.acting`, errNonPublic}, - {`example.internal`, errNonPublic}, + {identifier.NewDNS(`example.acting`), errNonPublic}, + {identifier.NewDNS(`example.internal`), errNonPublic}, // All-numeric final label not okay. 
- {`www.zombo.163`, errNonPublic}, - {`xn--109-3veba6djs1bfxlfmx6c9g.xn--f1awi.xn--p1ai`, errMalformedIDN}, // Not in Unicode NFC - {`bq--abwhky3f6fxq.jakacomo.com`, errInvalidRLDH}, + {identifier.NewDNS(`www.zombo.163`), errNonPublic}, + {identifier.NewDNS(`xn--109-3veba6djs1bfxlfmx6c9g.xn--f1awi.xn--p1ai`), errMalformedIDN}, // Not in Unicode NFC + {identifier.NewDNS(`bq--abwhky3f6fxq.jakacomo.com`), errInvalidRLDH}, // Three hyphens starting at third char of first label. - {`bq---abwhky3f6fxq.jakacomo.com`, errInvalidRLDH}, + {identifier.NewDNS(`bq---abwhky3f6fxq.jakacomo.com`), errInvalidRLDH}, // Three hyphens starting at second char of first label. - {`h---test.hk2yz.org`, errInvalidRLDH}, + {identifier.NewDNS(`h---test.hk2yz.org`), errInvalidRLDH}, + {identifier.NewDNS(`co.uk`), errICANNTLD}, + {identifier.NewDNS(`foo.er`), errICANNTLD}, + + // IP oopsies + + {identifier.ACMEIdentifier{Type: "ip", Value: `zombo.com`}, errIPInvalid}, // That's DNS! + + // Unexpected IPv4 variants + {identifier.ACMEIdentifier{Type: "ip", Value: `192.168.1.1.1`}, errIPInvalid}, // extra octet + {identifier.ACMEIdentifier{Type: "ip", Value: `192.168.1.256`}, errIPInvalid}, // octet out of range + {identifier.ACMEIdentifier{Type: "ip", Value: `192.168.1.a1`}, errIPInvalid}, // character out of range + {identifier.ACMEIdentifier{Type: "ip", Value: `192.168.1.0/24`}, errIPInvalid}, // with CIDR + {identifier.ACMEIdentifier{Type: "ip", Value: `192.168.1.1:443`}, errIPInvalid}, // with port + {identifier.ACMEIdentifier{Type: "ip", Value: `0xc0a80101`}, errIPInvalid}, // as hex + {identifier.ACMEIdentifier{Type: "ip", Value: `1.1.168.192.in-addr.arpa`}, errIPInvalid}, // reverse DNS + + // Unexpected IPv6 variants + {identifier.ACMEIdentifier{Type: "ip", Value: `2602:80a:6000:abad:cafe::1%lo`}, errIPInvalid}, // scope zone (RFC 4007) + {identifier.ACMEIdentifier{Type: "ip", Value: `2602:80a:6000:abad:cafe::1%`}, errIPInvalid}, // empty scope zone (RFC 4007) + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa:a:c0ff:ee:a:bad:deed:ffff`}, errIPInvalid}, // extra octet + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa:a:c0ff:ee:a:bad:mead`}, errIPInvalid}, // character out of range + {identifier.ACMEIdentifier{Type: "ip", Value: `2001:db8::/32`}, errIPInvalid}, // with CIDR + {identifier.ACMEIdentifier{Type: "ip", Value: `[3fff:aaa:a:c0ff:ee:a:bad:deed]`}, errIPInvalid}, // in brackets + {identifier.ACMEIdentifier{Type: "ip", Value: `[3fff:aaa:a:c0ff:ee:a:bad:deed]:443`}, errIPInvalid}, // in brackets, with port + {identifier.ACMEIdentifier{Type: "ip", Value: `0x3fff0aaa000ac0ff00ee000a0baddeed`}, errIPInvalid}, // as hex + {identifier.ACMEIdentifier{Type: "ip", Value: `d.e.e.d.d.a.b.0.a.0.0.0.e.e.0.0.f.f.0.c.a.0.0.0.a.a.a.0.f.f.f.3.ip6.arpa`}, errIPInvalid}, // reverse DNS + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:0aaa:a:c0ff:ee:a:bad:deed`}, errIPInvalid}, // leading 0 in 2nd octet (RFC 5952, Sec. 4.1) + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa:0:0:0:a:bad:deed`}, errIPInvalid}, // lone 0s in 3rd-5th octets, :: not used (RFC 5952, Sec. 4.2.1) + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa::c0ff:ee:a:bad:deed`}, errIPInvalid}, // :: used for just one empty octet (RFC 5952, Sec. 4.2.2) + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa::ee:0:0:0`}, errIPInvalid}, // :: used for the shorter of two possible collapses (RFC 5952, Sec.
4.2.3) + {identifier.ACMEIdentifier{Type: "ip", Value: `fe80:0:0:0:a::`}, errIPInvalid}, // :: used for the last of two possible equal-length collapses (RFC 5952, Sec. 4.2.3) + {identifier.ACMEIdentifier{Type: "ip", Value: `3fff:aaa:a:C0FF:EE:a:bad:deed`}, errIPInvalid}, // alpha characters capitalized (RFC 5952, Sec. 4.3) + {identifier.ACMEIdentifier{Type: "ip", Value: `::ffff:192.168.1.1`}, berrors.MalformedError("IP address is in a reserved address block")}, // IPv6-encapsulated IPv4 + + // IANA special-purpose address blocks + {identifier.NewIP(netip.MustParseAddr("192.0.2.129")), berrors.MalformedError("IP address is in a reserved address block")}, // Documentation (TEST-NET-1) + {identifier.NewIP(netip.MustParseAddr("2001:db8:eee:eeee:eeee:eeee:d01:f1")), berrors.MalformedError("IP address is in a reserved address block")}, // Documentation } - shouldBeTLDError := []string{ - `co.uk`, - `foo.bd`, + // Test syntax errors + for _, tc := range testCases { + err := WellFormedIdentifiers(identifier.ACMEIdentifiers{tc.ident}) + if tc.err == nil { + test.AssertNil(t, err, fmt.Sprintf("Unexpected error for %q identifier %q, got %s", tc.ident.Type, tc.ident.Value, err)) + } else { + test.AssertError(t, err, fmt.Sprintf("Expected error for %q identifier %q, but got none", tc.ident.Type, tc.ident.Value)) + var berr *berrors.BoulderError + test.AssertErrorWraps(t, err, &berr) + test.AssertContains(t, berr.Error(), tc.err.Error()) + } } +} - shouldBeBlocked := []string{ - `highvalue.website1.org`, - `website2.co.uk`, - `www.website3.com`, - `lots.of.labels.website4.com`, - `banned.in.dc.com`, - `bad.brains.banned.in.dc.com`, +func TestWillingToIssue(t *testing.T) { + shouldBeBlocked := identifier.ACMEIdentifiers{ + identifier.NewDNS(`highvalue.website1.org`), + identifier.NewDNS(`website2.co.uk`), + identifier.NewDNS(`www.website3.com`), + identifier.NewDNS(`lots.of.labels.website4.com`), + identifier.NewDNS(`banned.in.dc.com`), + identifier.NewDNS(`bad.brains.banned.in.dc.com`), + identifier.NewIP(netip.MustParseAddr(`64.112.117.66`)), + identifier.NewIP(netip.MustParseAddr(`2602:80a:6000:666::1`)), + identifier.NewIP(netip.MustParseAddr(`2602:80a:6000:666::1%lo`)), + identifier.NewIP(netip.MustParseAddr(`ff00::1`)), + identifier.NewIP(netip.MustParseAddr(`ff10::1`)), + identifier.NewIP(netip.MustParseAddr(`ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff`)), } blocklistContents := []string{ `website2.com`, @@ -128,93 +200,81 @@ func TestWillingToIssue(t *testing.T) { `highvalue.website1.org`, `dl.website1.org`, } - adminBlockedContents := []string{ + adminBlockedNamesContents := []string{ `banned.in.dc.com`, } + adminBlockedPrefixesContents := []string{ + `64.112.117.66/32`, + `224.0.0.0/4`, + `2602:80a:6000:666::/64`, + `ff00::/8`, + } - shouldBeAccepted := []string{ - `lowvalue.website1.org`, - `website4.sucks`, - "www.unrelated.com", - "unrelated.com", - "www.8675309.com", - "8675309.com", - "web5ite2.com", - "www.web-site2.com", + shouldBeAccepted := identifier.ACMEIdentifiers{ + identifier.NewDNS(`lowvalue.website1.org`), + identifier.NewDNS(`website4.sucks`), + identifier.NewDNS(`www.unrelated.com`), + identifier.NewDNS(`unrelated.com`), + identifier.NewDNS(`www.8675309.com`), + identifier.NewDNS(`8675309.com`), + identifier.NewDNS(`web5ite2.com`), + identifier.NewDNS(`www.web-site2.com`), + identifier.NewDNS(`www.highvalue.website1.org`), + identifier.NewIP(netip.MustParseAddr(`64.112.117.67`)), + identifier.NewIP(netip.MustParseAddr(`2620:fe::fe`)), + 
identifier.NewIP(netip.MustParseAddr(`2602:80a:6000:667::`)), } - policy := blockedNamesPolicy{ + policy := blockedIdentsPolicy{ HighRiskBlockedNames: blocklistContents, ExactBlockedNames: exactBlocklistContents, - AdminBlockedNames: adminBlockedContents, + AdminBlockedNames: adminBlockedNamesContents, + AdminBlockedPrefixes: adminBlockedPrefixesContents, } yamlPolicyBytes, err := yaml.Marshal(policy) test.AssertNotError(t, err, "Couldn't YAML serialize blocklist") - yamlPolicyFile, _ := ioutil.TempFile("", "test-blocklist.*.yaml") + yamlPolicyFile, _ := os.CreateTemp("", "test-blocklist.*.yaml") defer os.Remove(yamlPolicyFile.Name()) - err = ioutil.WriteFile(yamlPolicyFile.Name(), yamlPolicyBytes, 0640) + err = os.WriteFile(yamlPolicyFile.Name(), yamlPolicyBytes, 0640) test.AssertNotError(t, err, "Couldn't write YAML blocklist") pa := paImpl(t) - err = pa.SetHostnamePolicyFile(yamlPolicyFile.Name()) + err = pa.LoadIdentPolicyFile(yamlPolicyFile.Name()) test.AssertNotError(t, err, "Couldn't load rules") - // Test for invalid identifier type - ident := identifier.ACMEIdentifier{Type: "ip", Value: "example.com"} - err = pa.WillingToIssue(ident) - if err != errInvalidIdentifier { - t.Error("Identifier was not correctly forbidden: ", ident) - } - - // Test syntax errors - for _, tc := range testCases { - ident := identifier.DNSIdentifier(tc.domain) - err := pa.WillingToIssue(ident) - if err != tc.err { - t.Errorf("WillingToIssue(%q) = %q, expected %q", tc.domain, err, tc.err) - } - } - // Invalid encoding - err = pa.WillingToIssue(identifier.DNSIdentifier("www.xn--m.com")) + err = pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS("www.xn--m.com")}) test.AssertError(t, err, "WillingToIssue didn't fail on a malformed IDN") + // Invalid identifier type + err = pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.ACMEIdentifier{Type: "fnord", Value: "uh-oh, Spaghetti-Os[tm]"}}) + test.AssertError(t, err, "WillingToIssue didn't fail on an invalid identifier type") // Valid encoding - err = pa.WillingToIssue(identifier.DNSIdentifier("www.xn--mnich-kva.com")) + err = pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS("www.xn--mnich-kva.com")}) test.AssertNotError(t, err, "WillingToIssue failed on a properly formed IDN") // IDN TLD - err = pa.WillingToIssue(identifier.DNSIdentifier("xn--example--3bhk5a.xn--p1ai")) + err = pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS("xn--example--3bhk5a.xn--p1ai")}) test.AssertNotError(t, err, "WillingToIssue failed on a properly formed domain with IDN TLD") features.Reset() - // Test domains that are equal to public suffixes - for _, domain := range shouldBeTLDError { - ident := identifier.DNSIdentifier(domain) - err := pa.WillingToIssue(ident) - if err != errICANNTLD { - t.Error("Identifier was not correctly forbidden: ", ident, err) - } - } - - // Test expected blocked domains - for _, domain := range shouldBeBlocked { - ident := identifier.DNSIdentifier(domain) - err := pa.WillingToIssue(ident) - if err != errPolicyForbidden { - t.Error("Identifier was not correctly forbidden: ", ident, err) - } + // Test expected blocked identifiers + for _, ident := range shouldBeBlocked { + err := pa.WillingToIssue(identifier.ACMEIdentifiers{ident}) + test.AssertError(t, err, "identifier was not correctly forbidden") + var berr *berrors.BoulderError + test.AssertErrorWraps(t, err, &berr) + test.AssertContains(t, berr.Detail, errPolicyForbidden.Error()) } - // Test acceptance of good names - for _, domain := range shouldBeAccepted { 
- ident := identifier.DNSIdentifier(domain) - err := pa.WillingToIssue(ident) - test.AssertNotError(t, err, "identiier was incorrectly forbidden") + // Test acceptance of good identifiers + for _, ident := range shouldBeAccepted { + err := pa.WillingToIssue(identifier.ACMEIdentifiers{ident}) + test.AssertNotError(t, err, "identifier was incorrectly forbidden") } } -func TestWillingToIssueWildcard(t *testing.T) { +func TestWillingToIssue_Wildcards(t *testing.T) { bannedDomains := []string{ "zombo.gov.us", } @@ -223,205 +283,302 @@ func TestWillingToIssueWildcard(t *testing.T) { } pa := paImpl(t) - bannedBytes, err := yaml.Marshal(blockedNamesPolicy{ + bannedBytes, err := yaml.Marshal(blockedIdentsPolicy{ HighRiskBlockedNames: bannedDomains, ExactBlockedNames: exactBannedDomains, }) test.AssertNotError(t, err, "Couldn't serialize banned list") - f, _ := ioutil.TempFile("", "test-wildcard-banlist.*.yaml") + f, _ := os.CreateTemp("", "test-wildcard-banlist.*.yaml") defer os.Remove(f.Name()) - err = ioutil.WriteFile(f.Name(), bannedBytes, 0640) + err = os.WriteFile(f.Name(), bannedBytes, 0640) test.AssertNotError(t, err, "Couldn't write serialized banned list to file") - err = pa.SetHostnamePolicyFile(f.Name()) + err = pa.LoadIdentPolicyFile(f.Name()) test.AssertNotError(t, err, "Couldn't load policy contents from file") testCases := []struct { Name string - Ident identifier.ACMEIdentifier + Domain string ExpectedErr error }{ - { - Name: "Non-DNS identifier", - Ident: identifier.ACMEIdentifier{Type: "nickname", Value: "cpu"}, - ExpectedErr: errInvalidIdentifier, - }, { Name: "Too many wildcards", - Ident: identifier.DNSIdentifier("ok.*.whatever.*.example.com"), + Domain: "ok.*.whatever.*.example.com", ExpectedErr: errTooManyWildcards, }, { Name: "Misplaced wildcard", - Ident: identifier.DNSIdentifier("ok.*.whatever.example.com"), + Domain: "ok.*.whatever.example.com", ExpectedErr: errMalformedWildcard, }, { Name: "Missing ICANN TLD", - Ident: identifier.DNSIdentifier("*.ok.madeup"), + Domain: "*.ok.madeup", ExpectedErr: errNonPublic, }, { Name: "Wildcard for ICANN TLD", - Ident: identifier.DNSIdentifier("*.com"), + Domain: "*.com", ExpectedErr: errICANNTLDWildcard, }, { Name: "Forbidden base domain", - Ident: identifier.DNSIdentifier("*.zombo.gov.us"), + Domain: "*.zombo.gov.us", ExpectedErr: errPolicyForbidden, }, // We should not allow getting a wildcard that would cover an exact // blocklist domain { Name: "Wildcard for ExactBlocklist base domain", - Ident: identifier.DNSIdentifier("*.letsdecrypt.org"), + Domain: "*.letsdecrypt.org", ExpectedErr: errPolicyForbidden, }, // We should allow a wildcard for a domain that doesn't match the exact // blocklist domain { Name: "Wildcard for non-matching subdomain of ExactBlocklist domain", - Ident: identifier.DNSIdentifier("*.lowvalue.letsdecrypt.org"), + Domain: "*.lowvalue.letsdecrypt.org", ExpectedErr: nil, }, // We should allow getting a wildcard for an exact blocklist domain since it // only covers subdomains, not the exact name.
{ Name: "Wildcard for ExactBlocklist domain", - Ident: identifier.DNSIdentifier("*.highvalue.letsdecrypt.org"), + Domain: "*.highvalue.letsdecrypt.org", ExpectedErr: nil, }, { Name: "Valid wildcard domain", - Ident: identifier.DNSIdentifier("*.everything.is.possible.at.zombo.com"), + Domain: "*.everything.is.possible.at.zombo.com", ExpectedErr: nil, }, } for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - result := pa.willingToIssueWildcard(tc.Ident) - test.AssertEquals(t, result, tc.ExpectedErr) + err := pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS(tc.Domain)}) + if tc.ExpectedErr == nil { + test.AssertNil(t, err, fmt.Sprintf("Unexpected error for domain %q, got %s", tc.Domain, err)) + } else { + test.AssertError(t, err, fmt.Sprintf("Expected error for domain %q, but got none", tc.Domain)) + var berr *berrors.BoulderError + test.AssertErrorWraps(t, err, &berr) + test.AssertContains(t, berr.Error(), tc.ExpectedErr.Error()) + } }) } } -// TestWillingToIssueWildcards tests that more than one rejected identifier +// TestWillingToIssue_SubErrors tests that more than one rejected identifier // results in an error with suberrors. -func TestWillingToIssueWildcards(t *testing.T) { +func TestWillingToIssue_SubErrors(t *testing.T) { banned := []string{ "letsdecrypt.org", + "example.com", } pa := paImpl(t) - bannedBytes, err := yaml.Marshal(blockedNamesPolicy{ + bannedBytes, err := yaml.Marshal(blockedIdentsPolicy{ HighRiskBlockedNames: banned, ExactBlockedNames: banned, }) test.AssertNotError(t, err, "Couldn't serialize banned list") - f, _ := ioutil.TempFile("", "test-wildcard-banlist.*.yaml") + f, _ := os.CreateTemp("", "test-wildcard-banlist.*.yaml") defer os.Remove(f.Name()) - err = ioutil.WriteFile(f.Name(), bannedBytes, 0640) + err = os.WriteFile(f.Name(), bannedBytes, 0640) test.AssertNotError(t, err, "Couldn't write serialized banned list to file") - err = pa.SetHostnamePolicyFile(f.Name()) + err = pa.LoadIdentPolicyFile(f.Name()) test.AssertNotError(t, err, "Couldn't load policy contents from file") - idents := []identifier.ACMEIdentifier{ - identifier.DNSIdentifier("perfectly-fine.com"), - identifier.DNSIdentifier("letsdecrypt.org"), - identifier.DNSIdentifier("ok.*.this.is.a.*.weird.one.com"), - identifier.DNSIdentifier("also-perfectly-fine.com"), - } + // Test multiple malformed domains and one banned domain; only the malformed ones will generate errors + err = pa.WillingToIssue(identifier.ACMEIdentifiers{ + identifier.NewDNS("perfectly-fine.com"), // fine + identifier.NewDNS("letsdecrypt_org"), // malformed + identifier.NewDNS("example.comm"), // malformed + identifier.NewDNS("letsdecrypt.org"), // banned + identifier.NewDNS("also-perfectly-fine.com"), // fine + }) + test.AssertDeepEquals(t, err, + &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: "Cannot issue for \"letsdecrypt_org\": Domain name contains an invalid character (and 1 more problems. Refer to sub-problems for more information.)", + SubErrors: []berrors.SubBoulderError{ + { + BoulderError: &berrors.BoulderError{ + Type: berrors.Malformed, + Detail: "Domain name contains an invalid character", + }, + Identifier: identifier.NewDNS("letsdecrypt_org"), + }, + { + BoulderError: &berrors.BoulderError{ + Type: berrors.Malformed, + Detail: "Domain name does not end with a valid public suffix (TLD)", + }, + Identifier: identifier.NewDNS("example.comm"), + }, + }, + }) - err = pa.WillingToIssueWildcards(idents) + // Test multiple banned domains. 
+ err = pa.WillingToIssue(identifier.ACMEIdentifiers{ + identifier.NewDNS("perfectly-fine.com"), // fine + identifier.NewDNS("letsdecrypt.org"), // banned + identifier.NewDNS("example.com"), // banned + identifier.NewDNS("also-perfectly-fine.com"), // fine + }) test.AssertError(t, err, "Expected err from WillingToIssueWildcards") - var berr *berrors.BoulderError - test.AssertErrorWraps(t, err, &berr) - test.AssertEquals(t, len(berr.SubErrors), 2) - test.AssertEquals(t, berr.Error(), "Cannot issue for \"letsdecrypt.org\": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy (and 1 more problems. Refer to sub-problems for more information.)") - - subErrMap := make(map[string]berrors.SubBoulderError, len(berr.SubErrors)) - - for _, subErr := range berr.SubErrors { - subErrMap[subErr.Identifier.Value] = subErr - } - - subErrA, foundA := subErrMap["letsdecrypt.org"] - subErrB, foundB := subErrMap["ok.*.this.is.a.*.weird.one.com"] - test.AssertEquals(t, foundA, true) - test.AssertEquals(t, foundB, true) - - test.AssertEquals(t, subErrA.Type, berrors.RejectedIdentifier) - test.AssertEquals(t, subErrB.Type, berrors.Malformed) + test.AssertDeepEquals(t, err, + &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: "Cannot issue for \"letsdecrypt.org\": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy (and 1 more problems. Refer to sub-problems for more information.)", + SubErrors: []berrors.SubBoulderError{ + { + BoulderError: &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: "The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy", + }, + Identifier: identifier.NewDNS("letsdecrypt.org"), + }, + { + BoulderError: &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: "The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy", + }, + Identifier: identifier.NewDNS("example.com"), + }, + }, + }) // Test willing to issue with only *one* bad identifier. - err = pa.WillingToIssueWildcards([]identifier.ACMEIdentifier{ - identifier.DNSIdentifier("letsdecrypt.org"), - }) - // It should error - test.AssertError(t, err, "Expected err from WillingToIssueWildcards") - - test.AssertErrorWraps(t, err, &berr) - // There should be *no* suberrors because there was only one error overall. 
- test.AssertEquals(t, len(berr.SubErrors), 0) - test.AssertEquals(t, berr.Error(), "Cannot issue for \"letsdecrypt.org\": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy") + err = pa.WillingToIssue(identifier.ACMEIdentifiers{identifier.NewDNS("letsdecrypt.org")}) + test.AssertDeepEquals(t, err, + &berrors.BoulderError{ + Type: berrors.RejectedIdentifier, + Detail: "Cannot issue for \"letsdecrypt.org\": The ACME server refuses to issue a certificate for this domain name, because it is forbidden by policy", + }) } -func TestChallengesFor(t *testing.T) { +func TestChallengeTypesFor(t *testing.T) { + t.Parallel() pa := paImpl(t) - challenges, err := pa.ChallengesFor(identifier.ACMEIdentifier{}) - test.AssertNotError(t, err, "ChallengesFor failed") - - test.Assert(t, len(challenges) == len(enabledChallenges), "Wrong number of challenges returned") - - seenChalls := make(map[core.AcmeChallenge]bool) - for _, challenge := range challenges { - test.Assert(t, !seenChalls[challenge.Type], "should not already have seen this type") - seenChalls[challenge.Type] = true - - test.Assert(t, enabledChallenges[challenge.Type], "Unsupported challenge returned") - } - test.AssertEquals(t, len(seenChalls), len(enabledChallenges)) - -} + t.Run("DNSAccount01Enabled=true", func(t *testing.T) { + features.Set(features.Config{DNSAccount01Enabled: true}) + t.Cleanup(features.Reset) + + testCases := []struct { + name string + ident identifier.ACMEIdentifier + wantChalls []core.AcmeChallenge + wantErr string + }{ + { + name: "dns", + ident: identifier.NewDNS("example.com"), + wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, + core.ChallengeTypeDNS01, + core.ChallengeTypeTLSALPN01, + core.ChallengeTypeDNSAccount01, + }, + }, + { + name: "dns wildcard", + ident: identifier.NewDNS("*.example.com"), + wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeDNS01, + core.ChallengeTypeDNSAccount01, + }, + }, + { + name: "ip", + ident: identifier.NewIP(netip.MustParseAddr("1.2.3.4")), + wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, core.ChallengeTypeTLSALPN01, + }, + }, + { + name: "invalid", + ident: identifier.ACMEIdentifier{Type: "fnord", Value: "uh-oh, Spaghetti-Os[tm]"}, + wantErr: "unrecognized identifier type", + }, + } -func TestChallengesForWildcard(t *testing.T) { - // wildcardIdent is an identifier for a wildcard domain name - wildcardIdent := identifier.ACMEIdentifier{ - Type: identifier.DNS, - Value: "*.zombo.com", - } + for _, tc := range testCases { + tc := tc // Capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + challs, err := pa.ChallengeTypesFor(tc.ident) + + if len(tc.wantChalls) != 0 { + test.AssertNotError(t, err, "should have succeeded") + test.AssertDeepEquals(t, challs, tc.wantChalls) + } + + if tc.wantErr != "" { + test.AssertError(t, err, "should have errored") + test.AssertContains(t, err.Error(), tc.wantErr) + } + }) + } + }) - mustConstructPA := func(t *testing.T, enabledChallenges map[core.AcmeChallenge]bool) *AuthorityImpl { - pa, err := New(enabledChallenges) - test.AssertNotError(t, err, "Couldn't create policy implementation") - return pa - } + t.Run("DNSAccount01Enabled=false", func(t *testing.T) { + features.Set(features.Config{DNSAccount01Enabled: false}) + t.Cleanup(features.Reset) + + testCases := []struct { + name string + ident identifier.ACMEIdentifier + wantChalls []core.AcmeChallenge + wantErr string + }{ + { + name: "dns", + ident: identifier.NewDNS("example.com"), + 
wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, + core.ChallengeTypeDNS01, + core.ChallengeTypeTLSALPN01, + // DNSAccount01 excluded + }, + }, + { + name: "wildcard", + ident: identifier.NewDNS("*.example.com"), + wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeDNS01, + // DNSAccount01 excluded + }, + }, + { + name: "ip", + ident: identifier.NewIP(netip.MustParseAddr("1.2.3.4")), + wantChalls: []core.AcmeChallenge{ + core.ChallengeTypeHTTP01, core.ChallengeTypeTLSALPN01, + }, + }, + } - // First try to get a challenge for the wildcard ident without the - // DNS-01 challenge type enabled. This should produce an error - var enabledChallenges = map[core.AcmeChallenge]bool{ - core.ChallengeTypeHTTP01: true, - core.ChallengeTypeDNS01: false, - } - pa := mustConstructPA(t, enabledChallenges) - _, err := pa.ChallengesFor(wildcardIdent) - test.AssertError(t, err, "ChallengesFor did not error for a wildcard ident "+ - "when DNS-01 was disabled") - test.AssertEquals(t, err.Error(), "Challenges requested for wildcard "+ - "identifier but DNS-01 challenge type is not enabled") - - // Try again with DNS-01 enabled. It should not error and - // should return only one DNS-01 type challenge - enabledChallenges[core.ChallengeTypeDNS01] = true - pa = mustConstructPA(t, enabledChallenges) - challenges, err := pa.ChallengesFor(wildcardIdent) - test.AssertNotError(t, err, "ChallengesFor errored for a wildcard ident "+ - "unexpectedly") - test.AssertEquals(t, len(challenges), 1) - test.AssertEquals(t, challenges[0].Type, core.ChallengeTypeDNS01) + for _, tc := range testCases { + tc := tc // Capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + challs, err := pa.ChallengeTypesFor(tc.ident) + + if len(tc.wantChalls) != 0 { + test.AssertNotError(t, err, "should have succeeded") + test.AssertDeepEquals(t, challs, tc.wantChalls) + } + + if tc.wantErr != "" { + test.AssertError(t, err, "should have errored") + test.AssertContains(t, err.Error(), tc.wantErr) + } + }) + } + }) } // TestMalformedExactBlocklist tests that loading a YAML policy file with an @@ -438,36 +595,222 @@ func TestMalformedExactBlocklist(t *testing.T) { } // Create YAML for the exactBannedDomains - bannedBytes, err := yaml.Marshal(blockedNamesPolicy{ + bannedBytes, err := yaml.Marshal(blockedIdentsPolicy{ HighRiskBlockedNames: bannedDomains, ExactBlockedNames: exactBannedDomains, }) test.AssertNotError(t, err, "Couldn't serialize banned list") // Create a temp file for the YAML contents - f, _ := ioutil.TempFile("", "test-invalid-exactblocklist.*.yaml") + f, _ := os.CreateTemp("", "test-invalid-exactblocklist.*.yaml") defer os.Remove(f.Name()) // Write the YAML to the temp file - err = ioutil.WriteFile(f.Name(), bannedBytes, 0640) + err = os.WriteFile(f.Name(), bannedBytes, 0640) test.AssertNotError(t, err, "Couldn't write serialized banned list to file") - // Try to use the YAML tempfile as the hostname policy. It should produce an + // Try to use the YAML tempfile as the ident policy. It should produce an // error since the exact blocklist contents are malformed. 
- err = pa.SetHostnamePolicyFile(f.Name()) + err = pa.LoadIdentPolicyFile(f.Name()) test.AssertError(t, err, "Loaded invalid exact blocklist content without error") - test.AssertEquals(t, err.Error(), "Malformed ExactBlockedNames entry, only one label: \"com\"") + test.AssertEquals(t, err.Error(), "malformed ExactBlockedNames entry, only one label: \"com\"") } func TestValidEmailError(t *testing.T) { err := ValidEmail("(à¹‘â€¢Ì Ï‰ •̀๑)") - test.AssertEquals(t, err.Error(), "\"(à¹‘â€¢Ì Ï‰ •̀๑)\" is not a valid e-mail address") + test.AssertEquals(t, err.Error(), "unable to parse email address") err = ValidEmail("john.smith@gmail.com #replace with real email") - test.AssertEquals(t, err.Error(), "\"john.smith@gmail.com #replace with real email\" is not a valid e-mail address") + test.AssertEquals(t, err.Error(), "unable to parse email address") err = ValidEmail("example@example.com") - test.AssertEquals(t, err.Error(), "invalid contact domain. Contact emails @example.com are forbidden") + test.AssertEquals(t, err.Error(), "contact email has forbidden domain \"example.com\"") err = ValidEmail("example@-foobar.com") - test.AssertEquals(t, err.Error(), "contact email \"example@-foobar.com\" has invalid domain : Domain name contains an invalid character") + test.AssertEquals(t, err.Error(), "contact email has invalid domain: Domain name contains an invalid character") +} + +func TestCheckAuthzChallenges(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + authz core.Authorization + enabled map[core.AcmeChallenge]bool + wantErr string + }{ + { + name: "unrecognized identifier", + authz: core.Authorization{ + Identifier: identifier.ACMEIdentifier{Type: "oops", Value: "example.com"}, + Challenges: []core.Challenge{{Type: core.ChallengeTypeDNS01, Status: core.StatusValid}}, + }, + wantErr: "unrecognized identifier type", + }, + { + name: "no challenges", + authz: core.Authorization{ + Identifier: identifier.NewDNS("example.com"), + Challenges: []core.Challenge{}, + }, + wantErr: "has no challenges", + }, + { + name: "no valid challenges", + authz: core.Authorization{ + Identifier: identifier.NewDNS("example.com"), + Challenges: []core.Challenge{{Type: core.ChallengeTypeDNS01, Status: core.StatusPending}}, + }, + wantErr: "not solved by any challenge", + }, + { + name: "solved by disabled challenge", + authz: core.Authorization{ + Identifier: identifier.NewDNS("example.com"), + Challenges: []core.Challenge{{Type: core.ChallengeTypeDNS01, Status: core.StatusValid}}, + }, + enabled: map[core.AcmeChallenge]bool{core.ChallengeTypeHTTP01: true}, + wantErr: "disabled challenge type", + }, + { + name: "solved by wrong kind of challenge", + authz: core.Authorization{ + Identifier: identifier.NewDNS("*.example.com"), + Challenges: []core.Challenge{{Type: core.ChallengeTypeHTTP01, Status: core.StatusValid}}, + }, + wantErr: "inapplicable challenge type", + }, + { + name: "valid authz", + authz: core.Authorization{ + Identifier: identifier.NewDNS("example.com"), + Challenges: []core.Challenge{{Type: core.ChallengeTypeTLSALPN01, Status: core.StatusValid}}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + pa := paImpl(t) + + if tc.enabled != nil { + pa.enabledChallenges = tc.enabled + } + + err := pa.CheckAuthzChallenges(&tc.authz) + + if tc.wantErr == "" { + test.AssertNotError(t, err, "should have succeeded") + } else { + test.AssertError(t, err, "should have errored") + test.AssertContains(t, err.Error(), tc.wantErr) + } + }) + } +} + 
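The table-driven layout used in TestCheckAuthzChallenges above (a testCases slice, one t.Run per case, t.Parallel inside each subtest) is the pattern this change applies across the policy tests. A minimal, self-contained sketch of the same pattern using only the standard library (the TestContains name and its cases are illustrative assumptions, not part of this change):

package example

import (
	"strings"
	"testing"
)

func TestContains(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name     string
		haystack string
		needle   string
		want     bool
	}{
		{name: "match", haystack: "hello world", needle: "world", want: true},
		{name: "no match", haystack: "hello world", needle: "mars", want: false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Each case runs as an independent, parallel subtest,
			// mirroring the structure of the policy tests in this change.
			got := strings.Contains(tc.haystack, tc.needle)
			if got != tc.want {
				t.Errorf("Contains(%q, %q) = %v, want %v", tc.haystack, tc.needle, got, tc.want)
			}
		})
	}
}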
+func TestWillingToIssue_IdentifierType(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + ident identifier.ACMEIdentifier + enabled map[identifier.IdentifierType]bool + wantErr string + }{ + { + name: "DNS identifier, none enabled", + ident: identifier.NewDNS("example.com"), + enabled: nil, + wantErr: "The ACME server has disabled this identifier type", + }, + { + name: "DNS identifier, DNS enabled", + ident: identifier.NewDNS("example.com"), + enabled: map[identifier.IdentifierType]bool{identifier.TypeDNS: true}, + wantErr: "", + }, + { + name: "DNS identifier, DNS & IP enabled", + ident: identifier.NewDNS("example.com"), + enabled: map[identifier.IdentifierType]bool{identifier.TypeDNS: true, identifier.TypeIP: true}, + wantErr: "", + }, + { + name: "DNS identifier, IP enabled", + ident: identifier.NewDNS("example.com"), + enabled: map[identifier.IdentifierType]bool{identifier.TypeIP: true}, + wantErr: "The ACME server has disabled this identifier type", + }, + { + name: "IP identifier, none enabled", + ident: identifier.NewIP(netip.MustParseAddr("9.9.9.9")), + enabled: nil, + wantErr: "The ACME server has disabled this identifier type", + }, + { + name: "IP identifier, DNS enabled", + ident: identifier.NewIP(netip.MustParseAddr("9.9.9.9")), + enabled: map[identifier.IdentifierType]bool{identifier.TypeDNS: true}, + wantErr: "The ACME server has disabled this identifier type", + }, + { + name: "IP identifier, DNS & IP enabled", + ident: identifier.NewIP(netip.MustParseAddr("9.9.9.9")), + enabled: map[identifier.IdentifierType]bool{identifier.TypeDNS: true, identifier.TypeIP: true}, + wantErr: "", + }, + { + name: "IP identifier, IP enabled", + ident: identifier.NewIP(netip.MustParseAddr("9.9.9.9")), + enabled: map[identifier.IdentifierType]bool{identifier.TypeIP: true}, + wantErr: "", + }, + { + name: "invalid identifier type", + ident: identifier.ACMEIdentifier{Type: "drywall", Value: "oh yeah!"}, + enabled: map[identifier.IdentifierType]bool{"drywall": true}, + wantErr: "Invalid identifier type", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + policy := blockedIdentsPolicy{ + HighRiskBlockedNames: []string{"zombo.gov.us"}, + ExactBlockedNames: []string{`highvalue.website1.org`}, + AdminBlockedNames: []string{`banned.in.dc.com`}, + } + + yamlPolicyBytes, err := yaml.Marshal(policy) + test.AssertNotError(t, err, "Couldn't YAML serialize blocklist") + yamlPolicyFile, _ := os.CreateTemp("", "test-blocklist.*.yaml") + defer os.Remove(yamlPolicyFile.Name()) + err = os.WriteFile(yamlPolicyFile.Name(), yamlPolicyBytes, 0640) + test.AssertNotError(t, err, "Couldn't write YAML blocklist") + + pa := paImpl(t) + + err = pa.LoadIdentPolicyFile(yamlPolicyFile.Name()) + test.AssertNotError(t, err, "Couldn't load rules") + + pa.enabledIdentifiers = tc.enabled + + err = pa.WillingToIssue(identifier.ACMEIdentifiers{tc.ident}) + + if tc.wantErr == "" { + if err != nil { + t.Errorf("should have succeeded, but got error: %s", err.Error()) + } + } else { + if err == nil { + t.Errorf("should have failed") + } else if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("wrong error; wanted '%s', but got '%s'", tc.wantErr, err.Error()) + } + } + }) + } } diff --git a/policyasn1/policy.go b/policyasn1/policy.go deleted file mode 100644 index a708ddc67af..00000000000 --- a/policyasn1/policy.go +++ /dev/null @@ -1,21 +0,0 @@ -// policyasn1 contains structures required to encode the RFC 5280 -// PolicyInformation ASN.1 structures. 
-package policyasn1 - -import "encoding/asn1" - -// CPSQualifierOID contains the id-qt-cps OID that is used to indicate the -// CPS policy qualifier type -var CPSQualifierOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} - -// PolicyQualifier represents the PolicyQualifierInfo ASN.1 structure -type PolicyQualifier struct { - OID asn1.ObjectIdentifier - Value string `asn1:"optional,ia5"` -} - -// PolicyInformation represents the PolicyInformation ASN.1 structure -type PolicyInformation struct { - Policy asn1.ObjectIdentifier - Qualifiers []PolicyQualifier `asn1:"optional"` -} diff --git a/precert/corr.go b/precert/corr.go new file mode 100644 index 00000000000..a0708e28a21 --- /dev/null +++ b/precert/corr.go @@ -0,0 +1,222 @@ +package precert + +import ( + "bytes" + encoding_asn1 "encoding/asn1" + "errors" + "fmt" + + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" +) + +// Correspond returns nil if the two certificates are a valid precertificate/final certificate pair. +// Order of the arguments matters: the precertificate is first and the final certificate is second. +// Note that RFC 6962 allows the precertificate and final certificate to have different Issuers, but +// this function rejects such pairs. +func Correspond(precertDER, finalDER []byte) error { + preTBS, err := tbsDERFromCertDER(precertDER) + if err != nil { + return fmt.Errorf("parsing precert: %w", err) + } + + finalTBS, err := tbsDERFromCertDER(finalDER) + if err != nil { + return fmt.Errorf("parsing final cert: %w", err) + } + + // The first 7 fields of TBSCertificate must be byte-for-byte identical. + // The next 2 fields (issuerUniqueID and subjectUniqueID) are forbidden + // by the Baseline Requirements so we assume they are not present (if they + // are, they will fail the next check, for extensions). + // https://datatracker.ietf.org/doc/html/rfc5280#page-117 + // TBSCertificate ::= SEQUENCE { + // version [0] Version DEFAULT v1, + // serialNumber CertificateSerialNumber, + // signature AlgorithmIdentifier, + // issuer Name, + // validity Validity, + // subject Name, + // subjectPublicKeyInfo SubjectPublicKeyInfo, + // issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL, + // -- If present, version MUST be v2 or v3 + // subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL, + // -- If present, version MUST be v2 or v3 + // extensions [3] Extensions OPTIONAL + // -- If present, version MUST be v3 -- } + for i := range 7 { + if err := readIdenticalElement(&preTBS, &finalTBS); err != nil { + return fmt.Errorf("checking for identical field %d: %w", i, err) + } + } + + // The extensions should be mostly the same, with these exceptions: + // - The precertificate should have exactly one precertificate poison extension + // not present in the final certificate. + // - The final certificate should have exactly one SCTList extension not present + // in the precertificate. 
+ // - As a consequence, the byte lengths of the extensions fields will not be the + // same, so we ignore the lengths (so long as they parse) + precertExtensionBytes, err := unwrapExtensions(preTBS) + if err != nil { + return fmt.Errorf("parsing precert extensions: %w", err) + } + + finalCertExtensionBytes, err := unwrapExtensions(finalTBS) + if err != nil { + return fmt.Errorf("parsing final cert extensions: %w", err) + } + + precertParser := extensionParser{bytes: precertExtensionBytes, skippableOID: poisonOID} + finalCertParser := extensionParser{bytes: finalCertExtensionBytes, skippableOID: sctListOID} + + for i := 0; ; i++ { + precertExtn, err := precertParser.Next() + if err != nil { + return err + } + + finalCertExtn, err := finalCertParser.Next() + if err != nil { + return err + } + + if !bytes.Equal(precertExtn, finalCertExtn) { + return fmt.Errorf("precert extension %d (%x) not equal to final cert extension %d (%x)", + i+precertParser.skipped, precertExtn, i+finalCertParser.skipped, finalCertExtn) + } + + if precertExtn == nil && finalCertExtn == nil { + break + } + } + + if precertParser.skipped == 0 { + return fmt.Errorf("no poison extension found in precert") + } + if precertParser.skipped > 1 { + return fmt.Errorf("multiple poison extensions found in precert") + } + if finalCertParser.skipped == 0 { + return fmt.Errorf("no SCTList extension found in final cert") + } + if finalCertParser.skipped > 1 { + return fmt.Errorf("multiple SCTList extensions found in final cert") + } + return nil +} + +var poisonOID = []int{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} +var sctListOID = []int{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} + +// extensionParser takes a sequence of bytes representing the inner bytes of the +// `extensions` field. Repeated calls to Next() will return all the extensions +// except those that match the skippableOID. The skipped extensions will be +// counted in `skipped`. +type extensionParser struct { + skippableOID encoding_asn1.ObjectIdentifier + bytes cryptobyte.String + skipped int +} + +// Next returns the next extension in the sequence, skipping (and counting) +// any extension that matches the skippableOID. +// Returns nil, nil when there are no more extensions. +func (e *extensionParser) Next() (cryptobyte.String, error) { + if e.bytes.Empty() { + return nil, nil + } + + var next cryptobyte.String + if !e.bytes.ReadASN1(&next, asn1.SEQUENCE) { + return nil, fmt.Errorf("failed to parse extension") + } + + var oid encoding_asn1.ObjectIdentifier + nextCopy := next + if !nextCopy.ReadASN1ObjectIdentifier(&oid) { + return nil, fmt.Errorf("failed to parse extension OID") + } + + if oid.Equal(e.skippableOID) { + e.skipped++ + return e.Next() + } + + return next, nil +} + +// unwrapExtensions takes a given sequence of bytes representing the `extensions` field +// of a TBSCertificate and parses away the outermost two layers, returning the inner bytes +// of the Extensions sequence. +// +// https://datatracker.ietf.org/doc/html/rfc5280#page-117 +// +// TBSCertificate ::= SEQUENCE { +// ...
+// extensions [3] Extensions OPTIONAL + // } + // + // Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension + func unwrapExtensions(field cryptobyte.String) (cryptobyte.String, error) { + var extensions cryptobyte.String + if !field.ReadASN1(&extensions, asn1.Tag(3).Constructed().ContextSpecific()) { + return nil, errors.New("error reading extensions") + } + + var extensionsInner cryptobyte.String + if !extensions.ReadASN1(&extensionsInner, asn1.SEQUENCE) { + return nil, errors.New("error reading extensions inner") + } + + return extensionsInner, nil + } + + // readIdenticalElement reads a single ASN.1 element from each input and returns an error if + // their tags or their contents differ. + func readIdenticalElement(a, b *cryptobyte.String) error { + var aInner, bInner cryptobyte.String + var aTag, bTag asn1.Tag + if !a.ReadAnyASN1Element(&aInner, &aTag) { + return fmt.Errorf("failed to read element from first input") + } + if !b.ReadAnyASN1Element(&bInner, &bTag) { + return fmt.Errorf("failed to read element from second input") + } + if aTag != bTag { + return fmt.Errorf("tags differ: %d != %d", aTag, bTag) + } + if !bytes.Equal([]byte(aInner), []byte(bInner)) { + return fmt.Errorf("elements differ: %x != %x", aInner, bInner) + } + return nil + } + + // tbsDERFromCertDER takes a Certificate object encoded as DER, and parses + // away the outermost two sequences to get the inner bytes of the TBSCertificate. + // + // https://datatracker.ietf.org/doc/html/rfc5280#page-116 + // + // Certificate ::= SEQUENCE { + // tbsCertificate TBSCertificate, + // ... + // + // TBSCertificate ::= SEQUENCE { + // version [0] Version DEFAULT v1, + // serialNumber CertificateSerialNumber, + // ... + func tbsDERFromCertDER(certDER []byte) (cryptobyte.String, error) { + var inner cryptobyte.String + input := cryptobyte.String(certDER) + + if !input.ReadASN1(&inner, asn1.SEQUENCE) { + return nil, fmt.Errorf("failed to read outer sequence") + } + + var tbsCertificate cryptobyte.String + if !inner.ReadASN1(&tbsCertificate, asn1.SEQUENCE) { + return nil, fmt.Errorf("failed to read tbsCertificate") + } + + return tbsCertificate, nil + } diff --git a/precert/corr_test.go b/precert/corr_test.go new file mode 100644 index 00000000000..8d29ee077e4 --- /dev/null +++ b/precert/corr_test.go @@ -0,0 +1,341 @@ +package precert + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "os" + "strings" + "testing" + "time" +) + +func TestCorrespondIncorrectArgumentOrder(t *testing.T) { + pre, final, err := readPair("testdata/good/precert.pem", "testdata/good/final.pem") + if err != nil { + t.Fatal(err) + } + + // The final cert is in the precert position and vice versa.
+ err = Correspond(final, pre) + if err == nil { + t.Errorf("expected failure when final and precertificates were in wrong order, got success") + } +} + +func TestCorrespondGood(t *testing.T) { + pre, final, err := readPair("testdata/good/precert.pem", "testdata/good/final.pem") + if err != nil { + t.Fatal(err) + } + + err = Correspond(pre, final) + if err != nil { + t.Errorf("expected testdata/good/ certs to correspond, got %s", err) + } +} + +func TestCorrespondBad(t *testing.T) { + pre, final, err := readPair("testdata/bad/precert.pem", "testdata/bad/final.pem") + if err != nil { + t.Fatal(err) + } + + err = Correspond(pre, final) + if err == nil { + t.Errorf("expected testdata/bad/ certs to not correspond, got nil error") + } + expected := "precert extension 7 (0603551d20040c300a3008060667810c010201) not equal to final cert extension 7 (0603551d20044530433008060667810c0102013037060b2b0601040182df130101013028302606082b06010505070201161a687474703a2f2f6370732e6c657473656e63727970742e6f7267)" + if !strings.Contains(err.Error(), expected) { + t.Errorf("expected error to contain %q, got %q", expected, err.Error()) + } +} + +func TestCorrespondCompleteMismatch(t *testing.T) { + pre, final, err := readPair("testdata/good/precert.pem", "testdata/bad/final.pem") + if err != nil { + t.Fatal(err) + } + + err = Correspond(pre, final) + if err == nil { + t.Errorf("expected testdata/good and testdata/bad/ certs to not correspond, got nil error") + } + expected := "checking for identical field 1: elements differ: 021203d91c3d22b404f20df3c1631c22e1754b8d != 021203e2267b786b7e338317ddd62e764fcb3c71" + if !strings.Contains(err.Error(), expected) { + t.Errorf("expected error to contain %q, got %q", expected, err.Error()) + } +} + +func readPair(a, b string) ([]byte, []byte, error) { + aDER, err := derFromPEMFile(a) + if err != nil { + return nil, nil, err + } + bDER, err := derFromPEMFile(b) + if err != nil { + return nil, nil, err + } + return aDER, bDER, nil +} + +// derFromPEMFile reads a PEM file and returns the DER-encoded bytes. +func derFromPEMFile(filename string) ([]byte, error) { + precertPEM, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("reading %s: %w", filename, err) + } + + precertPEMBlock, _ := pem.Decode(precertPEM) + if precertPEMBlock == nil { + return nil, fmt.Errorf("error PEM decoding %s", filename) + } + + return precertPEMBlock.Bytes, nil +} + +func TestMismatches(t *testing.T) { + now := time.Now() + + issuerKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + // A separate issuer key, used for signing the final certificate, but + // using the same simulated issuer certificate. + untrustedIssuerKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + subscriberKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + // By reading the crypto/x509 code, we know that Subject is the only field + // of the issuer certificate that we need to care about for the purposes + // of signing below. 
+ issuer := x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Some Issuer", + }, + } + + precertTemplate := x509.Certificate{ + SerialNumber: big.NewInt(3141592653589793238), + NotBefore: now, + NotAfter: now.Add(24 * time.Hour), + DNSNames: []string{"example.com"}, + ExtraExtensions: []pkix.Extension{ + { + Id: poisonOID, + Value: []byte{0x5, 0x0}, + }, + }, + } + + precertDER, err := x509.CreateCertificate(rand.Reader, &precertTemplate, &issuer, &subscriberKey.PublicKey, issuerKey) + if err != nil { + t.Fatal(err) + } + + // Sign a final certificate with the untrustedIssuerKey, first applying the + // given modify function to the default template. Return the DER encoded bytes. + makeFinalCert := func(modify func(c *x509.Certificate)) []byte { + t.Helper() + finalCertTemplate := &x509.Certificate{ + SerialNumber: big.NewInt(3141592653589793238), + NotBefore: now, + NotAfter: now.Add(24 * time.Hour), + DNSNames: []string{"example.com"}, + ExtraExtensions: []pkix.Extension{ + { + Id: sctListOID, + Value: nil, + }, + }, + } + + modify(finalCertTemplate) + + finalCertDER, err := x509.CreateCertificate(rand.Reader, finalCertTemplate, + &issuer, &subscriberKey.PublicKey, untrustedIssuerKey) + if err != nil { + t.Fatal(err) + } + + return finalCertDER + } + + // Expect success with a matching precert and final cert + finalCertDER := makeFinalCert(func(c *x509.Certificate) {}) + err = Correspond(precertDER, finalCertDER) + if err != nil { + t.Errorf("expected precert and final cert to correspond, got: %s", err) + } + + // Set up a precert / final cert pair where the SCTList and poison extensions are + // not in the same position + precertTemplate2 := x509.Certificate{ + SerialNumber: big.NewInt(3141592653589793238), + NotBefore: now, + NotAfter: now.Add(24 * time.Hour), + DNSNames: []string{"example.com"}, + ExtraExtensions: []pkix.Extension{ + { + Id: poisonOID, + Value: []byte{0x5, 0x0}, + }, + // Arbitrary extension to make poisonOID not be the last extension + { + Id: []int{1, 2, 3, 4}, + Value: []byte{0x5, 0x0}, + }, + }, + } + + precertDER2, err := x509.CreateCertificate(rand.Reader, &precertTemplate2, &issuer, &subscriberKey.PublicKey, issuerKey) + if err != nil { + t.Fatal(err) + } + + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.ExtraExtensions = []pkix.Extension{ + { + Id: []int{1, 2, 3, 4}, + Value: []byte{0x5, 0x0}, + }, + { + Id: sctListOID, + Value: nil, + }, + } + }) + err = Correspond(precertDER2, finalCertDER) + if err != nil { + t.Errorf("expected precert and final cert to correspond with differently positioned extensions, got: %s", err) + } + + // Expect failure with a mismatched Issuer + issuer = x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Some Other Issuer", + }, + } + + finalCertDER = makeFinalCert(func(c *x509.Certificate) {}) + err = Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched issuer, got nil error") + } + + // Restore original issuer + issuer = x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Some Issuer", + }, + } + + // Expect failure with a mismatched Serial + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.SerialNumber = big.NewInt(2718281828459045) + }) + err = Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched serial, got nil error") + } + + // Expect failure with mismatched names + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.DNSNames = []string{"example.com", "www.example.com"} + }) + + err = 
Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched names, got nil error") + } + + // Expect failure with mismatched NotBefore + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.NotBefore = now.Add(24 * time.Hour) + }) + + err = Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched NotBefore, got nil error") + } + + // Expect failure with mismatched NotAfter + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.NotAfter = now.Add(48 * time.Hour) + }) + err = Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched NotAfter, got nil error") + } + + // Expect failure for mismatched extensions + finalCertDER = makeFinalCert(func(c *x509.Certificate) { + c.ExtraExtensions = append(c.ExtraExtensions, pkix.Extension{ + Critical: true, + Id: []int{1, 2, 3}, + Value: []byte("hello"), + }) + }) + + err = Correspond(precertDER, finalCertDER) + if err == nil { + t.Errorf("expected error for mismatched extensions, got nil error") + } + expectedError := "precert extension 2 () not equal to final cert extension 2 (06022a030101ff040568656c6c6f)" + if err.Error() != expectedError { + t.Errorf("expected error %q, got %q", expectedError, err) + } +} + +func TestUnwrapExtensions(t *testing.T) { + validExtensionsOuter := []byte{0xA3, 0x3, 0x30, 0x1, 0x0} + _, err := unwrapExtensions(validExtensionsOuter) + if err != nil { + t.Errorf("expected success for validExtensionsOuter, got %s", err) + } + + invalidExtensionsOuter := []byte{0xA3, 0x99, 0x30, 0x1, 0x0} + _, err = unwrapExtensions(invalidExtensionsOuter) + if err == nil { + t.Error("expected error for invalidExtensionsOuter, got none") + } + + invalidExtensionsInner := []byte{0xA3, 0x3, 0x30, 0x99, 0x0} + _, err = unwrapExtensions(invalidExtensionsInner) + if err == nil { + t.Error("expected error for invalidExtensionsInner, got none") + } +} + +func TestTBSFromCertDER(t *testing.T) { + validCertOuter := []byte{0x30, 0x3, 0x30, 0x1, 0x0} + _, err := tbsDERFromCertDER(validCertOuter) + if err != nil { + t.Errorf("expected success for validCertOuter, got %s", err) + } + + invalidCertOuter := []byte{0x30, 0x99, 0x30, 0x1, 0x0} + _, err = tbsDERFromCertDER(invalidCertOuter) + if err == nil { + t.Error("expected error for invalidCertOuter, got none") + } + + invalidCertInner := []byte{0x30, 0x3, 0x30, 0x99, 0x0} + _, err = tbsDERFromCertDER(invalidCertInner) + if err == nil { + t.Error("expected error for invalidCertInner, got none") + } +} diff --git a/precert/testdata/README.md b/precert/testdata/README.md new file mode 100644 index 00000000000..e6852915bc0 --- /dev/null +++ b/precert/testdata/README.md @@ -0,0 +1,8 @@ +The data in this directory consists of real certificates issued by Let's +Encrypt in 2023. The ones under the `bad` directory were issued during +the Duplicate Serial Numbers incident (https://bugzilla.mozilla.org/show_bug.cgi?id=1838667) +and differ in the presence / absence of a second policyIdentifier in the +Certificate Policies extension. + +The ones under the `good` directory were issued shortly after recovery +from the incident and represent a correct correspondence relationship.
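Given the Correspond API and the testdata layout above, a hedged sketch of how a standalone caller might check a precertificate/final-certificate pair read from PEM files; the file paths and the main wrapper are assumptions for illustration, and only precert.Correspond itself comes from this change:

package main

import (
	"encoding/pem"
	"fmt"
	"log"
	"os"

	"github.com/letsencrypt/boulder/precert"
)

// derFromPEM reads a PEM file and returns the DER bytes of its first block.
// This mirrors the test helper above; the name is illustrative.
func derFromPEM(path string) []byte {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		log.Fatalf("reading %s: %s", path, err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatalf("no PEM block found in %s", path)
	}
	return block.Bytes
}

func main() {
	// Assumed paths, matching the testdata layout described in the README.
	pre := derFromPEM("precert/testdata/good/precert.pem")
	final := derFromPEM("precert/testdata/good/final.pem")
	// Argument order matters: the precertificate comes first.
	if err := precert.Correspond(pre, final); err != nil {
		log.Fatalf("pair does not correspond: %s", err)
	}
	fmt.Println("precertificate and final certificate correspond")
}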
diff --git a/precert/testdata/bad/final.pem b/precert/testdata/bad/final.pem new file mode 100644 index 00000000000..bfc9847c93b --- /dev/null +++ b/precert/testdata/bad/final.pem @@ -0,0 +1,36 @@ +-----BEGIN CERTIFICATE----- +MIIGRjCCBS6gAwIBAgISA+Ime3hrfjODF93WLnZPyzxxMA0GCSqGSIb3DQEBCwUA +MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD +EwJSMzAeFw0yMzA2MTUxNDM2MTZaFw0yMzA5MTMxNDM2MTVaMB4xHDAaBgNVBAMM +EyouN2FjbnIubW9uZ29kYi5uZXQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCjLiLXI/mTBSEkSKVucC3NcnXGu/M2qwLIk1uenifnoNMmdJmEyp+oWFUS +n9rIXtHw27YTlJLRRYLSIzqqujDV5PmXzFrSJ/9JrgIbNUowaVF3j9bf1+NPENEH +81RnNGevtKUN5NoEo3fAmZaMWrGjWioNnpIsegSjvvuHeqMqC7SNrGSvtKLBiPkO +bL5oScPYj/cHzt3RYJ17ru6xWgUDV6aqvEblrxcXvPmd/1SxB3Vkdkc+bCuSLSNM +/NmcET0YUhWizanjodJarpYJRuW1SjGmPda0jBAQZQDPmZHCEgwTBcCEIg5J3XzA +fFUZPPlTVgE+7Mbjd/DK7iz46D0uHOigVTZto3lPYRdRiyVFNUMAN0GLAlkaJ7Td +0FnAxvhE74lSjI7lFqDNtiyA8ovp/JbKfPmnvfH+fQa7vEFbR5H9v4UZt0XLeI6W +dV4pYoCwuK5mfr0NQLCy/015OAU8WF4MLM+Fyt+GG+sOk2Maz6ysAShMOvdNH7B3 +GSn65xBVgBxlPWyYpodW9SS1NSVgrgbKMg0yHzx/PdosQehyh9p6OpuTaeEi2iQg +yTODKGHX+cmjzUx0iCG2ByC9bvMo32eZXiC+itZCaHb0FGXh+K7UcOCsvsi7NLGR +ngVKK7u7gZmPu4UkVUBpF3jz/OK3OsudHcflZIGd6nf8w4lp0wIDAQABo4ICaDCC +AmQwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBREcOX3VXl7+uM7aqTQ/coniJsAAjAf +BgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+dixTCxjBVBggrBgEFBQcBAQRJMEcw +IQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxlbmNyLm9yZzAiBggrBgEFBQcwAoYW +aHR0cDovL3IzLmkubGVuY3Iub3JnLzA4BgNVHREEMTAvghgqLjdhY25yLm1lc2gu +bW9uZ29kYi5uZXSCEyouN2FjbnIubW9uZ29kYi5uZXQwTAYDVR0gBEUwQzAIBgZn +gQwBAgEwNwYLKwYBBAGC3xMBAQEwKDAmBggrBgEFBQcCARYaaHR0cDovL2Nwcy5s +ZXRzZW5jcnlwdC5vcmcwggEEBgorBgEEAdZ5AgQCBIH1BIHyAPAAdgC3Pvsk35xN +unXyOcW6WPRsXfxCz3qfNcSeHQmBJe20mQAAAYi/s0QZAAAEAwBHMEUCID4vc7PN +WNauTkmkS7CqSwdiyOV+LYIT9g8KygWW4atTAiEA6Re4Cz7BsEMi+/U8G+r9Lmqb +qwGXGS4mXG7RiEfeQEcAdgB6MoxU2LcttiDqOOBSHumEFnAyE4VNO9IrwTpXo1Lr +UgAAAYi/s0RQAAAEAwBHMEUCIQD95SqDycwXGZ+JKBUVBR+hBxn4BRIQ7EPIaMTI +/+854gIgDpJm5BFX9vKUf5tKWn9f/Fagktt5J6hPnrmURSV/egAwDQYJKoZIhvcN +AQELBQADggEBAKWyDSRmiM9N+2AhYgRuzh3JnxtvhmEXUBEgwuFnlQyCm5ZvScvW +Kmw2sqcj+gI2UNUxmWjq3PbIVBrTLDEgXtVN+JU6HwC4TdYPIB4LzfrWsGY7cc2a +aY76YbWlwEyhN9niQLijZORKhZ6HLM7MI76FM7oJ9eZmvnfypjJ7E0J9ek/y7S1w +qg5EM+QiAf03YcjSxUCyL3/+EzlYRz65diLh7Eb6gBd58rWLOa1nbgTOFsToAkBE +7qR3HymfWysxApDN8x95jDzubbkqiyuk3dvzjn3oouN1H8NsG/xYrYmMMwnJ8xul +1AJ31ZMxJ9hr29G122DSEaX9smAyyzWhAwM= +-----END CERTIFICATE----- diff --git a/precert/testdata/bad/precert.pem b/precert/testdata/bad/precert.pem new file mode 100644 index 00000000000..ab323b7fcc9 --- /dev/null +++ b/precert/testdata/bad/precert.pem @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFGjCCBAKgAwIBAgISA+Ime3hrfjODF93WLnZPyzxxMA0GCSqGSIb3DQEBCwUA +MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD +EwJSMzAeFw0yMzA2MTUxNDM2MTZaFw0yMzA5MTMxNDM2MTVaMB4xHDAaBgNVBAMM +EyouN2FjbnIubW9uZ29kYi5uZXQwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCjLiLXI/mTBSEkSKVucC3NcnXGu/M2qwLIk1uenifnoNMmdJmEyp+oWFUS +n9rIXtHw27YTlJLRRYLSIzqqujDV5PmXzFrSJ/9JrgIbNUowaVF3j9bf1+NPENEH +81RnNGevtKUN5NoEo3fAmZaMWrGjWioNnpIsegSjvvuHeqMqC7SNrGSvtKLBiPkO +bL5oScPYj/cHzt3RYJ17ru6xWgUDV6aqvEblrxcXvPmd/1SxB3Vkdkc+bCuSLSNM +/NmcET0YUhWizanjodJarpYJRuW1SjGmPda0jBAQZQDPmZHCEgwTBcCEIg5J3XzA +fFUZPPlTVgE+7Mbjd/DK7iz46D0uHOigVTZto3lPYRdRiyVFNUMAN0GLAlkaJ7Td +0FnAxvhE74lSjI7lFqDNtiyA8ovp/JbKfPmnvfH+fQa7vEFbR5H9v4UZt0XLeI6W +dV4pYoCwuK5mfr0NQLCy/015OAU8WF4MLM+Fyt+GG+sOk2Maz6ysAShMOvdNH7B3 
+GSn65xBVgBxlPWyYpodW9SS1NSVgrgbKMg0yHzx/PdosQehyh9p6OpuTaeEi2iQg +yTODKGHX+cmjzUx0iCG2ByC9bvMo32eZXiC+itZCaHb0FGXh+K7UcOCsvsi7NLGR +ngVKK7u7gZmPu4UkVUBpF3jz/OK3OsudHcflZIGd6nf8w4lp0wIDAQABo4IBPDCC +ATgwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBREcOX3VXl7+uM7aqTQ/coniJsAAjAf +BgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+dixTCxjBVBggrBgEFBQcBAQRJMEcw +IQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxlbmNyLm9yZzAiBggrBgEFBQcwAoYW +aHR0cDovL3IzLmkubGVuY3Iub3JnLzA4BgNVHREEMTAvghgqLjdhY25yLm1lc2gu +bW9uZ29kYi5uZXSCEyouN2FjbnIubW9uZ29kYi5uZXQwEwYDVR0gBAwwCjAIBgZn +gQwBAgEwEwYKKwYBBAHWeQIEAwEB/wQCBQAwDQYJKoZIhvcNAQELBQADggEBALIU +rHns6TWfT/kfJ60D9R1Ek4YGB/jVsrh2d3uiIU2hiRBBjgDkCLyKd7oXM761uXX3 +LL4H4JPegqTrZAPO88tUtzBSb3IF4yA0o1NWhE6ceLnBk9fl5TRCC8QASliApsOi +gDgRi1VFmyFOHpHnVZdbpPucy6T+CdKXKfj4iNw+aOZcoQxJ70XECXxQbdqJ7VdY +f0B+wtk5HZU8cuVVCj1i/iDv1zqITCzaavbz870QugiHO/8rj2ctrA07SX3Ovs4J +GbCGuMzlpxeIFtQDWVufVbu1ZZltzPlSHFqv6mPKW9stYtt8JCjmPwNW6UdrlBtN +gvFgkgDpz+Q6/Vu+u7g= +-----END CERTIFICATE----- diff --git a/precert/testdata/good/final.pem b/precert/testdata/good/final.pem new file mode 100644 index 00000000000..0b27cc646ef --- /dev/null +++ b/precert/testdata/good/final.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIE/TCCA+WgAwIBAgISA9kcPSK0BPIN88FjHCLhdUuNMA0GCSqGSIb3DQEBCwUAMDIxCzAJBgNVBAYT +AlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJSMzAeFw0yMzA2MTUxNTAxNDRaFw0y +MzA5MTMxNTAxNDNaMCIxIDAeBgNVBAMTF2hvdXNldHJhaW5pbmdwdXBweS5pbmZvMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr/XUbBzyFKRMJ0vYSpqw4Wy2Y2eV+vSCix5TcGNxTR9tB9EX+hNd +C7/zlKJAGUj9ZTSfbJO27HvleVN3D5idhIFxfP2tdfAp4OxQkf4a4nqKXZzPJpTlDs2LQNjKcwszaxKY +CMzGThieeBm7jUiWL6fuAX+sCsBIO0frJ9klq77f7NplfwJ3FcKWFyvMo71rtFZCoLt7dfgKim+SBGYn +agfNe8mmxy4ipqvWtGzMO3cdcKdiRijMzZG1upRjhoggHI/vS2JkWP4bNoZdGCAvaxriEoBdS5K9LqHQ +P6GurVXM5B3kuJkMBN+OmnrXxvcnWbYY6JwAO3KZ1+Vbi2ryPQIDAQABo4ICGzCCAhcwDgYDVR0PAQH/ +BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBQmE8zNXgf+dOmQ3kFb3p4xfznLjTAfBgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+dixTCxjBVBggr +BgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxlbmNyLm9yZzAiBggrBgEFBQcwAoYW +aHR0cDovL3IzLmkubGVuY3Iub3JnLzAiBgNVHREEGzAZghdob3VzZXRyYWluaW5ncHVwcHkuaW5mbzAT +BgNVHSAEDDAKMAgGBmeBDAECATCCAQYGCisGAQQB1nkCBAIEgfcEgfQA8gB3AHoyjFTYty22IOo44FIe +6YQWcDIThU070ivBOlejUutSAAABiL/Kk3wAAAQDAEgwRgIhAN//jI1iByfobY0b+JXWFhc5zQpKC+mI +qXIWrWlXPgrqAiEAiArpAl0FCxvy5vv/C/t+ZOFh0OTxMc2w9rj0GlAhPrAAdwDoPtDaPvUGNTLnVyi8 +iWvJA9PL0RFr7Otp4Xd9bQa9bgAAAYi/ypP1AAAEAwBIMEYCIQC7XKe+yYzkIeu/294qGrQB/G4I8+hz +//3HJVWFam+6KQIhAMy2iY3IITazdGhmQXGQAUPSzXt2wtm1PGHPmyNmIQnXMA0GCSqGSIb3DQEBCwUA +A4IBAQBtrtoi4zea7CnswZc/1Ql3aV0j7nblq4gXxiMoHdoq1srZbypnqvDIFaEp5BjSccEc0D0jK4u2 +nwnFzIljjRi/HXoTBJBHKIxX/s9G/tWFgfnrRSonyN1mguyi7avfWLELrl+Or2+h1K4LZIasrlN8oJpu +a4msgl8HXRdla9Kej7x6fYgyBOJEAcb82i7Ur4bM5OGKZObePHGK6NDsTcpdmqBAjAuKLYMtpHXpFo4/ +14X2A027hOdDBFkeNcRF2KZsbSvp78qIZsSYtjEyYBlTPWLh/aoXx2sc2vl43VaLYOlEIfuzrEKCTiqr +D3TU5CmThOuzm/H0HeCmtlNuQlzK +-----END CERTIFICATE----- diff --git a/precert/testdata/good/precert.pem b/precert/testdata/good/precert.pem new file mode 100644 index 00000000000..9791bc5bb29 --- /dev/null +++ b/precert/testdata/good/precert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIECDCCAvCgAwIBAgISA9kcPSK0BPIN88FjHCLhdUuNMA0GCSqGSIb3DQEBCwUAMDIxCzAJBgNVBAYT +AlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQDEwJSMzAeFw0yMzA2MTUxNTAxNDRaFw0y +MzA5MTMxNTAxNDNaMCIxIDAeBgNVBAMTF2hvdXNldHJhaW5pbmdwdXBweS5pbmZvMIIBIjANBgkqhkiG 
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr/XUbBzyFKRMJ0vYSpqw4Wy2Y2eV+vSCix5TcGNxTR9tB9EX+hNd +C7/zlKJAGUj9ZTSfbJO27HvleVN3D5idhIFxfP2tdfAp4OxQkf4a4nqKXZzPJpTlDs2LQNjKcwszaxKY +CMzGThieeBm7jUiWL6fuAX+sCsBIO0frJ9klq77f7NplfwJ3FcKWFyvMo71rtFZCoLt7dfgKim+SBGYn +agfNe8mmxy4ipqvWtGzMO3cdcKdiRijMzZG1upRjhoggHI/vS2JkWP4bNoZdGCAvaxriEoBdS5K9LqHQ +P6GurVXM5B3kuJkMBN+OmnrXxvcnWbYY6JwAO3KZ1+Vbi2ryPQIDAQABo4IBJjCCASIwDgYDVR0PAQH/ +BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW +BBQmE8zNXgf+dOmQ3kFb3p4xfznLjTAfBgNVHSMEGDAWgBQULrMXt1hWy65QCUDmH6+dixTCxjBVBggr +BgEFBQcBAQRJMEcwIQYIKwYBBQUHMAGGFWh0dHA6Ly9yMy5vLmxlbmNyLm9yZzAiBggrBgEFBQcwAoYW +aHR0cDovL3IzLmkubGVuY3Iub3JnLzAiBgNVHREEGzAZghdob3VzZXRyYWluaW5ncHVwcHkuaW5mbzAT +BgNVHSAEDDAKMAgGBmeBDAECATATBgorBgEEAdZ5AgQDAQH/BAIFADANBgkqhkiG9w0BAQsFAAOCAQEA +n8r5gDWJjoEEE9+hmk/61EleSVQA9SslR7deQnCrItdSOZQo877FJfWtfoRZNItcOfml9E7uYjXhzEOc +bVRe9+VbBt1jjUUu3xLLM7RA5+2pvb+cN1LJ2ijIsnkJwSgYhudGPx+1EgKEJ2huKQTVXqu8AT6rp9Tr +vs/3gXzqlVncXcfEb+5PjvcibCugdt9pE5BfRYBP5V2GcwOQs3zr2DShPuSPmXiLSoUxVczltfndPfM+ +WYaj5VOkvW5UNsm+IVPRlEcbHGmHwEHkBeBGHn4kvgv/14fKpEClkZ+VxgnRky6x951NDMVEJLdV9Vbs +G04Vh0wRjRyiuTPyT5Zj3g== +-----END CERTIFICATE----- diff --git a/privatekey/privatekey.go b/privatekey/privatekey.go index d3c82ae8ea7..3f5fb59b5e3 100644 --- a/privatekey/privatekey.go +++ b/privatekey/privatekey.go @@ -11,7 +11,7 @@ import ( "errors" "fmt" "hash" - "io/ioutil" + "os" ) func makeVerifyHash() (hash.Hash, error) { @@ -87,7 +87,7 @@ func verify(privateKey crypto.Signer) (crypto.Signer, crypto.PublicKey, error) { // match for the private key and returned as a crypto.PublicKey. This function // is only intended for use in administrative tooling and tests. func Load(keyPath string) (crypto.Signer, crypto.PublicKey, error) { - keyBytes, err := ioutil.ReadFile(keyPath) + keyBytes, err := os.ReadFile(keyPath) if err != nil { return nil, nil, fmt.Errorf("could not read key file %q", keyPath) } @@ -103,17 +103,29 @@ func Load(keyPath string) (crypto.Signer, crypto.PublicKey, error) { return nil, nil, fmt.Errorf("no PEM formatted block found in %q", keyPath) } + sign, pk, err := LoadDER(keyDER) + if err != nil { + return nil, nil, fmt.Errorf("parsing %q: %w", keyPath, err) + } + + return sign, pk, nil +} + +func LoadDER(keyDER *pem.Block) (crypto.Signer, crypto.PublicKey, error) { // Attempt to parse the PEM block as a private key in a PKCS #8 container. signer, err := x509.ParsePKCS8PrivateKey(keyDER.Bytes) if err == nil { - crytoSigner, ok := signer.(crypto.Signer) + cryptoSigner, ok := signer.(crypto.Signer) if ok { - return verify(crytoSigner) + return verify(cryptoSigner) } } // Attempt to parse the PEM block as a private key in a PKCS #1 container. 
rsaSigner, err := x509.ParsePKCS1PrivateKey(keyDER.Bytes) + if err != nil && keyDER.Type == "RSA PRIVATE KEY" { + return nil, nil, fmt.Errorf("unable to parse %q as a PKCS#1 RSA private key: %w", keyDER.Type, err) + } if err == nil { return verify(rsaSigner) } @@ -123,5 +135,5 @@ func Load(keyPath string) (crypto.Signer, crypto.PublicKey, error) { if err == nil { return verify(ecdsaSigner) } - return nil, nil, fmt.Errorf("unable to parse %q as a private key", keyPath) + return nil, nil, fmt.Errorf("unable to parse %q as a private key", keyDER.Type) } diff --git a/privatekey/privatekey_test.go b/privatekey/privatekey_test.go index f3fe653f88b..bcc2ecf3873 100644 --- a/privatekey/privatekey_test.go +++ b/privatekey/privatekey_test.go @@ -57,6 +57,6 @@ func TestLoad(t *testing.T) { signer, public, err = Load("../test/hierarchy/ee-e1.cert.pem") test.AssertError(t, err, "Should have failed, file is a certificate") - test.AssertEquals(t, signer, nil) - test.AssertEquals(t, public, nil) + test.AssertNil(t, signer, "Signer should be nil") + test.AssertNil(t, public, "Public should be nil") } diff --git a/probs/probs.go b/probs/probs.go index 3736e8d391e..fc8ba057656 100644 --- a/probs/probs.go +++ b/probs/probs.go @@ -4,32 +4,48 @@ import ( "fmt" "net/http" + "github.com/go-jose/go-jose/v4" + "github.com/letsencrypt/boulder/identifier" ) -// Error types that can be used in ACME payloads const ( + // Error types that can be used in ACME payloads. These are sorted in the + // same order as they are defined in RFC8555 Section 6.7. We do not implement + // the `compound`, `externalAccountRequired`, or `userActionRequired` errors, + // because we have no path that would return them. + AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") + // AlreadyReplacedProblem is a problem type that is defined in Section 7.4 + // of draft-ietf-acme-ari-08, for more information see: + // https://datatracker.ietf.org/doc/html/draft-ietf-acme-ari-08#section-7.4 + AlreadyReplacedProblem = ProblemType("alreadyReplaced") + AlreadyRevokedProblem = ProblemType("alreadyRevoked") + BadCSRProblem = ProblemType("badCSR") + BadNonceProblem = ProblemType("badNonce") + BadPublicKeyProblem = ProblemType("badPublicKey") + BadRevocationReasonProblem = ProblemType("badRevocationReason") + BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") + CAAProblem = ProblemType("caa") + // ConflictProblem is a problem type that is not defined in RFC8555. 
+ ConflictProblem = ProblemType("conflict") ConnectionProblem = ProblemType("connection") + DNSProblem = ProblemType("dns") + InvalidContactProblem = ProblemType("invalidContact") MalformedProblem = ProblemType("malformed") + OrderNotReadyProblem = ProblemType("orderNotReady") + PausedProblem = ProblemType("rateLimited") + RateLimitedProblem = ProblemType("rateLimited") + RejectedIdentifierProblem = ProblemType("rejectedIdentifier") ServerInternalProblem = ProblemType("serverInternal") TLSProblem = ProblemType("tls") UnauthorizedProblem = ProblemType("unauthorized") - RateLimitedProblem = ProblemType("rateLimited") - BadNonceProblem = ProblemType("badNonce") - InvalidEmailProblem = ProblemType("invalidEmail") - RejectedIdentifierProblem = ProblemType("rejectedIdentifier") - AccountDoesNotExistProblem = ProblemType("accountDoesNotExist") - CAAProblem = ProblemType("caa") - DNSProblem = ProblemType("dns") - AlreadyRevokedProblem = ProblemType("alreadyRevoked") - OrderNotReadyProblem = ProblemType("orderNotReady") - BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm") - BadPublicKeyProblem = ProblemType("badPublicKey") - BadRevocationReasonProblem = ProblemType("badRevocationReason") - BadCSRProblem = ProblemType("badCSR") + UnsupportedContactProblem = ProblemType("unsupportedContact") + UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier") + + // Defined in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/ + InvalidProfileProblem = ProblemType("invalidProfile") - V1ErrorNS = "urn:acme:error:" - V2ErrorNS = "urn:ietf:params:acme:error:" + ErrorNS = "urn:ietf:params:acme:error:" ) // ProblemType defines the error types in the ACME protocol @@ -46,6 +62,10 @@ type ProblemDetails struct { // SubProblems are optional additional per-identifier problems. See // RFC 8555 Section 6.7.1: https://tools.ietf.org/html/rfc8555#section-6.7.1 SubProblems []SubProblemDetails `json:"subproblems,omitempty"` + // Algorithms is an extension field defined only for problem documents of type + // badSignatureAlgorithm. See RFC 8555, Section 6.2: + // https://datatracker.ietf.org/doc/html/rfc8555#section-6.2 + Algorithms []jose.SignatureAlgorithm `json:"algorithms,omitempty"` } // SubProblemDetails represents sub-problems specific to an identifier that are @@ -56,7 +76,7 @@ type SubProblemDetails struct { Identifier identifier.ACMEIdentifier `json:"identifier"` } -func (pd *ProblemDetails) Error() string { +func (pd *ProblemDetails) String() string { return fmt.Sprintf("%s :: %s", pd.Type, pd.Detail) } @@ -71,220 +91,187 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem } } -// statusTooManyRequests is the HTTP status code meant for rate limiting -// errors. It's not currently in the net/http library so we add it here. -const statusTooManyRequests = 429 +// Helper functions which construct the basic RFC8555 Problem Documents, with +// the Type already set and the Details supplied by the caller. -// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out -// what HTTP status code it should represent. It should only be used by the WFE -// but is included in this package because of its reliance on ProblemTypes. 
-func ProblemDetailsToStatusCode(prob *ProblemDetails) int { - if prob.HTTPStatus != 0 { - return prob.HTTPStatus - } - switch prob.Type { - case - ConnectionProblem, - MalformedProblem, - BadSignatureAlgorithmProblem, - BadPublicKeyProblem, - TLSProblem, - BadNonceProblem, - InvalidEmailProblem, - RejectedIdentifierProblem, - AccountDoesNotExistProblem, - BadRevocationReasonProblem: - return http.StatusBadRequest - case ServerInternalProblem: - return http.StatusInternalServerError - case - UnauthorizedProblem, - CAAProblem: - return http.StatusForbidden - case RateLimitedProblem: - return statusTooManyRequests - default: - return http.StatusInternalServerError +// AccountDoesNotExist returns a ProblemDetails representing an +// AccountDoesNotExistProblem error +func AccountDoesNotExist(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: AccountDoesNotExistProblem, + Detail: detail, + HTTPStatus: http.StatusBadRequest, } } -// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad -// Request status code. -func BadNonce(detail string) *ProblemDetails { +// AlreadyReplaced returns a ProblemDetails with a AlreadyReplacedProblem and a +// 409 Conflict status code. +func AlreadyReplaced(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadNonceProblem, + Type: AlreadyReplacedProblem, Detail: detail, - HTTPStatus: http.StatusBadRequest, + HTTPStatus: http.StatusConflict, } } -// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad // Request status code. -func RejectedIdentifier(detail string) *ProblemDetails { +func AlreadyRevoked(detail string) *ProblemDetails { return &ProblemDetails{ - Type: RejectedIdentifierProblem, + Type: AlreadyRevokedProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict -// status code. -func Conflict(detail string) *ProblemDetails { +// BadCSR returns a ProblemDetails representing a BadCSRProblem. +func BadCSR(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: BadCSRProblem, Detail: detail, - HTTPStatus: http.StatusConflict, + HTTPStatus: http.StatusBadRequest, } } -// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad +// BadNonce returns a ProblemDetails with a BadNonceProblem and a 400 Bad // Request status code. -func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails { +func BadNonce(detail string) *ProblemDetails { return &ProblemDetails{ - Type: AlreadyRevokedProblem, - Detail: fmt.Sprintf(detail, a...), + Type: BadNonceProblem, + Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad // Request status code. -func Malformed(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) - } +func BadPublicKey(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: BadPublicKeyProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request -// Timeout status code. -func Canceled(detail string, args ...interface{}) *ProblemDetails { - if len(args) > 0 { - detail = fmt.Sprintf(detail, args...) 
- } +// BadRevocationReason returns a ProblemDetails representing +// a BadRevocationReasonProblem +func BadRevocationReason(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: BadRevocationReasonProblem, Detail: detail, - HTTPStatus: http.StatusRequestTimeout, + HTTPStatus: http.StatusBadRequest, } } // BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem // and a 400 Bad Request status code. -func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails { +func BadSignatureAlgorithm(detail string) *ProblemDetails { return &ProblemDetails{ Type: BadSignatureAlgorithmProblem, - Detail: fmt.Sprintf(detail, a...), + Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad -// Request status code. -func BadPublicKey(detail string, a ...interface{}) *ProblemDetails { +// CAA returns a ProblemDetails representing a CAAProblem +func CAA(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadPublicKeyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: CAAProblem, + Detail: detail, + HTTPStatus: http.StatusForbidden, } } -// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found -// status code. -func NotFound(detail string) *ProblemDetails { +// Connection returns a ProblemDetails representing a ConnectionProblem +// error +func Connection(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: ConnectionProblem, Detail: detail, - HTTPStatus: http.StatusNotFound, + HTTPStatus: http.StatusBadRequest, } } -// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a -// 500 Internal Server Failure status code. -func ServerInternal(detail string) *ProblemDetails { +// DNS returns a ProblemDetails representing a DNSProblem +func DNS(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ServerInternalProblem, + Type: DNSProblem, Detail: detail, - HTTPStatus: http.StatusInternalServerError, + HTTPStatus: http.StatusBadRequest, } } -// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 -// Forbidden status code. -func Unauthorized(detail string) *ProblemDetails { +// InvalidContact returns a ProblemDetails representing an InvalidContactProblem. +func InvalidContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: UnauthorizedProblem, + Type: InvalidContactProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusBadRequest, } } -// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP -// method error. -func MethodNotAllowed() *ProblemDetails { +// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad +// Request status code. +func Malformed(detail string, a ...any) *ProblemDetails { + if len(a) > 0 { + detail = fmt.Sprintf(detail, a...) 
+ } return &ProblemDetails{ Type: MalformedProblem, - Detail: "Method not allowed", - HTTPStatus: http.StatusMethodNotAllowed, + Detail: detail, + HTTPStatus: http.StatusBadRequest, } } -// ContentLengthRequired returns a ProblemDetails representing a missing -// Content-Length header error -func ContentLengthRequired() *ProblemDetails { +// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem +func OrderNotReady(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, - Detail: "missing Content-Length header", - HTTPStatus: http.StatusLengthRequired, + Type: OrderNotReadyProblem, + Detail: detail, + HTTPStatus: http.StatusForbidden, } } -// InvalidContentType returns a ProblemDetails suitable for a missing -// ContentType header, or an incorrect ContentType header -func InvalidContentType(detail string) *ProblemDetails { +// RateLimited returns a ProblemDetails representing a RateLimitedProblem error +func RateLimited(detail string) *ProblemDetails { return &ProblemDetails{ - Type: MalformedProblem, + Type: RateLimitedProblem, Detail: detail, - HTTPStatus: http.StatusUnsupportedMediaType, + HTTPStatus: http.StatusTooManyRequests, } } -// InvalidEmail returns a ProblemDetails representing an invalid email address -// error -func InvalidEmail(detail string) *ProblemDetails { +// Paused returns a ProblemDetails representing a RateLimitedProblem error +func Paused(detail string) *ProblemDetails { return &ProblemDetails{ - Type: InvalidEmailProblem, + Type: PausedProblem, Detail: detail, - HTTPStatus: http.StatusBadRequest, + HTTPStatus: http.StatusTooManyRequests, } } -// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem -// error -func ConnectionFailure(detail string) *ProblemDetails { +// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad +// Request status code. +func RejectedIdentifier(detail string) *ProblemDetails { return &ProblemDetails{ - Type: ConnectionProblem, + Type: RejectedIdentifierProblem, Detail: detail, HTTPStatus: http.StatusBadRequest, } } -// RateLimited returns a ProblemDetails representing a RateLimitedProblem error -func RateLimited(detail string) *ProblemDetails { +// ServerInternal returns a ProblemDetails with a ServerInternalProblem and a +// 500 Internal Server Failure status code. +func ServerInternal(detail string) *ProblemDetails { return &ProblemDetails{ - Type: RateLimitedProblem, + Type: ServerInternalProblem, Detail: detail, - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusInternalServerError, } } -// TLSError returns a ProblemDetails representing a TLSProblem error -func TLSError(detail string) *ProblemDetails { +// TLS returns a ProblemDetails representing a TLSProblem error +func TLS(detail string) *ProblemDetails { return &ProblemDetails{ Type: TLSProblem, Detail: detail, @@ -292,58 +279,75 @@ func TLSError(detail string) *ProblemDetails { } } -// AccountDoesNotExist returns a ProblemDetails representing an -// AccountDoesNotExistProblem error -func AccountDoesNotExist(detail string) *ProblemDetails { +// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403 +// Forbidden status code. 
+func Unauthorized(detail string) *ProblemDetails { return &ProblemDetails{ - Type: AccountDoesNotExistProblem, + Type: UnauthorizedProblem, Detail: detail, - HTTPStatus: http.StatusBadRequest, + HTTPStatus: http.StatusForbidden, } } -// CAA returns a ProblemDetails representing a CAAProblem -func CAA(detail string) *ProblemDetails { +// UnsupportedContact returns a ProblemDetails representing an +// UnsupportedContactProblem +func UnsupportedContact(detail string) *ProblemDetails { return &ProblemDetails{ - Type: CAAProblem, + Type: UnsupportedContactProblem, Detail: detail, - HTTPStatus: http.StatusForbidden, + HTTPStatus: http.StatusBadRequest, } } -// DNS returns a ProblemDetails representing a DNSProblem -func DNS(detail string) *ProblemDetails { +// UnsupportedIdentifier returns a ProblemDetails representing an +// UnsupportedIdentifierProblem +func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails { return &ProblemDetails{ - Type: DNSProblem, - Detail: detail, + Type: UnsupportedIdentifierProblem, + Detail: fmt.Sprintf(detail, a...), HTTPStatus: http.StatusBadRequest, } } -// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem -func OrderNotReady(detail string, a ...interface{}) *ProblemDetails { +// Additional helper functions that return variations on MalformedProblem with +// different HTTP status codes set. + +// Conflict returns a ProblemDetails with a ConflictProblem and a 409 Conflict +// status code. +func Conflict(detail string) *ProblemDetails { return &ProblemDetails{ - Type: OrderNotReadyProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusForbidden, + Type: ConflictProblem, + Detail: detail, + HTTPStatus: http.StatusConflict, } } -// BadRevocationReason returns a ProblemDetails representing -// a BadRevocationReasonProblem -func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails { +// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP +// method error. +func MethodNotAllowed() *ProblemDetails { return &ProblemDetails{ - Type: BadRevocationReasonProblem, - Detail: fmt.Sprintf(detail, a...), - HTTPStatus: http.StatusBadRequest, + Type: MalformedProblem, + Detail: "Method not allowed", + HTTPStatus: http.StatusMethodNotAllowed, } } -// BadCSR returns a ProblemDetails representing a BadCSRProblem. -func BadCSR(detail string, a ...interface{}) *ProblemDetails { +// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found +// status code. +func NotFound(detail string) *ProblemDetails { return &ProblemDetails{ - Type: BadCSRProblem, - Detail: fmt.Sprintf(detail, a...), + Type: MalformedProblem, + Detail: detail, + HTTPStatus: http.StatusNotFound, + } +} + +// InvalidProfile returns a ProblemDetails with type InvalidProfile, specified +// in https://datatracker.ietf.org/doc/draft-aaron-acme-profiles/. +func InvalidProfile(detail string) *ProblemDetails { + return &ProblemDetails{ + Type: InvalidProfileProblem, + Detail: detail, HTTPStatus: http.StatusBadRequest, } } diff --git a/probs/probs_test.go b/probs/probs_test.go index 6d2a3891b4d..ceefdfc64f9 100644 --- a/probs/probs_test.go +++ b/probs/probs_test.go @@ -15,35 +15,7 @@ func TestProblemDetails(t *testing.T) { Detail: "Wat? o.O", HTTPStatus: 403, } - test.AssertEquals(t, pd.Error(), "malformed :: Wat? 
o.O") -} - -func TestProblemDetailsToStatusCode(t *testing.T) { - testCases := []struct { - pb *ProblemDetails - statusCode int - }{ - {&ProblemDetails{Type: ConnectionProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: MalformedProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: ServerInternalProblem}, http.StatusInternalServerError}, - {&ProblemDetails{Type: TLSProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: UnauthorizedProblem}, http.StatusForbidden}, - {&ProblemDetails{Type: RateLimitedProblem}, statusTooManyRequests}, - {&ProblemDetails{Type: BadNonceProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: InvalidEmailProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: "foo"}, http.StatusInternalServerError}, - {&ProblemDetails{Type: "foo", HTTPStatus: 200}, 200}, - {&ProblemDetails{Type: ConnectionProblem, HTTPStatus: 200}, 200}, - {&ProblemDetails{Type: AccountDoesNotExistProblem}, http.StatusBadRequest}, - {&ProblemDetails{Type: BadRevocationReasonProblem}, http.StatusBadRequest}, - } - - for _, c := range testCases { - p := ProblemDetailsToStatusCode(c.pb) - if c.statusCode != p { - t.Errorf("Incorrect status code for %s. Expected %d, got %d", c.pb.Type, c.statusCode, p) - } - } + test.AssertEquals(t, pd.String(), "malformed :: Wat? o.O") } func TestProblemDetailsConvenience(t *testing.T) { @@ -53,14 +25,14 @@ func TestProblemDetailsConvenience(t *testing.T) { statusCode int detail string }{ - {InvalidEmail("invalid email detail"), InvalidEmailProblem, http.StatusBadRequest, "invalid email detail"}, - {ConnectionFailure("connection failure detail"), ConnectionProblem, http.StatusBadRequest, "connection failure detail"}, + {InvalidContact("invalid email detail"), InvalidContactProblem, http.StatusBadRequest, "invalid email detail"}, + {Connection("connection failure detail"), ConnectionProblem, http.StatusBadRequest, "connection failure detail"}, {Malformed("malformed detail"), MalformedProblem, http.StatusBadRequest, "malformed detail"}, {ServerInternal("internal error detail"), ServerInternalProblem, http.StatusInternalServerError, "internal error detail"}, {Unauthorized("unauthorized detail"), UnauthorizedProblem, http.StatusForbidden, "unauthorized detail"}, - {RateLimited("rate limited detail"), RateLimitedProblem, statusTooManyRequests, "rate limited detail"}, + {RateLimited("rate limited detail"), RateLimitedProblem, http.StatusTooManyRequests, "rate limited detail"}, {BadNonce("bad nonce detail"), BadNonceProblem, http.StatusBadRequest, "bad nonce detail"}, - {TLSError("TLS error detail"), TLSProblem, http.StatusBadRequest, "TLS error detail"}, + {TLS("TLS error detail"), TLSProblem, http.StatusBadRequest, "TLS error detail"}, {RejectedIdentifier("rejected identifier detail"), RejectedIdentifierProblem, http.StatusBadRequest, "rejected identifier detail"}, {AccountDoesNotExist("no account detail"), AccountDoesNotExistProblem, http.StatusBadRequest, "no account detail"}, {BadRevocationReason("only reason xxx is supported"), BadRevocationReasonProblem, http.StatusBadRequest, "only reason xxx is supported"}, @@ -91,19 +63,19 @@ func TestWithSubProblems(t *testing.T) { topProb := &ProblemDetails{ Type: RateLimitedProblem, Detail: "don't you think you have enough certificates already?", - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusTooManyRequests, } subProbs := []SubProblemDetails{ { - Identifier: identifier.DNSIdentifier("example.com"), + Identifier: identifier.NewDNS("example.com"), ProblemDetails: ProblemDetails{ Type: 
RateLimitedProblem, Detail: "don't you think you have enough certificates already?", - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusTooManyRequests, }, }, { - Identifier: identifier.DNSIdentifier("what about example.com"), + Identifier: identifier.NewDNS("what about example.com"), ProblemDetails: ProblemDetails{ Type: MalformedProblem, Detail: "try a real identifier value next time", @@ -120,11 +92,11 @@ func TestWithSubProblems(t *testing.T) { test.AssertDeepEquals(t, outResult.SubProblems, subProbs) // Adding another sub problem shouldn't squash the original sub problems anotherSubProb := SubProblemDetails{ - Identifier: identifier.DNSIdentifier("another ident"), + Identifier: identifier.NewDNS("another ident"), ProblemDetails: ProblemDetails{ Type: RateLimitedProblem, Detail: "yet another rate limit err", - HTTPStatus: statusTooManyRequests, + HTTPStatus: http.StatusTooManyRequests, }, } outResult = outResult.WithSubProblems([]SubProblemDetails{anotherSubProb}) diff --git a/publisher/proto/publisher.pb.go b/publisher/proto/publisher.pb.go index 7e361b11b4d..50574d43616 100644 --- a/publisher/proto/publisher.pb.go +++ b/publisher/proto/publisher.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: publisher.proto package proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -20,24 +21,73 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type SubmissionType int32 + +const ( + SubmissionType_unknown SubmissionType = 0 + SubmissionType_sct SubmissionType = 1 // Submitting a precert with the intent of getting SCTs + SubmissionType_info SubmissionType = 2 // Submitting a precert on a best-effort basis + SubmissionType_final SubmissionType = 3 // Submitting a final cert on a best-effort basis +) + +// Enum value maps for SubmissionType. +var ( + SubmissionType_name = map[int32]string{ + 0: "unknown", + 1: "sct", + 2: "info", + 3: "final", + } + SubmissionType_value = map[string]int32{ + "unknown": 0, + "sct": 1, + "info": 2, + "final": 3, + } +) + +func (x SubmissionType) Enum() *SubmissionType { + p := new(SubmissionType) + *p = x + return p +} + +func (x SubmissionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubmissionType) Descriptor() protoreflect.EnumDescriptor { + return file_publisher_proto_enumTypes[0].Descriptor() +} + +func (SubmissionType) Type() protoreflect.EnumType { + return &file_publisher_proto_enumTypes[0] +} + +func (x SubmissionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubmissionType.Descriptor instead. 
+func (SubmissionType) EnumDescriptor() ([]byte, []int) { + return file_publisher_proto_rawDescGZIP(), []int{0} +} + type Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"` + LogURL string `protobuf:"bytes,2,opt,name=LogURL,proto3" json:"LogURL,omitempty"` + LogPublicKey string `protobuf:"bytes,3,opt,name=LogPublicKey,proto3" json:"LogPublicKey,omitempty"` + Kind SubmissionType `protobuf:"varint,5,opt,name=kind,proto3,enum=SubmissionType" json:"kind,omitempty"` unknownFields protoimpl.UnknownFields - - Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"` - LogURL string `protobuf:"bytes,2,opt,name=LogURL,proto3" json:"LogURL,omitempty"` - LogPublicKey string `protobuf:"bytes,3,opt,name=LogPublicKey,proto3" json:"LogPublicKey,omitempty"` - Precert bool `protobuf:"varint,4,opt,name=precert,proto3" json:"precert,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Request) Reset() { *x = Request{} - if protoimpl.UnsafeEnabled { - mi := &file_publisher_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_publisher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Request) String() string { @@ -48,7 +98,7 @@ func (*Request) ProtoMessage() {} func (x *Request) ProtoReflect() protoreflect.Message { mi := &file_publisher_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -84,28 +134,25 @@ func (x *Request) GetLogPublicKey() string { return "" } -func (x *Request) GetPrecert() bool { +func (x *Request) GetKind() SubmissionType { if x != nil { - return x.Precert + return x.Kind } - return false + return SubmissionType_unknown } type Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Sct []byte `protobuf:"bytes,1,opt,name=sct,proto3" json:"sct,omitempty"` unknownFields protoimpl.UnknownFields - - Sct []byte `protobuf:"bytes,1,opt,name=sct,proto3" json:"sct,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Result) Reset() { *x = Result{} - if protoimpl.UnsafeEnabled { - mi := &file_publisher_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_publisher_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Result) String() string { @@ -116,7 +163,7 @@ func (*Result) ProtoMessage() {} func (x *Result) ProtoReflect() protoreflect.Message { mi := &file_publisher_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -140,52 +187,60 @@ func (x *Result) GetSct() []byte { var File_publisher_proto protoreflect.FileDescriptor -var file_publisher_proto_rawDesc = []byte{ +var file_publisher_proto_rawDesc = string([]byte{ 0x0a, 0x0f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x71, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x16, - 0x0a, 0x06, 0x4c, 
0x6f, 0x67, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x4c, 0x6f, 0x67, 0x55, 0x52, 0x4c, 0x12, 0x22, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x4c, 0x6f, - 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, - 0x65, 0x63, 0x65, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x65, - 0x63, 0x65, 0x72, 0x74, 0x22, 0x1a, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x73, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x63, 0x74, - 0x32, 0x3e, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x12, 0x31, 0x0a, - 0x1a, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, - 0x54, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x08, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x07, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x00, - 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, - 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, - 0x65, 0x72, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x6f, 0x22, 0x82, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x4c, 0x6f, 0x67, 0x55, 0x52, 0x4c, 0x12, 0x22, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x4c, + 0x6f, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x53, 0x75, 0x62, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x1a, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x73, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, + 0x63, 0x74, 0x2a, 0x3b, 0x0a, 0x0e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, + 0x00, 0x12, 0x07, 0x0a, 0x03, 0x73, 0x63, 0x74, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x69, 0x6e, + 0x66, 0x6f, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x10, 0x03, 0x32, + 0x3e, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x1a, + 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x6f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x54, + 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x08, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x07, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x00, 0x42, + 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, + 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, + 0x72, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( 
file_publisher_proto_rawDescOnce sync.Once - file_publisher_proto_rawDescData = file_publisher_proto_rawDesc + file_publisher_proto_rawDescData []byte ) func file_publisher_proto_rawDescGZIP() []byte { file_publisher_proto_rawDescOnce.Do(func() { - file_publisher_proto_rawDescData = protoimpl.X.CompressGZIP(file_publisher_proto_rawDescData) + file_publisher_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_publisher_proto_rawDesc), len(file_publisher_proto_rawDesc))) }) return file_publisher_proto_rawDescData } +var file_publisher_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_publisher_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_publisher_proto_goTypes = []interface{}{ - (*Request)(nil), // 0: Request - (*Result)(nil), // 1: Result +var file_publisher_proto_goTypes = []any{ + (SubmissionType)(0), // 0: SubmissionType + (*Request)(nil), // 1: Request + (*Result)(nil), // 2: Result } var file_publisher_proto_depIdxs = []int32{ - 0, // 0: Publisher.SubmitToSingleCTWithResult:input_type -> Request - 1, // 1: Publisher.SubmitToSingleCTWithResult:output_type -> Result - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: Request.kind:type_name -> SubmissionType + 1, // 1: Publisher.SubmitToSingleCTWithResult:input_type -> Request + 2, // 2: Publisher.SubmitToSingleCTWithResult:output_type -> Result + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_publisher_proto_init() } @@ -193,48 +248,22 @@ func file_publisher_proto_init() { if File_publisher_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_publisher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_publisher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_publisher_proto_rawDesc, - NumEnums: 0, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_publisher_proto_rawDesc), len(file_publisher_proto_rawDesc)), + NumEnums: 1, NumMessages: 2, NumExtensions: 0, NumServices: 1, }, GoTypes: file_publisher_proto_goTypes, DependencyIndexes: file_publisher_proto_depIdxs, + EnumInfos: file_publisher_proto_enumTypes, MessageInfos: file_publisher_proto_msgTypes, }.Build() File_publisher_proto = out.File - file_publisher_proto_rawDesc = nil file_publisher_proto_goTypes = nil file_publisher_proto_depIdxs = nil } diff --git a/publisher/proto/publisher.proto b/publisher/proto/publisher.proto index 4149517a12b..b155afdc426 100644 --- a/publisher/proto/publisher.proto +++ b/publisher/proto/publisher.proto @@ -5,11 +5,19 @@ service Publisher { rpc SubmitToSingleCTWithResult(Request) returns (Result) {} } +enum SubmissionType { + unknown = 0; + 
sct = 1; // Submitting a precert with the intent of getting SCTs + info = 2; // Submitting a precert on a best-effort basis + final = 3; // Submitting a final cert on a best-effort basis +} + message Request { bytes der = 1; string LogURL = 2; string LogPublicKey = 3; - bool precert = 4; + reserved 4; // Previously precert + SubmissionType kind = 5; } message Result { diff --git a/publisher/proto/publisher_grpc.pb.go b/publisher/proto/publisher_grpc.pb.go index d5dbc40650c..852b6bc2b7b 100644 --- a/publisher/proto/publisher_grpc.pb.go +++ b/publisher/proto/publisher_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: publisher.proto package proto @@ -11,8 +15,12 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Publisher_SubmitToSingleCTWithResult_FullMethodName = "/Publisher/SubmitToSingleCTWithResult" +) // PublisherClient is the client API for Publisher service. // @@ -30,8 +38,9 @@ func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { } func (c *publisherClient) SubmitToSingleCTWithResult(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Result, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Result) - err := c.cc.Invoke(ctx, "/Publisher/SubmitToSingleCTWithResult", in, out, opts...) + err := c.cc.Invoke(ctx, Publisher_SubmitToSingleCTWithResult_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -40,20 +49,24 @@ func (c *publisherClient) SubmitToSingleCTWithResult(ctx context.Context, in *Re // PublisherServer is the server API for Publisher service. // All implementations must embed UnimplementedPublisherServer -// for forward compatibility +// for forward compatibility. type PublisherServer interface { SubmitToSingleCTWithResult(context.Context, *Request) (*Result, error) mustEmbedUnimplementedPublisherServer() } -// UnimplementedPublisherServer must be embedded to have forward compatible implementations. -type UnimplementedPublisherServer struct { -} +// UnimplementedPublisherServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedPublisherServer struct{} func (UnimplementedPublisherServer) SubmitToSingleCTWithResult(context.Context, *Request) (*Result, error) { return nil, status.Errorf(codes.Unimplemented, "method SubmitToSingleCTWithResult not implemented") } func (UnimplementedPublisherServer) mustEmbedUnimplementedPublisherServer() {} +func (UnimplementedPublisherServer) testEmbeddedByValue() {} // UnsafePublisherServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to PublisherServer will @@ -63,6 +76,13 @@ type UnsafePublisherServer interface { } func RegisterPublisherServer(s grpc.ServiceRegistrar, srv PublisherServer) { + // If the following call panics, it indicates UnimplementedPublisherServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Publisher_ServiceDesc, srv) } @@ -76,7 +96,7 @@ func _Publisher_SubmitToSingleCTWithResult_Handler(srv interface{}, ctx context. } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/Publisher/SubmitToSingleCTWithResult", + FullMethod: Publisher_SubmitToSingleCTWithResult_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PublisherServer).SubmitToSingleCTWithResult(ctx, req.(*Request)) diff --git a/publisher/publisher.go b/publisher/publisher.go index 1b710c246cf..9b4ac9f4a51 100644 --- a/publisher/publisher.go +++ b/publisher/publisher.go @@ -24,8 +24,8 @@ import ( "github.com/google/certificate-transparency-go/jsonclient" cttls "github.com/google/certificate-transparency-go/tls" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/letsencrypt/boulder/canceled" "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/issuance" blog "github.com/letsencrypt/boulder/log" @@ -40,11 +40,20 @@ type Log struct { client *ctClient.LogClient } +// cacheKey is a comparable type for use as a key within a logCache. It holds +// both the log URI and its log_id (base64 encoding of its pubkey), so that +// the cache won't interfere if the RA decides that a log's URI or pubkey has +// changed. +type cacheKey struct { + uri string + pubkey string +} + // logCache contains a cache of *Log's that are constructed as required by // `SubmitToSingleCT` type logCache struct { sync.RWMutex - logs map[string]*Log + logs map[cacheKey]*Log } // AddLog adds a *Log to the cache by constructing the statName, client and @@ -52,7 +61,7 @@ type logCache struct { func (c *logCache) AddLog(uri, b64PK, userAgent string, logger blog.Logger) (*Log, error) { // Lock the mutex for reading to check the cache c.RLock() - log, present := c.logs[b64PK] + log, present := c.logs[cacheKey{uri, b64PK}] c.RUnlock() // If we have already added this log, give it back @@ -69,7 +78,7 @@ func (c *logCache) AddLog(uri, b64PK, userAgent string, logger blog.Logger) (*Lo if err != nil { return nil, err } - c.logs[b64PK] = log + c.logs[cacheKey{uri, b64PK}] = log return log, nil } @@ -84,8 +93,9 @@ type logAdaptor struct { blog.Logger } -func (la logAdaptor) Printf(s string, args ...interface{}) { - la.Logger.Infof(s, args...) +func (la logAdaptor) Printf(s string, args ...any) { + // Do nothing. `jsonclient`'s logs are all variations of "backing off", and add lots of noise + // when a CT log is unavailable. 
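	// Dropping this output is safe for observability: submission failures are
	// still returned to the caller, logged once by SubmitToSingleCTWithResult,
	// and counted in the ct_errors_count metric, so only jsonclient's
	// per-retry chatter is lost.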
} // NewLog returns an initialized Log struct @@ -96,12 +106,15 @@ func NewLog(uri, b64PK, userAgent string, logger blog.Logger) (*Log, error) { } url.Path = strings.TrimSuffix(url.Path, "/") - pemPK := fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", - b64PK) + derPK, err := base64.StdEncoding.DecodeString(b64PK) + if err != nil { + return nil, err + } + opts := jsonclient.Options{ - Logger: logAdaptor{logger}, - PublicKey: pemPK, - UserAgent: userAgent, + Logger: logAdaptor{logger}, + PublicKeyDER: derPK, + UserAgent: userAgent, } httpClient := &http.Client{ // We set the HTTP client timeout to about half of what we expect @@ -120,6 +133,7 @@ func NewLog(uri, b64PK, userAgent string, logger blog.Logger) (*Log, error) { // "unlimited," which would be bad. Transport: &http.Transport{ MaxIdleConns: http.DefaultTransport.(*http.Transport).MaxIdleConns, + MaxIdleConnsPerHost: http.DefaultTransport.(*http.Transport).MaxIdleConns, IdleConnTimeout: http.DefaultTransport.(*http.Transport).IdleConnTimeout, TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, // In Boulder Issue 3821[0] we found that HTTP/2 support was causing hard @@ -159,52 +173,42 @@ type pubMetrics struct { } func initMetrics(stats prometheus.Registerer) *pubMetrics { - submissionLatency := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "ct_submission_time_seconds", - Help: "Time taken to submit a certificate to a CT log", - Buckets: metrics.InternetFacingBuckets, - }, - []string{"log", "status", "http_status"}, - ) - stats.MustRegister(submissionLatency) - - probeLatency := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "ct_probe_time_seconds", - Help: "Time taken to probe a CT log", - Buckets: metrics.InternetFacingBuckets, - }, - []string{"log", "status"}, - ) - stats.MustRegister(probeLatency) - - errorCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "ct_errors_count", - Help: "Count of errors by type", - }, - []string{"type"}, - ) - stats.MustRegister(errorCount) + submissionLatency := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "ct_submission_time_seconds", + Help: "Time taken to submit a certificate to a CT log", + Buckets: metrics.InternetFacingBuckets, + }, []string{"log", "type", "status", "http_status"}) + + probeLatency := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "ct_probe_time_seconds", + Help: "Time taken to probe a CT log", + Buckets: metrics.InternetFacingBuckets, + }, []string{"log", "status"}) + + errorCount := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "ct_errors_count", + Help: "Count of errors by type", + }, []string{"log", "type"}) return &pubMetrics{submissionLatency, probeLatency, errorCount} } // Impl defines a Publisher type Impl struct { - pubpb.UnimplementedPublisherServer + pubpb.UnsafePublisherServer log blog.Logger userAgent string - issuerBundles map[issuance.IssuerNameID][]ct.ASN1Cert + issuerBundles map[issuance.NameID][]ct.ASN1Cert ctLogsCache logCache metrics *pubMetrics } +var _ pubpb.PublisherServer = (*Impl)(nil) + // New creates a Publisher that will submit certificates // to requested CT logs func New( - bundles map[issuance.IssuerNameID][]ct.ASN1Cert, + bundles map[issuance.NameID][]ct.ASN1Cert, userAgent string, logger blog.Logger, stats prometheus.Registerer, @@ -213,28 +217,31 @@ func New( issuerBundles: bundles, userAgent: userAgent, ctLogsCache: logCache{ - logs: make(map[string]*Log), + logs: 
make(map[cacheKey]*Log), }, log: logger, metrics: initMetrics(stats), } } -// SubmitToSingleCTWithResult will submit the certificate represented by certDER to the CT -// log specified by log URL and public key (base64) and return the SCT to the caller +// SubmitToSingleCTWithResult will submit the certificate represented by certDER +// to the CT log specified by log URL and public key (base64) and return the SCT +// to the caller. func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Request) (*pubpb.Result, error) { + if core.IsAnyNilOrZero(req.Der, req.LogURL, req.LogPublicKey, req.Kind) { + return nil, errors.New("incomplete gRPC request message") + } + cert, err := x509.ParseCertificate(req.Der) if err != nil { - pub.log.AuditErrf("Failed to parse certificate: %s", err) return nil, err } + chain := []ct.ASN1Cert{{Data: req.Der}} - id := issuance.GetIssuerNameID(cert) + id := issuance.IssuerNameID(cert) issuerBundle, ok := pub.issuerBundles[id] if !ok { - err := fmt.Errorf("No issuerBundle matching issuerNameID: %d", int64(id)) - pub.log.AuditErrf("Failed to submit certificate to CT log: %s", err) - return nil, err + return nil, fmt.Errorf("No issuerBundle matching issuerNameID: %d", int64(id)) } chain = append(chain, issuerBundle...) @@ -243,20 +250,12 @@ func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Requ // and returned. ctLog, err := pub.ctLogsCache.AddLog(req.LogURL, req.LogPublicKey, pub.userAgent, pub.log) if err != nil { - pub.log.AuditErrf("Making Log: %s", err) - return nil, err + return nil, fmt.Errorf("adding CT log to internal cache: %s", err) } - isPrecert := req.Precert - - sct, err := pub.singleLogSubmit( - ctx, - chain, - isPrecert, - core.SerialToString(cert.SerialNumber), - ctLog) + sct, err := pub.singleLogSubmit(ctx, chain, req.Kind, ctLog) if err != nil { - if canceled.Is(err) { + if core.IsCanceled(err) { return nil, err } var body string @@ -264,8 +263,15 @@ func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Requ if errors.As(err, &rspErr) && rspErr.StatusCode < 500 { body = string(rspErr.Body) } - pub.log.AuditErrf("Failed to submit certificate to CT log at %s: %s Body=%q", - ctLog.uri, err, body) + pub.log.InfoObject("Failed to submit certificate to CT log", struct { + LogURL string + Error string + Body string + }{ + LogURL: ctLog.uri, + Error: err.Error(), + Body: body, + }) return nil, err } @@ -279,13 +285,11 @@ func (pub *Impl) SubmitToSingleCTWithResult(ctx context.Context, req *pubpb.Requ func (pub *Impl) singleLogSubmit( ctx context.Context, chain []ct.ASN1Cert, - isPrecert bool, - serial string, + kind pubpb.SubmissionType, ctLog *Log, ) (*ct.SignedCertificateTimestamp, error) { - var submissionMethod func(context.Context, []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) - submissionMethod = ctLog.client.AddChain - if isPrecert { + submissionMethod := ctLog.client.AddChain + if kind == pubpb.SubmissionType_sct || kind == pubpb.SubmissionType_info { submissionMethod = ctLog.client.AddPreChain } @@ -294,7 +298,7 @@ func (pub *Impl) singleLogSubmit( took := time.Since(start).Seconds() if err != nil { status := "error" - if canceled.Is(err) { + if core.IsCanceled(err) { status = "canceled" } httpStatus := "" @@ -304,30 +308,33 @@ func (pub *Impl) singleLogSubmit( } pub.metrics.submissionLatency.With(prometheus.Labels{ "log": ctLog.uri, + "type": kind.String(), "status": status, "http_status": httpStatus, }).Observe(took) - if isPrecert { - 
pub.metrics.errorCount.WithLabelValues("precert").Inc() - } else { - pub.metrics.errorCount.WithLabelValues("final").Inc() - } + pub.metrics.errorCount.With(prometheus.Labels{ + "log": ctLog.uri, + "type": kind.String(), + }).Inc() return nil, err } pub.metrics.submissionLatency.With(prometheus.Labels{ "log": ctLog.uri, + "type": kind.String(), "status": "success", "http_status": "", }).Observe(took) - timestamp := time.Unix(int64(sct.Timestamp)/1000, 0) - if time.Until(timestamp) > time.Minute { - return nil, fmt.Errorf("SCT Timestamp was too far in the future (%s)", timestamp) + threshold := uint64(time.Now().Add(time.Minute).UnixMilli()) //nolint: gosec // Current-ish timestamp is guaranteed to fit in a uint64 + if sct.Timestamp > threshold { + return nil, fmt.Errorf("SCT Timestamp was too far in the future (%d > %d)", sct.Timestamp, threshold) } + // For regular certificates, we could get an old SCT, but that shouldn't // happen for precertificates. - if isPrecert && time.Until(timestamp) < -10*time.Minute { - return nil, fmt.Errorf("SCT Timestamp was too far in the past (%s)", timestamp) + threshold = uint64(time.Now().Add(-10 * time.Minute).UnixMilli()) //nolint: gosec // Current-ish timestamp is guaranteed to fit in a uint64 + if kind != pubpb.SubmissionType_final && sct.Timestamp < threshold { + return nil, fmt.Errorf("SCT Timestamp was too far in the past (%d < %d)", sct.Timestamp, threshold) } return sct, nil @@ -358,7 +365,7 @@ func CreateTestingSignedSCT(req []string, k *ecdsa.PrivateKey, precert bool, tim // Sign the SCT rawKey, _ := x509.MarshalPKIXPublicKey(&k.PublicKey) logID := sha256.Sum256(rawKey) - timestampMillis := uint64(timestamp.UnixNano()) / 1e6 + timestampMillis := uint64(timestamp.UnixMilli()) //nolint: gosec // Current-ish timestamp is guaranteed to fit in a uint64 serialized, _ := ct.SerializeSCTSignatureInput(ct.SignedCertificateTimestamp{ SCTVersion: ct.V1, LogID: ct.LogID{KeyID: logID}, @@ -399,7 +406,7 @@ func CreateTestingSignedSCT(req []string, k *ecdsa.PrivateKey, precert bool, tim // GetCTBundleForChain takes a slice of *issuance.Certificate(s) // representing a certificate chain and returns a slice of -// ct.ANS1Cert(s) in the same order +// ct.ASN1Cert(s) in the same order func GetCTBundleForChain(chain []*issuance.Certificate) []ct.ASN1Cert { var ctBundle []ct.ASN1Cert for _, cert := range chain { diff --git a/publisher/publisher_test.go b/publisher/publisher_test.go index 0f33b980680..98a501989fd 100644 --- a/publisher/publisher_test.go +++ b/publisher/publisher_test.go @@ -67,10 +67,7 @@ func logSrv(k *ecdsa.PrivateKey) *testLogSrv { if err != nil { return } - precert := false - if r.URL.Path == "/ct/v1/add-pre-chain" { - precert = true - } + precert := r.URL.Path == "/ct/v1/add-pre-chain" sct := CreateTestingSignedSCT(jsonReq.Chain, k, precert, time.Now()) fmt.Fprint(w, string(sct)) atomic.AddInt64(&testLog.submissions, 1) @@ -92,10 +89,7 @@ func lyingLogSrv(k *ecdsa.PrivateKey, timestamp time.Time) *testLogSrv { if err != nil { return } - precert := false - if r.URL.Path == "/ct/v1/add-pre-chain" { - precert = true - } + precert := r.URL.Path == "/ct/v1/add-pre-chain" sct := CreateTestingSignedSCT(jsonReq.Chain, k, precert, timestamp) fmt.Fprint(w, string(sct)) atomic.AddInt64(&testLog.submissions, 1) @@ -141,7 +135,7 @@ func setup(t *testing.T) (*Impl, *x509.Certificate, *ecdsa.PrivateKey) { test.AssertNotError(t, err, "failed to load chain3.") // Create an example issuerNameID to CT bundle mapping - issuerBundles := 
map[issuance.IssuerNameID][]ct.ASN1Cert{ + issuerBundles := map[issuance.NameID][]ct.ASN1Cert{ chain1[0].NameID(): GetCTBundleForChain(chain1), chain2[0].NameID(): GetCTBundleForChain(chain2), chain3[0].NameID(): GetCTBundleForChain(chain3), @@ -162,7 +156,7 @@ func setup(t *testing.T) (*Impl, *x509.Certificate, *ecdsa.PrivateKey) { return pub, leaf, k } -func addLog(t *testing.T, pub *Impl, port int, pubKey *ecdsa.PublicKey) *Log { +func addLog(t *testing.T, port int, pubKey *ecdsa.PublicKey) *Log { uri := fmt.Sprintf("http://localhost:%d", port) der, err := x509.MarshalPKIXPublicKey(pubKey) test.AssertNotError(t, err, "Failed to marshal key") @@ -172,7 +166,7 @@ func addLog(t *testing.T, pub *Impl, port int, pubKey *ecdsa.PublicKey) *Log { return newLog } -func makePrecert(k *ecdsa.PrivateKey) (map[issuance.IssuerNameID][]ct.ASN1Cert, []byte, error) { +func makePrecert(k *ecdsa.PrivateKey) (map[issuance.NameID][]ct.ASN1Cert, []byte, error) { rootTmpl := x509.Certificate{ SerialNumber: big.NewInt(0), Subject: pkix.Name{CommonName: "root"}, @@ -201,8 +195,8 @@ func makePrecert(k *ecdsa.PrivateKey) (map[issuance.IssuerNameID][]ct.ASN1Cert, if err != nil { return nil, nil, err } - precertIssuerNameID := issuance.GetIssuerNameID(precertX509) - bundles := map[issuance.IssuerNameID][]ct.ASN1Cert{ + precertIssuerNameID := issuance.IssuerNameID(precertX509) + bundles := map[issuance.NameID][]ct.ASN1Cert{ precertIssuerNameID: { ct.ASN1Cert{Data: rootBytes}, }, @@ -217,14 +211,19 @@ func TestTimestampVerificationFuture(t *testing.T) { defer server.Close() port, err := getPort(server.URL) test.AssertNotError(t, err, "Failed to get test server port") - testLog := addLog(t, pub, port, &k.PublicKey) + testLog := addLog(t, port, &k.PublicKey) // Precert issuerBundles, precert, err := makePrecert(k) test.AssertNotError(t, err, "Failed to create test leaf") pub.issuerBundles = issuerBundles - _, err = pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{LogURL: testLog.uri, LogPublicKey: testLog.logID, Der: precert, Precert: true}) + _, err = pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{ + LogURL: testLog.uri, + LogPublicKey: testLog.logID, + Der: precert, + Kind: pubpb.SubmissionType_sct, + }) if err == nil { t.Fatal("Expected error for lying log server, got none") } @@ -240,7 +239,7 @@ func TestTimestampVerificationPast(t *testing.T) { defer server.Close() port, err := getPort(server.URL) test.AssertNotError(t, err, "Failed to get test server port") - testLog := addLog(t, pub, port, &k.PublicKey) + testLog := addLog(t, port, &k.PublicKey) // Precert issuerBundles, precert, err := makePrecert(k) @@ -248,7 +247,12 @@ func TestTimestampVerificationPast(t *testing.T) { pub.issuerBundles = issuerBundles - _, err = pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{LogURL: testLog.uri, LogPublicKey: testLog.logID, Der: precert, Precert: true}) + _, err = pub.SubmitToSingleCTWithResult(ctx, &pubpb.Request{ + LogURL: testLog.uri, + LogPublicKey: testLog.logID, + Der: precert, + Kind: pubpb.SubmissionType_sct, + }) if err == nil { t.Fatal("Expected error for lying log server, got none") } @@ -259,7 +263,7 @@ func TestTimestampVerificationPast(t *testing.T) { func TestLogCache(t *testing.T) { cache := logCache{ - logs: make(map[string]*Log), + logs: make(map[cacheKey]*Log), } // Adding a log with an invalid base64 public key should error @@ -323,57 +327,121 @@ func TestLogErrorBody(t *testing.T) { LogURL: logURI, LogPublicKey: pkB64, Der: leaf.Raw, + Kind: pubpb.SubmissionType_final, }) test.AssertError(t, 
err, "SubmitToSingleCTWithResult didn't fail") test.AssertEquals(t, len(log.GetAllMatching("well this isn't good now is it")), 1) } -func TestHTTPStatusMetric(t *testing.T) { +// TestErrorMetrics checks that the ct_errors_count and +// ct_submission_time_seconds metrics are updated with the correct labels when +// the publisher encounters errors. +func TestErrorMetrics(t *testing.T) { pub, leaf, k := setup(t) + pkDER, err := x509.MarshalPKIXPublicKey(&k.PublicKey) + test.AssertNotError(t, err, "Failed to marshal key") + pkB64 := base64.StdEncoding.EncodeToString(pkDER) + + // Set up a bad server that will always produce errors. badSrv := errorBodyLogSrv() defer badSrv.Close() port, err := getPort(badSrv.URL) test.AssertNotError(t, err, "Failed to get test server port") logURI := fmt.Sprintf("http://localhost:%d", port) - pkDER, err := x509.MarshalPKIXPublicKey(&k.PublicKey) - test.AssertNotError(t, err, "Failed to marshal key") - pkB64 := base64.StdEncoding.EncodeToString(pkDER) _, err = pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{ LogURL: logURI, LogPublicKey: pkB64, Der: leaf.Raw, + Kind: pubpb.SubmissionType_sct, + }) + test.AssertError(t, err, "SubmitToSingleCTWithResult didn't fail") + test.AssertMetricWithLabelsEquals(t, pub.metrics.submissionLatency, prometheus.Labels{ + "log": logURI, + "type": "sct", + "status": "error", + "http_status": "400", + }, 1) + test.AssertMetricWithLabelsEquals(t, pub.metrics.errorCount, prometheus.Labels{ + "log": logURI, + "type": "sct", + }, 1) + + _, err = pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{ + LogURL: logURI, + LogPublicKey: pkB64, + Der: leaf.Raw, + Kind: pubpb.SubmissionType_final, }) test.AssertError(t, err, "SubmitToSingleCTWithResult didn't fail") test.AssertMetricWithLabelsEquals(t, pub.metrics.submissionLatency, prometheus.Labels{ "log": logURI, + "type": "final", "status": "error", "http_status": "400", }, 1) + test.AssertMetricWithLabelsEquals(t, pub.metrics.errorCount, prometheus.Labels{ + "log": logURI, + "type": "final", + }, 1) - pub, leaf, k = setup(t) - pkDER, err = x509.MarshalPKIXPublicKey(&k.PublicKey) + _, err = pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{ + LogURL: logURI, + LogPublicKey: pkB64, + Der: leaf.Raw, + Kind: pubpb.SubmissionType_info, + }) + test.AssertError(t, err, "SubmitToSingleCTWithResult didn't fail") + test.AssertMetricWithLabelsEquals(t, pub.metrics.submissionLatency, prometheus.Labels{ + "log": logURI, + "type": "info", + "status": "error", + "http_status": "400", + }, 1) + test.AssertMetricWithLabelsEquals(t, pub.metrics.errorCount, prometheus.Labels{ + "log": logURI, + "type": "info", + }, 1) +} + +// TestSuccessMetrics checks that the ct_errors_count and +// ct_submission_time_seconds metrics are updated with the correct labels when +// the publisher succeeds. +func TestSuccessMetrics(t *testing.T) { + pub, leaf, k := setup(t) + + pkDER, err := x509.MarshalPKIXPublicKey(&k.PublicKey) test.AssertNotError(t, err, "Failed to marshal key") - pkB64 = base64.StdEncoding.EncodeToString(pkDER) + pkB64 := base64.StdEncoding.EncodeToString(pkDER) + + // Set up a working server that will succeed. 
workingSrv := logSrv(k) defer workingSrv.Close() - port, err = getPort(workingSrv.URL) + port, err := getPort(workingSrv.URL) test.AssertNotError(t, err, "Failed to get test server port") - logURI = fmt.Sprintf("http://localhost:%d", port) + logURI := fmt.Sprintf("http://localhost:%d", port) + // Only the latency metric should be updated on a success. _, err = pub.SubmitToSingleCTWithResult(context.Background(), &pubpb.Request{ LogURL: logURI, LogPublicKey: pkB64, Der: leaf.Raw, + Kind: pubpb.SubmissionType_final, }) test.AssertNotError(t, err, "SubmitToSingleCTWithResult failed") test.AssertMetricWithLabelsEquals(t, pub.metrics.submissionLatency, prometheus.Labels{ "log": logURI, + "type": "final", "status": "success", "http_status": "", }, 1) + test.AssertMetricWithLabelsEquals(t, pub.metrics.errorCount, prometheus.Labels{ + "log": logURI, + "type": "final", + }, 0) } + func Test_GetCTBundleForChain(t *testing.T) { chain, err := issuance.LoadChain([]string{ "../test/hierarchy/int-r3.cert.pem", diff --git a/ra/mock_test.go b/ra/mock_test.go deleted file mode 100644 index 1ca8533cf95..00000000000 --- a/ra/mock_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package ra - -import ( - "context" - "time" - - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/mocks" - sapb "github.com/letsencrypt/boulder/sa/proto" - grpc "google.golang.org/grpc" -) - -type mockInvalidAuthorizationsAuthority struct { - mocks.StorageAuthority - domainWithFailures string -} - -func (sa *mockInvalidAuthorizationsAuthority) CountOrders(_ context.Context, _ *sapb.CountOrdersRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - return &sapb.Count{}, nil -} - -func (sa *mockInvalidAuthorizationsAuthority) PreviousCertificateExists(_ context.Context, _ *sapb.PreviousCertificateExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { - return &sapb.Exists{ - Exists: false, - }, nil -} - -func (sa *mockInvalidAuthorizationsAuthority) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - if req.Hostname == sa.domainWithFailures { - return &sapb.Count{Count: 1}, nil - } else { - return &sapb.Count{}, nil - } -} - -// An authority that returns nonzero failures for CountInvalidAuthorizations2, -// and also returns existing authzs for the same domain from GetAuthorizations2 -type mockInvalidPlusValidAuthzAuthority struct { - mockInvalidAuthorizationsAuthority -} - -func (sa *mockInvalidPlusValidAuthzAuthority) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { - return &sapb.Authorizations{ - Authz: []*sapb.Authorizations_MapElement{ - { - Domain: sa.domainWithFailures, Authz: &corepb.Authorization{ - Id: "1234", - Status: "valid", - Identifier: sa.domainWithFailures, - RegistrationID: 1234, - Expires: time.Date(2101, 12, 3, 0, 0, 0, 0, time.UTC).Unix(), - }, - }, - }, - }, nil -} diff --git a/ra/proto/ra.pb.go b/ra/proto/ra.pb.go index eacd38579ab..b9bf35038b6 100644 --- a/ra/proto/ra.pb.go +++ b/ra/proto/ra.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: ra.proto package proto @@ -10,9 +10,11 @@ import ( proto "github.com/letsencrypt/boulder/core/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,33 +24,29 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type UpdateRegistrationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SCTRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PrecertDER []byte `protobuf:"bytes,1,opt,name=precertDER,proto3" json:"precertDER,omitempty"` unknownFields protoimpl.UnknownFields - - Base *proto.Registration `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"` - Update *proto.Registration `protobuf:"bytes,2,opt,name=update,proto3" json:"update,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *UpdateRegistrationRequest) Reset() { - *x = UpdateRegistrationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SCTRequest) Reset() { + *x = SCTRequest{} + mi := &file_ra_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateRegistrationRequest) String() string { +func (x *SCTRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateRegistrationRequest) ProtoMessage() {} +func (*SCTRequest) ProtoMessage() {} -func (x *UpdateRegistrationRequest) ProtoReflect() protoreflect.Message { +func (x *SCTRequest) ProtoReflect() protoreflect.Message { mi := &file_ra_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -58,53 +56,85 @@ func (x *UpdateRegistrationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateRegistrationRequest.ProtoReflect.Descriptor instead. -func (*UpdateRegistrationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use SCTRequest.ProtoReflect.Descriptor instead. 
+func (*SCTRequest) Descriptor() ([]byte, []int) { return file_ra_proto_rawDescGZIP(), []int{0} } -func (x *UpdateRegistrationRequest) GetBase() *proto.Registration { +func (x *SCTRequest) GetPrecertDER() []byte { if x != nil { - return x.Base + return x.PrecertDER } return nil } -func (x *UpdateRegistrationRequest) GetUpdate() *proto.Registration { +type SCTResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + SctDER [][]byte `protobuf:"bytes,1,rep,name=sctDER,proto3" json:"sctDER,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SCTResponse) Reset() { + *x = SCTResponse{} + mi := &file_ra_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SCTResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SCTResponse) ProtoMessage() {} + +func (x *SCTResponse) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[1] if x != nil { - return x.Update + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SCTResponse.ProtoReflect.Descriptor instead. +func (*SCTResponse) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{1} +} + +func (x *SCTResponse) GetSctDER() [][]byte { + if x != nil { + return x.SctDER } return nil } -type UpdateAuthorizationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GenerateOCSPRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` unknownFields protoimpl.UnknownFields - - Authz *proto.Authorization `protobuf:"bytes,1,opt,name=authz,proto3" json:"authz,omitempty"` - ChallengeIndex int64 `protobuf:"varint,2,opt,name=challengeIndex,proto3" json:"challengeIndex,omitempty"` - Response *proto.Challenge `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *UpdateAuthorizationRequest) Reset() { - *x = UpdateAuthorizationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GenerateOCSPRequest) Reset() { + *x = GenerateOCSPRequest{} + mi := &file_ra_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateAuthorizationRequest) String() string { +func (x *GenerateOCSPRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateAuthorizationRequest) ProtoMessage() {} +func (*GenerateOCSPRequest) ProtoMessage() {} -func (x *UpdateAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GenerateOCSPRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -114,59 +144,139 @@ func (x *UpdateAuthorizationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateAuthorizationRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{1} +// Deprecated: Use GenerateOCSPRequest.ProtoReflect.Descriptor instead. +func (*GenerateOCSPRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{2} } -func (x *UpdateAuthorizationRequest) GetAuthz() *proto.Authorization { +func (x *GenerateOCSPRequest) GetSerial() string { if x != nil { - return x.Authz + return x.Serial } - return nil + return "" } -func (x *UpdateAuthorizationRequest) GetChallengeIndex() int64 { +type UpdateRegistrationKeyRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Jwk []byte `protobuf:"bytes,2,opt,name=jwk,proto3" json:"jwk,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateRegistrationKeyRequest) Reset() { + *x = UpdateRegistrationKeyRequest{} + mi := &file_ra_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateRegistrationKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRegistrationKeyRequest) ProtoMessage() {} + +func (x *UpdateRegistrationKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[3] if x != nil { - return x.ChallengeIndex + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRegistrationKeyRequest.ProtoReflect.Descriptor instead. +func (*UpdateRegistrationKeyRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateRegistrationKeyRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID } return 0 } -func (x *UpdateAuthorizationRequest) GetResponse() *proto.Challenge { +func (x *UpdateRegistrationKeyRequest) GetJwk() []byte { if x != nil { - return x.Response + return x.Jwk } return nil } -type PerformValidationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type DeactivateRegistrationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeactivateRegistrationRequest) Reset() { + *x = DeactivateRegistrationRequest{} + mi := &file_ra_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} - Authz *proto.Authorization `protobuf:"bytes,1,opt,name=authz,proto3" json:"authz,omitempty"` - ChallengeIndex int64 `protobuf:"varint,2,opt,name=challengeIndex,proto3" json:"challengeIndex,omitempty"` +func (x *DeactivateRegistrationRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *PerformValidationRequest) Reset() { - *x = PerformValidationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[2] +func (*DeactivateRegistrationRequest) ProtoMessage() {} + +func (x *DeactivateRegistrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms } + return mi.MessageOf(x) } -func (x *PerformValidationRequest) String() string { +// Deprecated: Use DeactivateRegistrationRequest.ProtoReflect.Descriptor instead. +func (*DeactivateRegistrationRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{4} +} + +func (x *DeactivateRegistrationRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +type UpdateAuthorizationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Authz *proto.Authorization `protobuf:"bytes,1,opt,name=authz,proto3" json:"authz,omitempty"` + ChallengeIndex int64 `protobuf:"varint,2,opt,name=challengeIndex,proto3" json:"challengeIndex,omitempty"` + Response *proto.Challenge `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateAuthorizationRequest) Reset() { + *x = UpdateAuthorizationRequest{} + mi := &file_ra_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateAuthorizationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PerformValidationRequest) ProtoMessage() {} +func (*UpdateAuthorizationRequest) ProtoMessage() {} -func (x *PerformValidationRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateAuthorizationRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -176,53 +286,56 @@ func (x *PerformValidationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PerformValidationRequest.ProtoReflect.Descriptor instead. -func (*PerformValidationRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{2} +// Deprecated: Use UpdateAuthorizationRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateAuthorizationRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{5} } -func (x *PerformValidationRequest) GetAuthz() *proto.Authorization { +func (x *UpdateAuthorizationRequest) GetAuthz() *proto.Authorization { if x != nil { return x.Authz } return nil } -func (x *PerformValidationRequest) GetChallengeIndex() int64 { +func (x *UpdateAuthorizationRequest) GetChallengeIndex() int64 { if x != nil { return x.ChallengeIndex } return 0 } -type RevokeCertificateWithRegRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateAuthorizationRequest) GetResponse() *proto.Challenge { + if x != nil { + return x.Response + } + return nil +} - Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` - Code int64 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` - RegID int64 `protobuf:"varint,3,opt,name=regID,proto3" json:"regID,omitempty"` +type PerformValidationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Authz *proto.Authorization `protobuf:"bytes,1,opt,name=authz,proto3" json:"authz,omitempty"` + ChallengeIndex int64 `protobuf:"varint,2,opt,name=challengeIndex,proto3" json:"challengeIndex,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RevokeCertificateWithRegRequest) Reset() { - *x = RevokeCertificateWithRegRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PerformValidationRequest) Reset() { + *x = PerformValidationRequest{} + mi := &file_ra_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RevokeCertificateWithRegRequest) String() string { +func (x *PerformValidationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RevokeCertificateWithRegRequest) ProtoMessage() {} +func (*PerformValidationRequest) ProtoMessage() {} -func (x *RevokeCertificateWithRegRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PerformValidationRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -232,49 +345,39 @@ func (x *RevokeCertificateWithRegRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RevokeCertificateWithRegRequest.ProtoReflect.Descriptor instead. -func (*RevokeCertificateWithRegRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{3} +// Deprecated: Use PerformValidationRequest.ProtoReflect.Descriptor instead. 
+func (*PerformValidationRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{6} } -func (x *RevokeCertificateWithRegRequest) GetCert() []byte { +func (x *PerformValidationRequest) GetAuthz() *proto.Authorization { if x != nil { - return x.Cert + return x.Authz } return nil } -func (x *RevokeCertificateWithRegRequest) GetCode() int64 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *RevokeCertificateWithRegRequest) GetRegID() int64 { +func (x *PerformValidationRequest) GetChallengeIndex() int64 { if x != nil { - return x.RegID + return x.ChallengeIndex } return 0 } type RevokeCertByApplicantRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` + Code int64 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + RegID int64 `protobuf:"varint,3,opt,name=regID,proto3" json:"regID,omitempty"` unknownFields protoimpl.UnknownFields - - Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` - Code int64 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` - RegID int64 `protobuf:"varint,3,opt,name=regID,proto3" json:"regID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RevokeCertByApplicantRequest) Reset() { *x = RevokeCertByApplicantRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_ra_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RevokeCertByApplicantRequest) String() string { @@ -284,8 +387,8 @@ func (x *RevokeCertByApplicantRequest) String() string { func (*RevokeCertByApplicantRequest) ProtoMessage() {} func (x *RevokeCertByApplicantRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_ra_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -297,7 +400,7 @@ func (x *RevokeCertByApplicantRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RevokeCertByApplicantRequest.ProtoReflect.Descriptor instead. 
func (*RevokeCertByApplicantRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{4} + return file_ra_proto_rawDescGZIP(), []int{7} } func (x *RevokeCertByApplicantRequest) GetCert() []byte { @@ -322,21 +425,17 @@ func (x *RevokeCertByApplicantRequest) GetRegID() int64 { } type RevokeCertByKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` unknownFields protoimpl.UnknownFields - - Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` - Code int64 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RevokeCertByKeyRequest) Reset() { *x = RevokeCertByKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_ra_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RevokeCertByKeyRequest) String() string { @@ -346,8 +445,8 @@ func (x *RevokeCertByKeyRequest) String() string { func (*RevokeCertByKeyRequest) ProtoMessage() {} func (x *RevokeCertByKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_ra_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -359,7 +458,7 @@ func (x *RevokeCertByKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RevokeCertByKeyRequest.ProtoReflect.Descriptor instead. func (*RevokeCertByKeyRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{5} + return file_ra_proto_rawDescGZIP(), []int{8} } func (x *RevokeCertByKeyRequest) GetCert() []byte { @@ -369,32 +468,36 @@ func (x *RevokeCertByKeyRequest) GetCert() []byte { return nil } -func (x *RevokeCertByKeyRequest) GetCode() int64 { - if x != nil { - return x.Code - } - return 0 -} - type AdministrativelyRevokeCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + // Deprecated: this field is ignored. + Cert []byte `protobuf:"bytes,1,opt,name=cert,proto3" json:"cert,omitempty"` + // The `serial` field is required. Serial string `protobuf:"bytes,4,opt,name=serial,proto3" json:"serial,omitempty"` Code int64 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` AdminName string `protobuf:"bytes,3,opt,name=adminName,proto3" json:"adminName,omitempty"` SkipBlockKey bool `protobuf:"varint,5,opt,name=skipBlockKey,proto3" json:"skipBlockKey,omitempty"` + // If the malformed flag is set, the RA will not attempt to parse the + // certificate in question. In this case, the keyCompromise reason cannot be + // specified, because the key cannot be blocked. + Malformed bool `protobuf:"varint,6,opt,name=malformed,proto3" json:"malformed,omitempty"` + // The CRL shard to store the revocation in. + // + // This is used when revoking malformed certificates, to allow human judgement + // in setting the CRL shard instead of automatically determining it by parsing + // the certificate. 
+ // + // Passing a nonzero crlShard with malformed=false returns error. + CrlShard int64 `protobuf:"varint,7,opt,name=crlShard,proto3" json:"crlShard,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AdministrativelyRevokeCertificateRequest) Reset() { *x = AdministrativelyRevokeCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_ra_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AdministrativelyRevokeCertificateRequest) String() string { @@ -404,8 +507,8 @@ func (x *AdministrativelyRevokeCertificateRequest) String() string { func (*AdministrativelyRevokeCertificateRequest) ProtoMessage() {} func (x *AdministrativelyRevokeCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_ra_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -417,7 +520,7 @@ func (x *AdministrativelyRevokeCertificateRequest) ProtoReflect() protoreflect.M // Deprecated: Use AdministrativelyRevokeCertificateRequest.ProtoReflect.Descriptor instead. func (*AdministrativelyRevokeCertificateRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{6} + return file_ra_proto_rawDescGZIP(), []int{9} } func (x *AdministrativelyRevokeCertificateRequest) GetCert() []byte { @@ -455,22 +558,39 @@ func (x *AdministrativelyRevokeCertificateRequest) GetSkipBlockKey() bool { return false } -type NewOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *AdministrativelyRevokeCertificateRequest) GetMalformed() bool { + if x != nil { + return x.Malformed + } + return false +} + +func (x *AdministrativelyRevokeCertificateRequest) GetCrlShard() int64 { + if x != nil { + return x.CrlShard + } + return 0 +} - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` +type NewOrderRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 9 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,8,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + CertificateProfileName string `protobuf:"bytes,5,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"` + // Replaces is the ARI certificate Id that this order replaces. + Replaces string `protobuf:"bytes,7,opt,name=replaces,proto3" json:"replaces,omitempty"` + // ReplacesSerial is the serial number of the certificate that this order replaces. 
+ ReplacesSerial string `protobuf:"bytes,3,opt,name=replacesSerial,proto3" json:"replacesSerial,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NewOrderRequest) Reset() { *x = NewOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_ra_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NewOrderRequest) String() string { @@ -480,8 +600,8 @@ func (x *NewOrderRequest) String() string { func (*NewOrderRequest) ProtoMessage() {} func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_ra_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -493,7 +613,7 @@ func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead. func (*NewOrderRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{7} + return file_ra_proto_rawDescGZIP(), []int{10} } func (x *NewOrderRequest) GetRegistrationID() int64 { @@ -503,29 +623,91 @@ func (x *NewOrderRequest) GetRegistrationID() int64 { return 0 } -func (x *NewOrderRequest) GetNames() []string { +func (x *NewOrderRequest) GetIdentifiers() []*proto.Identifier { if x != nil { - return x.Names + return x.Identifiers } return nil } -type FinalizeOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *NewOrderRequest) GetCertificateProfileName() string { + if x != nil { + return x.CertificateProfileName + } + return "" +} + +func (x *NewOrderRequest) GetReplaces() string { + if x != nil { + return x.Replaces + } + return "" +} + +func (x *NewOrderRequest) GetReplacesSerial() string { + if x != nil { + return x.ReplacesSerial + } + return "" +} + +type GetAuthorizationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Order *proto.Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order,omitempty"` - Csr []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` +func (x *GetAuthorizationRequest) Reset() { + *x = GetAuthorizationRequest{} + mi := &file_ra_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FinalizeOrderRequest) Reset() { - *x = FinalizeOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_ra_proto_msgTypes[8] +func (x *GetAuthorizationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizationRequest) ProtoMessage() {} + +func (x *GetAuthorizationRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizationRequest.ProtoReflect.Descriptor instead. 
+func (*GetAuthorizationRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{11} +} + +func (x *GetAuthorizationRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type FinalizeOrderRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Order *proto.Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order,omitempty"` + Csr []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FinalizeOrderRequest) Reset() { + *x = FinalizeOrderRequest{} + mi := &file_ra_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FinalizeOrderRequest) String() string { @@ -535,8 +717,8 @@ func (x *FinalizeOrderRequest) String() string { func (*FinalizeOrderRequest) ProtoMessage() {} func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_ra_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_ra_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -548,7 +730,7 @@ func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead. func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) { - return file_ra_proto_rawDescGZIP(), []int{8} + return file_ra_proto_rawDescGZIP(), []int{12} } func (x *FinalizeOrderRequest) GetOrder() *proto.Order { @@ -565,192 +747,498 @@ func (x *FinalizeOrderRequest) GetCsr() []byte { return nil } +type UnpauseAccountRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The registrationID to be unpaused so issuance can be resumed. + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnpauseAccountRequest) Reset() { + *x = UnpauseAccountRequest{} + mi := &file_ra_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnpauseAccountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnpauseAccountRequest) ProtoMessage() {} + +func (x *UnpauseAccountRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnpauseAccountRequest.ProtoReflect.Descriptor instead. +func (*UnpauseAccountRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{13} +} + +func (x *UnpauseAccountRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +type UnpauseAccountResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Count is the number of identifiers which were unpaused for the input regid. 
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnpauseAccountResponse) Reset() { + *x = UnpauseAccountResponse{} + mi := &file_ra_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnpauseAccountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnpauseAccountResponse) ProtoMessage() {} + +func (x *UnpauseAccountResponse) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnpauseAccountResponse.ProtoReflect.Descriptor instead. +func (*UnpauseAccountResponse) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{14} +} + +func (x *UnpauseAccountResponse) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type AddRateLimitOverrideRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LimitEnum int64 `protobuf:"varint,1,opt,name=limitEnum,proto3" json:"limitEnum,omitempty"` + BucketKey string `protobuf:"bytes,2,opt,name=bucketKey,proto3" json:"bucketKey,omitempty"` + Comment string `protobuf:"bytes,3,opt,name=comment,proto3" json:"comment,omitempty"` + Period *durationpb.Duration `protobuf:"bytes,4,opt,name=period,proto3" json:"period,omitempty"` + Count int64 `protobuf:"varint,5,opt,name=count,proto3" json:"count,omitempty"` + Burst int64 `protobuf:"varint,6,opt,name=burst,proto3" json:"burst,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AddRateLimitOverrideRequest) Reset() { + *x = AddRateLimitOverrideRequest{} + mi := &file_ra_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddRateLimitOverrideRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddRateLimitOverrideRequest) ProtoMessage() {} + +func (x *AddRateLimitOverrideRequest) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddRateLimitOverrideRequest.ProtoReflect.Descriptor instead. 
+func (*AddRateLimitOverrideRequest) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{15} +} + +func (x *AddRateLimitOverrideRequest) GetLimitEnum() int64 { + if x != nil { + return x.LimitEnum + } + return 0 +} + +func (x *AddRateLimitOverrideRequest) GetBucketKey() string { + if x != nil { + return x.BucketKey + } + return "" +} + +func (x *AddRateLimitOverrideRequest) GetComment() string { + if x != nil { + return x.Comment + } + return "" +} + +func (x *AddRateLimitOverrideRequest) GetPeriod() *durationpb.Duration { + if x != nil { + return x.Period + } + return nil +} + +func (x *AddRateLimitOverrideRequest) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AddRateLimitOverrideRequest) GetBurst() int64 { + if x != nil { + return x.Burst + } + return 0 +} + +type AddRateLimitOverrideResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Inserted bool `protobuf:"varint,1,opt,name=inserted,proto3" json:"inserted,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AddRateLimitOverrideResponse) Reset() { + *x = AddRateLimitOverrideResponse{} + mi := &file_ra_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddRateLimitOverrideResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddRateLimitOverrideResponse) ProtoMessage() {} + +func (x *AddRateLimitOverrideResponse) ProtoReflect() protoreflect.Message { + mi := &file_ra_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddRateLimitOverrideResponse.ProtoReflect.Descriptor instead. 
+func (*AddRateLimitOverrideResponse) Descriptor() ([]byte, []int) { + return file_ra_proto_rawDescGZIP(), []int{16} +} + +func (x *AddRateLimitOverrideResponse) GetInserted() bool { + if x != nil { + return x.Inserted + } + return false +} + +func (x *AddRateLimitOverrideResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + var File_ra_proto protoreflect.FileDescriptor -var file_ra_proto_rawDesc = []byte{ +var file_ra_proto_rawDesc = string([]byte{ 0x0a, 0x08, 0x72, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x72, 0x61, 0x1a, 0x15, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x6f, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x26, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x12, 0x26, 0x0a, - 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, - 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x18, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, - 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x68, 0x61, - 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x22, 0x5f, 0x0a, 0x1f, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x63, 
0x65, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, - 0x49, 0x44, 0x22, 0x5c, 0x0a, 0x1c, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, - 0x42, 0x79, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, - 0x67, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, - 0x22, 0x40, 0x0a, 0x16, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x28, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, - 0x65, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4b, 0x65, - 0x79, 0x22, 0x4f, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x22, 0x4b, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x05, 0x6f, 0x72, - 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, - 0x03, 0x63, 0x73, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x63, 0x73, 0x72, 0x32, - 0xcb, 0x06, 0x0a, 0x15, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x2e, 0x72, - 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x72, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x66, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x2c, 0x0a, 0x0a, 0x53, 0x43, 0x54, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x44, 0x45, 0x52, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x44, 0x45, 0x52, + 0x22, 0x25, 0x0a, 0x0b, 0x53, 0x43, 0x54, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x63, 0x74, 0x44, 0x45, 0x52, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x06, 0x73, 0x63, 0x74, 0x44, 0x45, 0x52, 0x22, 0x2d, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x4f, 0x43, 0x53, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x58, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x10, + 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6a, 0x77, 0x6b, + 0x22, 0x47, 0x0a, 0x1d, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x22, 0x9c, 0x01, 0x0a, 0x1a, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, + 0x7a, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, + 0x74, 0x68, 0x7a, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 
0x63, 0x68, 0x61, + 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2b, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x18, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x18, 0x52, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x57, 0x69, 0x74, 0x68, 0x52, 0x65, 0x67, 0x12, 0x23, 0x2e, 0x72, 0x61, 0x2e, 0x52, 0x65, 0x76, - 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x57, 0x69, - 0x74, 0x68, 0x52, 0x65, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, - 0x0a, 0x17, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x15, 0x52, 0x65, 0x76, 0x6f, - 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, - 0x74, 0x12, 0x20, 0x2e, 0x72, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, - 0x74, 0x42, 0x79, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, - 0x0f, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x4b, 0x65, 0x79, - 0x12, 0x1a, 0x2e, 0x72, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, - 0x42, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x21, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x72, 0x61, - 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x76, 0x65, 0x6c, - 0x79, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x12, + 0x26, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, + 0x67, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x5c, 0x0a, 0x1c, 0x52, 0x65, 0x76, 0x6f, 0x6b, + 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x32, 0x0a, 0x16, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, + 0x65, 0x72, 0x74, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, + 0x65, 0x72, 0x74, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe6, 0x01, 0x0a, 0x28, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x52, 0x65, + 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x65, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x4b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x6b, 0x69, 0x70, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x61, 0x6c, 0x66, + 0x6f, 0x72, 0x6d, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6d, 0x61, 0x6c, + 0x66, 0x6f, 0x72, 0x6d, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x72, 0x6c, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x63, 0x72, 0x6c, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x32, + 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 
0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x22, 0x29, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x4b, 0x0a, 0x14, 0x46, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, + 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x63, 0x73, 0x72, 0x22, 0x3f, 0x0a, 0x15, 0x55, 0x6e, 0x70, 0x61, + 0x75, 0x73, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x22, 0x2e, 0x0a, 0x16, 0x55, 0x6e, 0x70, + 0x61, 0x75, 0x73, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xd2, 0x01, 0x0a, 0x1b, 0x41, 0x64, + 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, + 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, + 0x6f, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 
0x14, 0x0a, 0x05, 0x62, 0x75, 0x72, 0x73, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x22, 0x54, + 0x0a, 0x1c, 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x32, 0xf1, 0x07, 0x0a, 0x15, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3b, + 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x15, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x2e, 0x72, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x16, + 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x72, 0x61, 0x2e, 0x44, 0x65, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, + 0x48, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x72, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, + 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x17, 0x44, 0x65, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x00, 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, - 0x13, 0x2e, 0x72, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x72, 0x61, 0x2e, 0x46, 0x69, 0x6e, 
0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x42, 0x29, 0x5a, - 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, - 0x72, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x79, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x15, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, + 0x74, 0x42, 0x79, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x12, 0x20, 0x2e, 0x72, + 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x41, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x52, 0x65, 0x76, 0x6f, + 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x2e, 0x72, 0x61, + 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x42, 0x79, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x00, 0x12, 0x6b, 0x0a, 0x21, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x6c, 0x79, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x72, 0x61, 0x2e, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x52, 0x65, 0x76, 0x6f, + 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x2e, + 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x13, 0x2e, 0x72, 0x61, 0x2e, + 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x46, + 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x2e, 0x72, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x72, 0x61, 0x2e, 0x46, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, + 0x12, 0x49, 0x0a, 0x0e, 0x55, 0x6e, 0x70, 0x61, 0x75, 0x73, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x19, 0x2e, 0x72, 0x61, 0x2e, 0x55, 0x6e, 0x70, 0x61, 0x75, 0x73, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, + 
0x72, 0x61, 0x2e, 0x55, 0x6e, 0x70, 0x61, 0x75, 0x73, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x14, 0x41, + 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x72, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x72, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x3b, 0x0a, 0x0b, 0x53, 0x43, 0x54, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x53, 0x43, + 0x54, 0x73, 0x12, 0x0e, 0x2e, 0x72, 0x61, 0x2e, 0x53, 0x43, 0x54, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x72, 0x61, 0x2e, 0x53, 0x43, 0x54, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, + 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x72, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_ra_proto_rawDescOnce sync.Once - file_ra_proto_rawDescData = file_ra_proto_rawDesc + file_ra_proto_rawDescData []byte ) func file_ra_proto_rawDescGZIP() []byte { file_ra_proto_rawDescOnce.Do(func() { - file_ra_proto_rawDescData = protoimpl.X.CompressGZIP(file_ra_proto_rawDescData) + file_ra_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ra_proto_rawDesc), len(file_ra_proto_rawDesc))) }) return file_ra_proto_rawDescData } -var file_ra_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_ra_proto_goTypes = []interface{}{ - (*UpdateRegistrationRequest)(nil), // 0: ra.UpdateRegistrationRequest - (*UpdateAuthorizationRequest)(nil), // 1: ra.UpdateAuthorizationRequest - (*PerformValidationRequest)(nil), // 2: ra.PerformValidationRequest - (*RevokeCertificateWithRegRequest)(nil), // 3: ra.RevokeCertificateWithRegRequest - (*RevokeCertByApplicantRequest)(nil), // 4: ra.RevokeCertByApplicantRequest - (*RevokeCertByKeyRequest)(nil), // 5: ra.RevokeCertByKeyRequest - (*AdministrativelyRevokeCertificateRequest)(nil), // 6: ra.AdministrativelyRevokeCertificateRequest - (*NewOrderRequest)(nil), // 7: ra.NewOrderRequest - (*FinalizeOrderRequest)(nil), // 8: ra.FinalizeOrderRequest - (*proto.Registration)(nil), // 9: core.Registration - (*proto.Authorization)(nil), // 10: core.Authorization - (*proto.Challenge)(nil), // 11: core.Challenge - (*proto.Order)(nil), // 12: core.Order - (*emptypb.Empty)(nil), // 13: google.protobuf.Empty +var file_ra_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_ra_proto_goTypes = []any{ + (*SCTRequest)(nil), // 0: ra.SCTRequest + (*SCTResponse)(nil), // 1: ra.SCTResponse + (*GenerateOCSPRequest)(nil), // 2: ra.GenerateOCSPRequest + (*UpdateRegistrationKeyRequest)(nil), // 3: ra.UpdateRegistrationKeyRequest + (*DeactivateRegistrationRequest)(nil), // 4: ra.DeactivateRegistrationRequest + (*UpdateAuthorizationRequest)(nil), // 5: ra.UpdateAuthorizationRequest + (*PerformValidationRequest)(nil), // 6: ra.PerformValidationRequest + (*RevokeCertByApplicantRequest)(nil), // 7: ra.RevokeCertByApplicantRequest + 
(*RevokeCertByKeyRequest)(nil), // 8: ra.RevokeCertByKeyRequest + (*AdministrativelyRevokeCertificateRequest)(nil), // 9: ra.AdministrativelyRevokeCertificateRequest + (*NewOrderRequest)(nil), // 10: ra.NewOrderRequest + (*GetAuthorizationRequest)(nil), // 11: ra.GetAuthorizationRequest + (*FinalizeOrderRequest)(nil), // 12: ra.FinalizeOrderRequest + (*UnpauseAccountRequest)(nil), // 13: ra.UnpauseAccountRequest + (*UnpauseAccountResponse)(nil), // 14: ra.UnpauseAccountResponse + (*AddRateLimitOverrideRequest)(nil), // 15: ra.AddRateLimitOverrideRequest + (*AddRateLimitOverrideResponse)(nil), // 16: ra.AddRateLimitOverrideResponse + (*proto.Authorization)(nil), // 17: core.Authorization + (*proto.Challenge)(nil), // 18: core.Challenge + (*proto.Identifier)(nil), // 19: core.Identifier + (*proto.Order)(nil), // 20: core.Order + (*durationpb.Duration)(nil), // 21: google.protobuf.Duration + (*proto.Registration)(nil), // 22: core.Registration + (*emptypb.Empty)(nil), // 23: google.protobuf.Empty } var file_ra_proto_depIdxs = []int32{ - 9, // 0: ra.UpdateRegistrationRequest.base:type_name -> core.Registration - 9, // 1: ra.UpdateRegistrationRequest.update:type_name -> core.Registration - 10, // 2: ra.UpdateAuthorizationRequest.authz:type_name -> core.Authorization - 11, // 3: ra.UpdateAuthorizationRequest.response:type_name -> core.Challenge - 10, // 4: ra.PerformValidationRequest.authz:type_name -> core.Authorization - 12, // 5: ra.FinalizeOrderRequest.order:type_name -> core.Order - 9, // 6: ra.RegistrationAuthority.NewRegistration:input_type -> core.Registration - 0, // 7: ra.RegistrationAuthority.UpdateRegistration:input_type -> ra.UpdateRegistrationRequest - 2, // 8: ra.RegistrationAuthority.PerformValidation:input_type -> ra.PerformValidationRequest - 3, // 9: ra.RegistrationAuthority.RevokeCertificateWithReg:input_type -> ra.RevokeCertificateWithRegRequest - 9, // 10: ra.RegistrationAuthority.DeactivateRegistration:input_type -> core.Registration - 10, // 11: ra.RegistrationAuthority.DeactivateAuthorization:input_type -> core.Authorization - 4, // 12: ra.RegistrationAuthority.RevokeCertByApplicant:input_type -> ra.RevokeCertByApplicantRequest - 5, // 13: ra.RegistrationAuthority.RevokeCertByKey:input_type -> ra.RevokeCertByKeyRequest - 6, // 14: ra.RegistrationAuthority.AdministrativelyRevokeCertificate:input_type -> ra.AdministrativelyRevokeCertificateRequest - 7, // 15: ra.RegistrationAuthority.NewOrder:input_type -> ra.NewOrderRequest - 8, // 16: ra.RegistrationAuthority.FinalizeOrder:input_type -> ra.FinalizeOrderRequest - 9, // 17: ra.RegistrationAuthority.NewRegistration:output_type -> core.Registration - 9, // 18: ra.RegistrationAuthority.UpdateRegistration:output_type -> core.Registration - 10, // 19: ra.RegistrationAuthority.PerformValidation:output_type -> core.Authorization - 13, // 20: ra.RegistrationAuthority.RevokeCertificateWithReg:output_type -> google.protobuf.Empty - 13, // 21: ra.RegistrationAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty - 13, // 22: ra.RegistrationAuthority.DeactivateAuthorization:output_type -> google.protobuf.Empty - 13, // 23: ra.RegistrationAuthority.RevokeCertByApplicant:output_type -> google.protobuf.Empty - 13, // 24: ra.RegistrationAuthority.RevokeCertByKey:output_type -> google.protobuf.Empty - 13, // 25: ra.RegistrationAuthority.AdministrativelyRevokeCertificate:output_type -> google.protobuf.Empty - 12, // 26: ra.RegistrationAuthority.NewOrder:output_type -> core.Order - 12, // 27: 
ra.RegistrationAuthority.FinalizeOrder:output_type -> core.Order - 17, // [17:28] is the sub-list for method output_type - 6, // [6:17] is the sub-list for method input_type + 17, // 0: ra.UpdateAuthorizationRequest.authz:type_name -> core.Authorization + 18, // 1: ra.UpdateAuthorizationRequest.response:type_name -> core.Challenge + 17, // 2: ra.PerformValidationRequest.authz:type_name -> core.Authorization + 19, // 3: ra.NewOrderRequest.identifiers:type_name -> core.Identifier + 20, // 4: ra.FinalizeOrderRequest.order:type_name -> core.Order + 21, // 5: ra.AddRateLimitOverrideRequest.period:type_name -> google.protobuf.Duration + 22, // 6: ra.RegistrationAuthority.NewRegistration:input_type -> core.Registration + 3, // 7: ra.RegistrationAuthority.UpdateRegistrationKey:input_type -> ra.UpdateRegistrationKeyRequest + 4, // 8: ra.RegistrationAuthority.DeactivateRegistration:input_type -> ra.DeactivateRegistrationRequest + 6, // 9: ra.RegistrationAuthority.PerformValidation:input_type -> ra.PerformValidationRequest + 17, // 10: ra.RegistrationAuthority.DeactivateAuthorization:input_type -> core.Authorization + 7, // 11: ra.RegistrationAuthority.RevokeCertByApplicant:input_type -> ra.RevokeCertByApplicantRequest + 8, // 12: ra.RegistrationAuthority.RevokeCertByKey:input_type -> ra.RevokeCertByKeyRequest + 9, // 13: ra.RegistrationAuthority.AdministrativelyRevokeCertificate:input_type -> ra.AdministrativelyRevokeCertificateRequest + 10, // 14: ra.RegistrationAuthority.NewOrder:input_type -> ra.NewOrderRequest + 11, // 15: ra.RegistrationAuthority.GetAuthorization:input_type -> ra.GetAuthorizationRequest + 12, // 16: ra.RegistrationAuthority.FinalizeOrder:input_type -> ra.FinalizeOrderRequest + 13, // 17: ra.RegistrationAuthority.UnpauseAccount:input_type -> ra.UnpauseAccountRequest + 15, // 18: ra.RegistrationAuthority.AddRateLimitOverride:input_type -> ra.AddRateLimitOverrideRequest + 0, // 19: ra.SCTProvider.GetSCTs:input_type -> ra.SCTRequest + 22, // 20: ra.RegistrationAuthority.NewRegistration:output_type -> core.Registration + 22, // 21: ra.RegistrationAuthority.UpdateRegistrationKey:output_type -> core.Registration + 22, // 22: ra.RegistrationAuthority.DeactivateRegistration:output_type -> core.Registration + 17, // 23: ra.RegistrationAuthority.PerformValidation:output_type -> core.Authorization + 23, // 24: ra.RegistrationAuthority.DeactivateAuthorization:output_type -> google.protobuf.Empty + 23, // 25: ra.RegistrationAuthority.RevokeCertByApplicant:output_type -> google.protobuf.Empty + 23, // 26: ra.RegistrationAuthority.RevokeCertByKey:output_type -> google.protobuf.Empty + 23, // 27: ra.RegistrationAuthority.AdministrativelyRevokeCertificate:output_type -> google.protobuf.Empty + 20, // 28: ra.RegistrationAuthority.NewOrder:output_type -> core.Order + 17, // 29: ra.RegistrationAuthority.GetAuthorization:output_type -> core.Authorization + 20, // 30: ra.RegistrationAuthority.FinalizeOrder:output_type -> core.Order + 14, // 31: ra.RegistrationAuthority.UnpauseAccount:output_type -> ra.UnpauseAccountResponse + 16, // 32: ra.RegistrationAuthority.AddRateLimitOverride:output_type -> ra.AddRateLimitOverrideResponse + 1, // 33: ra.SCTProvider.GetSCTs:output_type -> ra.SCTResponse + 20, // [20:34] is the sub-list for method output_type + 6, // [6:20] is the sub-list for method input_type 6, // [6:6] is the sub-list for extension type_name 6, // [6:6] is the sub-list for extension extendee 0, // [0:6] is the sub-list for field type_name @@ -761,132 +1249,21 @@ func file_ra_proto_init() { if 
File_ra_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_ra_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateRegistrationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateAuthorizationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PerformValidationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertificateWithRegRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertByApplicantRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertByKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdministrativelyRevokeCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_ra_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_ra_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_ra_proto_rawDesc), len(file_ra_proto_rawDesc)), NumEnums: 0, - NumMessages: 9, + NumMessages: 17, NumExtensions: 0, - NumServices: 1, + NumServices: 2, }, GoTypes: file_ra_proto_goTypes, DependencyIndexes: file_ra_proto_depIdxs, MessageInfos: file_ra_proto_msgTypes, }.Build() File_ra_proto = out.File - file_ra_proto_rawDesc = nil file_ra_proto_goTypes = nil file_ra_proto_depIdxs = nil } diff --git a/ra/proto/ra.proto b/ra/proto/ra.proto index a551264c47e..5b1f519a8b5 100644 --- a/ra/proto/ra.proto +++ b/ra/proto/ra.proto @@ -5,24 +5,47 @@ option go_package = "github.com/letsencrypt/boulder/ra/proto"; import "core/proto/core.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/duration.proto"; service RegistrationAuthority { rpc NewRegistration(core.Registration) returns (core.Registration) {} - rpc UpdateRegistration(UpdateRegistrationRequest) returns (core.Registration) {} + rpc 
UpdateRegistrationKey(UpdateRegistrationKeyRequest) returns (core.Registration) {} + rpc DeactivateRegistration(DeactivateRegistrationRequest) returns (core.Registration) {} rpc PerformValidation(PerformValidationRequest) returns (core.Authorization) {} - rpc RevokeCertificateWithReg(RevokeCertificateWithRegRequest) returns (google.protobuf.Empty) {} - rpc DeactivateRegistration(core.Registration) returns (google.protobuf.Empty) {} rpc DeactivateAuthorization(core.Authorization) returns (google.protobuf.Empty) {} rpc RevokeCertByApplicant(RevokeCertByApplicantRequest) returns (google.protobuf.Empty) {} rpc RevokeCertByKey(RevokeCertByKeyRequest) returns (google.protobuf.Empty) {} rpc AdministrativelyRevokeCertificate(AdministrativelyRevokeCertificateRequest) returns (google.protobuf.Empty) {} rpc NewOrder(NewOrderRequest) returns (core.Order) {} + rpc GetAuthorization(GetAuthorizationRequest) returns (core.Authorization) {} rpc FinalizeOrder(FinalizeOrderRequest) returns (core.Order) {} + rpc UnpauseAccount(UnpauseAccountRequest) returns (UnpauseAccountResponse) {} + rpc AddRateLimitOverride(AddRateLimitOverrideRequest) returns (AddRateLimitOverrideResponse) {} } -message UpdateRegistrationRequest { - core.Registration base = 1; - core.Registration update = 2; +service SCTProvider { + rpc GetSCTs(SCTRequest) returns (SCTResponse) {} +} + +message SCTRequest { + bytes precertDER = 1; +} + +message SCTResponse { + repeated bytes sctDER = 1; +} + +message GenerateOCSPRequest { + string serial = 1; +} + +message UpdateRegistrationKeyRequest { + int64 registrationID = 1; + bytes jwk = 2; +} + +message DeactivateRegistrationRequest { + int64 registrationID = 1; } message UpdateAuthorizationRequest { @@ -36,12 +59,6 @@ message PerformValidationRequest { int64 challengeIndex = 2; } -message RevokeCertificateWithRegRequest { - bytes cert = 1; - int64 code = 2; - int64 regID = 3; -} - message RevokeCertByApplicantRequest { bytes cert = 1; int64 code = 2; @@ -50,23 +67,78 @@ message RevokeCertByApplicantRequest { message RevokeCertByKeyRequest { bytes cert = 1; - int64 code = 2; + reserved 2; // previously code } message AdministrativelyRevokeCertificateRequest { + // Deprecated: this field is ignored. bytes cert = 1; + // The `serial` field is required. string serial = 4; int64 code = 2; string adminName = 3; bool skipBlockKey = 5; + // If the malformed flag is set, the RA will not attempt to parse the + // certificate in question. In this case, the keyCompromise reason cannot be + // specified, because the key cannot be blocked. + bool malformed = 6; + // The CRL shard to store the revocation in. + // + // This is used when revoking malformed certificates, to allow human judgement + // in setting the CRL shard instead of automatically determining it by parsing + // the certificate. + // + // Passing a nonzero crlShard with malformed=false returns an error. + int64 crlShard = 7; } message NewOrderRequest { + // Next unused field number: 9 int64 registrationID = 1; - repeated string names = 2; + reserved 2; // previously dnsNames + repeated core.Identifier identifiers = 8; + string certificateProfileName = 5; + // Replaces is the ARI certificate ID that this order replaces. + string replaces = 7; + // ReplacesSerial is the serial number of the certificate that this order replaces.
+ string replacesSerial = 3; + reserved 4; // previously isARIRenewal + reserved 6; // previously isRenewal +} + +message GetAuthorizationRequest { + int64 id = 1; } message FinalizeOrderRequest { core.Order order = 1; bytes csr = 2; } + +message UnpauseAccountRequest { + // Next unused field number: 2 + + // The registrationID to be unpaused so issuance can be resumed. + int64 registrationID = 1; +} + +message UnpauseAccountResponse { + // Next unused field number: 2 + + // Count is the number of identifiers which were unpaused for the input regid. + int64 count = 1; +} + +message AddRateLimitOverrideRequest { + int64 limitEnum = 1; + string bucketKey = 2; + string comment = 3; + google.protobuf.Duration period = 4; + int64 count = 5; + int64 burst = 6; +} + +message AddRateLimitOverrideResponse { + bool inserted = 1; + bool enabled = 2; +} diff --git a/ra/proto/ra_grpc.pb.go b/ra/proto/ra_grpc.pb.go index b5b4804fb2b..74bdfc80f1d 100644 --- a/ra/proto/ra_grpc.pb.go +++ b/ra/proto/ra_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: ra.proto package proto @@ -13,24 +17,42 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + RegistrationAuthority_NewRegistration_FullMethodName = "/ra.RegistrationAuthority/NewRegistration" + RegistrationAuthority_UpdateRegistrationKey_FullMethodName = "/ra.RegistrationAuthority/UpdateRegistrationKey" + RegistrationAuthority_DeactivateRegistration_FullMethodName = "/ra.RegistrationAuthority/DeactivateRegistration" + RegistrationAuthority_PerformValidation_FullMethodName = "/ra.RegistrationAuthority/PerformValidation" + RegistrationAuthority_DeactivateAuthorization_FullMethodName = "/ra.RegistrationAuthority/DeactivateAuthorization" + RegistrationAuthority_RevokeCertByApplicant_FullMethodName = "/ra.RegistrationAuthority/RevokeCertByApplicant" + RegistrationAuthority_RevokeCertByKey_FullMethodName = "/ra.RegistrationAuthority/RevokeCertByKey" + RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName = "/ra.RegistrationAuthority/AdministrativelyRevokeCertificate" + RegistrationAuthority_NewOrder_FullMethodName = "/ra.RegistrationAuthority/NewOrder" + RegistrationAuthority_GetAuthorization_FullMethodName = "/ra.RegistrationAuthority/GetAuthorization" + RegistrationAuthority_FinalizeOrder_FullMethodName = "/ra.RegistrationAuthority/FinalizeOrder" + RegistrationAuthority_UnpauseAccount_FullMethodName = "/ra.RegistrationAuthority/UnpauseAccount" + RegistrationAuthority_AddRateLimitOverride_FullMethodName = "/ra.RegistrationAuthority/AddRateLimitOverride" +) // RegistrationAuthorityClient is the client API for RegistrationAuthority service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type RegistrationAuthorityClient interface { NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) - UpdateRegistration(ctx context.Context, in *UpdateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error) + UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error) + DeactivateRegistration(ctx context.Context, in *DeactivateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error) PerformValidation(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - RevokeCertificateWithReg(ctx context.Context, in *RevokeCertificateWithRegRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - DeactivateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) DeactivateAuthorization(ctx context.Context, in *proto.Authorization, opts ...grpc.CallOption) (*emptypb.Empty, error) RevokeCertByApplicant(ctx context.Context, in *RevokeCertByApplicantRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) RevokeCertByKey(ctx context.Context, in *RevokeCertByKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) AdministrativelyRevokeCertificate(ctx context.Context, in *AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) + GetAuthorization(ctx context.Context, in *GetAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) + UnpauseAccount(ctx context.Context, in *UnpauseAccountRequest, opts ...grpc.CallOption) (*UnpauseAccountResponse, error) + AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error) } type registrationAuthorityClient struct { @@ -42,17 +64,29 @@ func NewRegistrationAuthorityClient(cc grpc.ClientConnInterface) RegistrationAut } func (c *registrationAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/NewRegistration", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_NewRegistration_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) UpdateRegistration(ctx context.Context, in *UpdateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error) { +func (c *registrationAuthorityClient) UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/UpdateRegistration", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_UpdateRegistrationKey_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *registrationAuthorityClient) DeactivateRegistration(ctx context.Context, in *DeactivateRegistrationRequest, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, RegistrationAuthority_DeactivateRegistration_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -60,80 +94,99 @@ func (c *registrationAuthorityClient) UpdateRegistration(ctx context.Context, in } func (c *registrationAuthorityClient) PerformValidation(ctx context.Context, in *PerformValidationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/PerformValidation", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_PerformValidation_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) RevokeCertificateWithReg(ctx context.Context, in *RevokeCertificateWithRegRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *registrationAuthorityClient) DeactivateAuthorization(ctx context.Context, in *proto.Authorization, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/RevokeCertificateWithReg", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_DeactivateAuthorization_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) DeactivateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *registrationAuthorityClient) RevokeCertByApplicant(ctx context.Context, in *RevokeCertByApplicantRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/DeactivateRegistration", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_RevokeCertByApplicant_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) DeactivateAuthorization(ctx context.Context, in *proto.Authorization, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *registrationAuthorityClient) RevokeCertByKey(ctx context.Context, in *RevokeCertByKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/DeactivateAuthorization", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_RevokeCertByKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) RevokeCertByApplicant(ctx context.Context, in *RevokeCertByApplicantRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *registrationAuthorityClient) AdministrativelyRevokeCertificate(ctx context.Context, in *AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/RevokeCertByApplicant", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) RevokeCertByKey(ctx context.Context, in *RevokeCertByKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/RevokeCertByKey", in, out, opts...) +func (c *registrationAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Order) + err := c.cc.Invoke(ctx, RegistrationAuthority_NewOrder_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) AdministrativelyRevokeCertificate(ctx context.Context, in *AdministrativelyRevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/AdministrativelyRevokeCertificate", in, out, opts...) +func (c *registrationAuthorityClient) GetAuthorization(ctx context.Context, in *GetAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Authorization) + err := c.cc.Invoke(ctx, RegistrationAuthority_GetAuthorization_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { +func (c *registrationAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Order) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/NewOrder", in, out, opts...) + err := c.cc.Invoke(ctx, RegistrationAuthority_FinalizeOrder_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *registrationAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/ra.RegistrationAuthority/FinalizeOrder", in, out, opts...) +func (c *registrationAuthorityClient) UnpauseAccount(ctx context.Context, in *UnpauseAccountRequest, opts ...grpc.CallOption) (*UnpauseAccountResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UnpauseAccountResponse) + err := c.cc.Invoke(ctx, RegistrationAuthority_UnpauseAccount_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *registrationAuthorityClient) AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(AddRateLimitOverrideResponse) + err := c.cc.Invoke(ctx, RegistrationAuthority_AddRateLimitOverride_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -142,41 +195,43 @@ func (c *registrationAuthorityClient) FinalizeOrder(ctx context.Context, in *Fin // RegistrationAuthorityServer is the server API for RegistrationAuthority service. // All implementations must embed UnimplementedRegistrationAuthorityServer -// for forward compatibility +// for forward compatibility. type RegistrationAuthorityServer interface { NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) - UpdateRegistration(context.Context, *UpdateRegistrationRequest) (*proto.Registration, error) + UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error) + DeactivateRegistration(context.Context, *DeactivateRegistrationRequest) (*proto.Registration, error) PerformValidation(context.Context, *PerformValidationRequest) (*proto.Authorization, error) - RevokeCertificateWithReg(context.Context, *RevokeCertificateWithRegRequest) (*emptypb.Empty, error) - DeactivateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) DeactivateAuthorization(context.Context, *proto.Authorization) (*emptypb.Empty, error) RevokeCertByApplicant(context.Context, *RevokeCertByApplicantRequest) (*emptypb.Empty, error) RevokeCertByKey(context.Context, *RevokeCertByKeyRequest) (*emptypb.Empty, error) AdministrativelyRevokeCertificate(context.Context, *AdministrativelyRevokeCertificateRequest) (*emptypb.Empty, error) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) + GetAuthorization(context.Context, *GetAuthorizationRequest) (*proto.Authorization, error) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*proto.Order, error) + UnpauseAccount(context.Context, *UnpauseAccountRequest) (*UnpauseAccountResponse, error) + AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error) mustEmbedUnimplementedRegistrationAuthorityServer() } -// UnimplementedRegistrationAuthorityServer must be embedded to have forward compatible implementations. -type UnimplementedRegistrationAuthorityServer struct { -} +// UnimplementedRegistrationAuthorityServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedRegistrationAuthorityServer struct{} func (UnimplementedRegistrationAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) { return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented") } -func (UnimplementedRegistrationAuthorityServer) UpdateRegistration(context.Context, *UpdateRegistrationRequest) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented") +func (UnimplementedRegistrationAuthorityServer) UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistrationKey not implemented") +} +func (UnimplementedRegistrationAuthorityServer) DeactivateRegistration(context.Context, *DeactivateRegistrationRequest) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented") } func (UnimplementedRegistrationAuthorityServer) PerformValidation(context.Context, *PerformValidationRequest) (*proto.Authorization, error) { return nil, status.Errorf(codes.Unimplemented, "method PerformValidation not implemented") } -func (UnimplementedRegistrationAuthorityServer) RevokeCertificateWithReg(context.Context, *RevokeCertificateWithRegRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificateWithReg not implemented") -} -func (UnimplementedRegistrationAuthorityServer) DeactivateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented") -} func (UnimplementedRegistrationAuthorityServer) DeactivateAuthorization(context.Context, *proto.Authorization) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization not implemented") } @@ -192,10 +247,20 @@ func (UnimplementedRegistrationAuthorityServer) AdministrativelyRevokeCertificat func (UnimplementedRegistrationAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) { return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented") } +func (UnimplementedRegistrationAuthorityServer) GetAuthorization(context.Context, *GetAuthorizationRequest) (*proto.Authorization, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization not implemented") +} func (UnimplementedRegistrationAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*proto.Order, error) { return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented") } +func (UnimplementedRegistrationAuthorityServer) UnpauseAccount(context.Context, *UnpauseAccountRequest) (*UnpauseAccountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnpauseAccount not implemented") +} +func (UnimplementedRegistrationAuthorityServer) AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddRateLimitOverride not implemented") +} func (UnimplementedRegistrationAuthorityServer) mustEmbedUnimplementedRegistrationAuthorityServer() {} +func (UnimplementedRegistrationAuthorityServer) testEmbeddedByValue() {} // UnsafeRegistrationAuthorityServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to RegistrationAuthorityServer will @@ -205,6 +270,13 @@ type UnsafeRegistrationAuthorityServer interface { } func RegisterRegistrationAuthorityServer(s grpc.ServiceRegistrar, srv RegistrationAuthorityServer) { + // If the following call panics, it indicates UnimplementedRegistrationAuthorityServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&RegistrationAuthority_ServiceDesc, srv) } @@ -218,7 +290,7 @@ func _RegistrationAuthority_NewRegistration_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/NewRegistration", + FullMethod: RegistrationAuthority_NewRegistration_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).NewRegistration(ctx, req.(*proto.Registration)) @@ -226,74 +298,56 @@ func _RegistrationAuthority_NewRegistration_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } -func _RegistrationAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateRegistrationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RegistrationAuthorityServer).UpdateRegistration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/ra.RegistrationAuthority/UpdateRegistration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationAuthorityServer).UpdateRegistration(ctx, req.(*UpdateRegistrationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RegistrationAuthority_PerformValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PerformValidationRequest) +func _RegistrationAuthority_UpdateRegistrationKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRegistrationKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(RegistrationAuthorityServer).PerformValidation(ctx, in) + return srv.(RegistrationAuthorityServer).UpdateRegistrationKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/PerformValidation", + FullMethod: RegistrationAuthority_UpdateRegistrationKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationAuthorityServer).PerformValidation(ctx, req.(*PerformValidationRequest)) + return srv.(RegistrationAuthorityServer).UpdateRegistrationKey(ctx, req.(*UpdateRegistrationKeyRequest)) } return interceptor(ctx, in, info, handler) } -func _RegistrationAuthority_RevokeCertificateWithReg_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeCertificateWithRegRequest) +func _RegistrationAuthority_DeactivateRegistration_Handler(srv interface{},
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeactivateRegistrationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(RegistrationAuthorityServer).RevokeCertificateWithReg(ctx, in) + return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/RevokeCertificateWithReg", + FullMethod: RegistrationAuthority_DeactivateRegistration_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationAuthorityServer).RevokeCertificateWithReg(ctx, req.(*RevokeCertificateWithRegRequest)) + return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, req.(*DeactivateRegistrationRequest)) } return interceptor(ctx, in, info, handler) } -func _RegistrationAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proto.Registration) +func _RegistrationAuthority_PerformValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PerformValidationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, in) + return srv.(RegistrationAuthorityServer).PerformValidation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/DeactivateRegistration", + FullMethod: RegistrationAuthority_PerformValidation_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RegistrationAuthorityServer).DeactivateRegistration(ctx, req.(*proto.Registration)) + return srv.(RegistrationAuthorityServer).PerformValidation(ctx, req.(*PerformValidationRequest)) } return interceptor(ctx, in, info, handler) } @@ -308,7 +362,7 @@ func _RegistrationAuthority_DeactivateAuthorization_Handler(srv interface{}, ctx } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/DeactivateAuthorization", + FullMethod: RegistrationAuthority_DeactivateAuthorization_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).DeactivateAuthorization(ctx, req.(*proto.Authorization)) @@ -326,7 +380,7 @@ func _RegistrationAuthority_RevokeCertByApplicant_Handler(srv interface{}, ctx c } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/RevokeCertByApplicant", + FullMethod: RegistrationAuthority_RevokeCertByApplicant_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).RevokeCertByApplicant(ctx, req.(*RevokeCertByApplicantRequest)) @@ -344,7 +398,7 @@ func _RegistrationAuthority_RevokeCertByKey_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/RevokeCertByKey", + FullMethod: RegistrationAuthority_RevokeCertByKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).RevokeCertByKey(ctx, req.(*RevokeCertByKeyRequest)) @@ -362,7 +416,7 @@ func 
_RegistrationAuthority_AdministrativelyRevokeCertificate_Handler(srv interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/AdministrativelyRevokeCertificate", + FullMethod: RegistrationAuthority_AdministrativelyRevokeCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).AdministrativelyRevokeCertificate(ctx, req.(*AdministrativelyRevokeCertificateRequest)) @@ -380,7 +434,7 @@ func _RegistrationAuthority_NewOrder_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/NewOrder", + FullMethod: RegistrationAuthority_NewOrder_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest)) @@ -388,6 +442,24 @@ func _RegistrationAuthority_NewOrder_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _RegistrationAuthority_GetAuthorization_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAuthorizationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationAuthorityServer).GetAuthorization(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RegistrationAuthority_GetAuthorization_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationAuthorityServer).GetAuthorization(ctx, req.(*GetAuthorizationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _RegistrationAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FinalizeOrderRequest) if err := dec(in); err != nil { @@ -398,7 +470,7 @@ func _RegistrationAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ra.RegistrationAuthority/FinalizeOrder", + FullMethod: RegistrationAuthority_FinalizeOrder_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RegistrationAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest)) @@ -406,6 +478,42 @@ func _RegistrationAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _RegistrationAuthority_UnpauseAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnpauseAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationAuthorityServer).UnpauseAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RegistrationAuthority_UnpauseAccount_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationAuthorityServer).UnpauseAccount(ctx, req.(*UnpauseAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RegistrationAuthority_AddRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(AddRateLimitOverrideRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistrationAuthorityServer).AddRateLimitOverride(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RegistrationAuthority_AddRateLimitOverride_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistrationAuthorityServer).AddRateLimitOverride(ctx, req.(*AddRateLimitOverrideRequest)) + } + return interceptor(ctx, in, info, handler) +} + // RegistrationAuthority_ServiceDesc is the grpc.ServiceDesc for RegistrationAuthority service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -418,21 +526,17 @@ var RegistrationAuthority_ServiceDesc = grpc.ServiceDesc{ Handler: _RegistrationAuthority_NewRegistration_Handler, }, { - MethodName: "UpdateRegistration", - Handler: _RegistrationAuthority_UpdateRegistration_Handler, - }, - { - MethodName: "PerformValidation", - Handler: _RegistrationAuthority_PerformValidation_Handler, - }, - { - MethodName: "RevokeCertificateWithReg", - Handler: _RegistrationAuthority_RevokeCertificateWithReg_Handler, + MethodName: "UpdateRegistrationKey", + Handler: _RegistrationAuthority_UpdateRegistrationKey_Handler, }, { MethodName: "DeactivateRegistration", Handler: _RegistrationAuthority_DeactivateRegistration_Handler, }, + { + MethodName: "PerformValidation", + Handler: _RegistrationAuthority_PerformValidation_Handler, + }, { MethodName: "DeactivateAuthorization", Handler: _RegistrationAuthority_DeactivateAuthorization_Handler, @@ -453,10 +557,124 @@ var RegistrationAuthority_ServiceDesc = grpc.ServiceDesc{ MethodName: "NewOrder", Handler: _RegistrationAuthority_NewOrder_Handler, }, + { + MethodName: "GetAuthorization", + Handler: _RegistrationAuthority_GetAuthorization_Handler, + }, { MethodName: "FinalizeOrder", Handler: _RegistrationAuthority_FinalizeOrder_Handler, }, + { + MethodName: "UnpauseAccount", + Handler: _RegistrationAuthority_UnpauseAccount_Handler, + }, + { + MethodName: "AddRateLimitOverride", + Handler: _RegistrationAuthority_AddRateLimitOverride_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ra.proto", +} + +const ( + SCTProvider_GetSCTs_FullMethodName = "/ra.SCTProvider/GetSCTs" +) + +// SCTProviderClient is the client API for SCTProvider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SCTProviderClient interface { + GetSCTs(ctx context.Context, in *SCTRequest, opts ...grpc.CallOption) (*SCTResponse, error) +} + +type sCTProviderClient struct { + cc grpc.ClientConnInterface +} + +func NewSCTProviderClient(cc grpc.ClientConnInterface) SCTProviderClient { + return &sCTProviderClient{cc} +} + +func (c *sCTProviderClient) GetSCTs(ctx context.Context, in *SCTRequest, opts ...grpc.CallOption) (*SCTResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SCTResponse) + err := c.cc.Invoke(ctx, SCTProvider_GetSCTs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SCTProviderServer is the server API for SCTProvider service. +// All implementations must embed UnimplementedSCTProviderServer +// for forward compatibility. 
+type SCTProviderServer interface { + GetSCTs(context.Context, *SCTRequest) (*SCTResponse, error) + mustEmbedUnimplementedSCTProviderServer() +} + +// UnimplementedSCTProviderServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedSCTProviderServer struct{} + +func (UnimplementedSCTProviderServer) GetSCTs(context.Context, *SCTRequest) (*SCTResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSCTs not implemented") +} +func (UnimplementedSCTProviderServer) mustEmbedUnimplementedSCTProviderServer() {} +func (UnimplementedSCTProviderServer) testEmbeddedByValue() {} + +// UnsafeSCTProviderServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SCTProviderServer will +// result in compilation errors. +type UnsafeSCTProviderServer interface { + mustEmbedUnimplementedSCTProviderServer() +} + +func RegisterSCTProviderServer(s grpc.ServiceRegistrar, srv SCTProviderServer) { + // If the following call panics, it indicates UnimplementedSCTProviderServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&SCTProvider_ServiceDesc, srv) +} + +func _SCTProvider_GetSCTs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SCTRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SCTProviderServer).GetSCTs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SCTProvider_GetSCTs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SCTProviderServer).GetSCTs(ctx, req.(*SCTRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SCTProvider_ServiceDesc is the grpc.ServiceDesc for SCTProvider service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SCTProvider_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "ra.SCTProvider", + HandlerType: (*SCTProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSCTs", + Handler: _SCTProvider_GetSCTs_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "ra.proto", diff --git a/ra/ra.go b/ra/ra.go index 19eeaa657a4..f0d8ea2d00f 100644 --- a/ra/ra.go +++ b/ra/ra.go @@ -1,25 +1,35 @@ package ra import ( + "bytes" "context" + "crypto" "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" "encoding/json" "errors" "fmt" - "math/big" - "net" "net/url" - "reflect" - "sort" + "os" + "slices" "strconv" "strings" + "sync" "time" - "github.com/honeycombio/beeline-go" + "github.com/go-jose/go-jose/v4" "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/akamai" - akamaipb "github.com/letsencrypt/boulder/akamai/proto" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/allowlist" capb "github.com/letsencrypt/boulder/ca/proto" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" csrlib "github.com/letsencrypt/boulder/csr" @@ -36,73 +46,75 @@ import ( "github.com/letsencrypt/boulder/probs" pubpb "github.com/letsencrypt/boulder/publisher/proto" rapb "github.com/letsencrypt/boulder/ra/proto" - "github.com/letsencrypt/boulder/ratelimit" - "github.com/letsencrypt/boulder/reloader" + "github.com/letsencrypt/boulder/ratelimits" "github.com/letsencrypt/boulder/revocation" sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/va" vapb "github.com/letsencrypt/boulder/va/proto" + "github.com/letsencrypt/boulder/web" - "github.com/prometheus/client_golang/prometheus" - "github.com/weppos/publicsuffix-go/publicsuffix" - "golang.org/x/crypto/ocsp" - grpc "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - "gopkg.in/square/go-jose.v2" ) var ( errIncompleteGRPCRequest = errors.New("incomplete gRPC request message") errIncompleteGRPCResponse = errors.New("incomplete gRPC response message") -) -type caaChecker interface { - IsCAAValid( - ctx context.Context, - in *vapb.IsCAAValidRequest, - opts ...grpc.CallOption, - ) (*vapb.IsCAAValidResponse, error) -} + // caaRecheckDuration is the amount of time after a CAA check that we will + // recheck the CAA records for a domain. Per Baseline Requirements, we must + // recheck CAA records within 8 hours of issuance. We set this to 7 hours to + // stay on the safe side. + caaRecheckDuration = -7 * time.Hour +) // RegistrationAuthorityImpl defines an RA. // // NOTE: All of the fields in RegistrationAuthorityImpl need to be // populated, or there is a risk of panic. 
type RegistrationAuthorityImpl struct { - rapb.UnimplementedRegistrationAuthorityServer + rapb.UnsafeRegistrationAuthorityServer + rapb.UnsafeSCTProviderServer CA capb.CertificateAuthorityClient - VA vapb.VAClient + VA va.RemoteClients SA sapb.StorageAuthorityClient PA core.PolicyAuthority publisher pubpb.PublisherClient - caa caaChecker - - clk clock.Clock - log blog.Logger - keyPolicy goodkey.KeyPolicy - // How long before a newly created authorization expires. - authorizationLifetime time.Duration - pendingAuthorizationLifetime time.Duration - rlPolicies ratelimit.Limits - maxContactsPerReg int - maxNames int - reuseValidAuthz bool - orderLifetime time.Duration - - issuersByNameID map[issuance.IssuerNameID]*issuance.Certificate - issuersByID map[issuance.IssuerID]*issuance.Certificate - purger akamaipb.AkamaiPurgerClient + + clk clock.Clock + log blog.Logger + keyPolicy goodkey.KeyPolicy + profiles *validationProfiles + maxContactsPerReg int + limiter *ratelimits.Limiter + txnBuilder *ratelimits.TransactionBuilder + finalizeTimeout time.Duration + drainWG sync.WaitGroup + + issuersByNameID map[issuance.NameID]*issuance.Certificate ctpolicy *ctpolicy.CTPolicy - ctpolicyResults *prometheus.HistogramVec - rateLimitCounter *prometheus.CounterVec - revocationReasonCounter *prometheus.CounterVec - namesPerCert *prometheus.HistogramVec - newRegCounter prometheus.Counter - reusedValidAuthzCounter prometheus.Counter - recheckCAACounter prometheus.Counter - newCertCounter prometheus.Counter - recheckCAAUsedAuthzLifetime prometheus.Counter + ctpolicyResults *prometheus.HistogramVec + revocationReasonCounter *prometheus.CounterVec + namesPerCert *prometheus.HistogramVec + newRegCounter prometheus.Counter + recheckCAACounter prometheus.Counter + newCertCounter prometheus.Counter + authzAges *prometheus.HistogramVec + orderAges *prometheus.HistogramVec + inflightFinalizes prometheus.Gauge + certCSRMismatch prometheus.Counter + pauseCounter *prometheus.CounterVec +} + +var _ rapb.RegistrationAuthorityServer = (*RegistrationAuthorityImpl)(nil) + +// Health implements our grpc.checker interface. This method will be called +// periodically to set the gRPC service's healthpb.Health.Check() status. +func (ra *RegistrationAuthorityImpl) Health(ctx context.Context) error { + if ra.txnBuilder.Ready() { + return nil + } + return errors.New("waiting for overrides") } // NewRegistrationAuthorityImpl constructs a new RA object. 
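[Reviewer note, not part of the diff: a minimal client-side sketch of the new SCTProvider service generated above. The helper name fetchSCTs, the sketch package, and the assumption that the caller supplies an already-dialed *grpc.ClientConn and a DER-encoded precertificate are illustrative only; the field names PrecertDER and SctDER follow from the generated message types.]

package sketch // hypothetical harness, not part of this change

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	rapb "github.com/letsencrypt/boulder/ra/proto"
)

// fetchSCTs asks the RA's SCTProvider service for SCTs covering the given
// DER-encoded precertificate, returning the encoded SCTs that the caller
// can embed in the final certificate.
func fetchSCTs(ctx context.Context, conn *grpc.ClientConn, precertDER []byte) ([][]byte, error) {
	client := rapb.NewSCTProviderClient(conn)
	resp, err := client.GetSCTs(ctx, &rapb.SCTRequest{PrecertDER: precertDER})
	if err != nil {
		return nil, fmt.Errorf("fetching SCTs: %w", err)
	}
	return resp.SctDER, nil
}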
@@ -112,18 +124,16 @@ func NewRegistrationAuthorityImpl( stats prometheus.Registerer, maxContactsPerReg int, keyPolicy goodkey.KeyPolicy, + limiter *ratelimits.Limiter, + txnBuilder *ratelimits.TransactionBuilder, maxNames int, - reuseValidAuthz bool, - authorizationLifetime time.Duration, - pendingAuthorizationLifetime time.Duration, + profiles *validationProfiles, pubc pubpb.PublisherClient, - caaClient caaChecker, - orderLifetime time.Duration, + finalizeTimeout time.Duration, ctp *ctpolicy.CTPolicy, - purger akamaipb.AkamaiPurgerClient, issuers []*issuance.Certificate, ) *RegistrationAuthorityImpl { - ctpolicyResults := prometheus.NewHistogramVec( + ctpolicyResults := promauto.With(stats).NewHistogramVec( prometheus.HistogramOpts{ Name: "ctpolicy_results", Help: "Histogram of latencies of ctpolicy.GetSCTs calls with success/failure/deadlineExceeded labels", @@ -131,121 +141,256 @@ func NewRegistrationAuthorityImpl( }, []string{"result"}, ) - stats.MustRegister(ctpolicyResults) - namesPerCert := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "names_per_cert", - Help: "Histogram of the number of SANs in requested and issued certificates", - // The namesPerCert buckets are chosen based on the current Let's Encrypt - // limit of 100 SANs per certificate. - Buckets: []float64{1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}, - }, - // Type label value is either "requested" or "issued". - []string{"type"}, - ) - stats.MustRegister(namesPerCert) - - rateLimitCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "ra_ratelimits", - Help: "A counter of RA ratelimit checks labelled by type and pass/exceed", - }, []string{"limit", "result"}) - stats.MustRegister(rateLimitCounter) + namesPerCert := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "names_per_cert", + Help: "Histogram of the number of SANs in requested and issued certificates", + // The namesPerCert buckets are chosen based on the current Let's Encrypt + // limit of 100 SANs per certificate. 
+ Buckets: []float64{1, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100}, + }, []string{"type"}) - newRegCounter := prometheus.NewCounter(prometheus.CounterOpts{ + newRegCounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ Name: "new_registrations", Help: "A counter of new registrations", }) - stats.MustRegister(newRegCounter) - reusedValidAuthzCounter := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "reused_valid_authz", - Help: "A counter of reused valid authorizations", - }) - stats.MustRegister(reusedValidAuthzCounter) - - recheckCAACounter := prometheus.NewCounter(prometheus.CounterOpts{ + recheckCAACounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ Name: "recheck_caa", Help: "A counter of CAA rechecks", }) - stats.MustRegister(recheckCAACounter) - - recheckCAAUsedAuthzLifetime := prometheus.NewCounter(prometheus.CounterOpts{ - Name: "recheck_caa_used_authz_lifetime", - Help: "A counter times the old codepath was used for CAA recheck time", - }) - stats.MustRegister(recheckCAAUsedAuthzLifetime) - newCertCounter := prometheus.NewCounter(prometheus.CounterOpts{ + newCertCounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ Name: "new_certificates", - Help: "A counter of new certificates", + Help: "A counter of issued certificates", }) - stats.MustRegister(newCertCounter) - revocationReasonCounter := prometheus.NewCounterVec(prometheus.CounterOpts{ + revocationReasonCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ Name: "revocation_reason", Help: "A counter of certificate revocation reasons", }, []string{"reason"}) - stats.MustRegister(revocationReasonCounter) - issuersByNameID := make(map[issuance.IssuerNameID]*issuance.Certificate) - issuersByID := make(map[issuance.IssuerID]*issuance.Certificate) + authzAges := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "authz_ages", + Help: "Histogram of ages, in seconds, of Authorization objects, labelled by method and type", + // authzAges keeps track of how old, in seconds, authorizations are when + // we attach them to a new order and again when we finalize that order. + // We give it a non-standard bucket distribution so that the leftmost + // (closest to zero) bucket can be used exclusively for brand-new (i.e. + // not reused) authzs. Our buckets are: one nanosecond, one second, one + // minute, one hour, 7 hours (our CAA reuse time), 1 day, 2 days, 7 + // days, 30 days, +inf (should be empty). + Buckets: []float64{0.000000001, 1, 60, 3600, 25200, 86400, 172800, 604800, 2592000, 7776000}, + }, []string{"method", "type"}) + + orderAges := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "order_ages", + Help: "Histogram of ages, in seconds, of Order objects when they're reused and finalized, labelled by method", + // Orders currently have a max age of 7 days (168hrs), so our buckets + // are: one nanosecond (new), 1 second, 10 seconds, 1 minute, 10 + // minutes, 1 hour, 7 hours (our CAA reuse time), 1 day, 2 days, 7 days, +inf. + Buckets: []float64{0.000000001, 1, 10, 60, 600, 3600, 25200, 86400, 172800, 604800}, + }, []string{"method"}) + + inflightFinalizes := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Name: "inflight_finalizes", + Help: "Gauge of the number of current asynchronous finalize goroutines", + }) + + certCSRMismatch := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "cert_csr_mismatch", + Help: "Number of issued certificates that have failed ra.matchesCSR for any reason. 
This is _real bad_ and should be alerted upon.", + }) + + pauseCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "paused_pairs", + Help: "Number of times a pause operation is performed, labeled by paused=[bool], repaused=[bool], grace=[bool]", + }, []string{"paused", "repaused", "grace"}) + + issuersByNameID := make(map[issuance.NameID]*issuance.Certificate) for _, issuer := range issuers { issuersByNameID[issuer.NameID()] = issuer - issuersByID[issuer.ID()] = issuer } ra := &RegistrationAuthorityImpl{ - clk: clk, - log: logger, - authorizationLifetime: authorizationLifetime, - pendingAuthorizationLifetime: pendingAuthorizationLifetime, - rlPolicies: ratelimit.New(), - maxContactsPerReg: maxContactsPerReg, - keyPolicy: keyPolicy, - maxNames: maxNames, - reuseValidAuthz: reuseValidAuthz, - publisher: pubc, - caa: caaClient, - orderLifetime: orderLifetime, - ctpolicy: ctp, - ctpolicyResults: ctpolicyResults, - purger: purger, - issuersByNameID: issuersByNameID, - issuersByID: issuersByID, - namesPerCert: namesPerCert, - rateLimitCounter: rateLimitCounter, - newRegCounter: newRegCounter, - reusedValidAuthzCounter: reusedValidAuthzCounter, - recheckCAACounter: recheckCAACounter, - newCertCounter: newCertCounter, - revocationReasonCounter: revocationReasonCounter, - recheckCAAUsedAuthzLifetime: recheckCAAUsedAuthzLifetime, + clk: clk, + log: logger, + profiles: profiles, + maxContactsPerReg: maxContactsPerReg, + keyPolicy: keyPolicy, + limiter: limiter, + txnBuilder: txnBuilder, + publisher: pubc, + finalizeTimeout: finalizeTimeout, + ctpolicy: ctp, + ctpolicyResults: ctpolicyResults, + issuersByNameID: issuersByNameID, + namesPerCert: namesPerCert, + newRegCounter: newRegCounter, + recheckCAACounter: recheckCAACounter, + newCertCounter: newCertCounter, + revocationReasonCounter: revocationReasonCounter, + authzAges: authzAges, + orderAges: orderAges, + inflightFinalizes: inflightFinalizes, + certCSRMismatch: certCSRMismatch, + pauseCounter: pauseCounter, } return ra } -func (ra *RegistrationAuthorityImpl) SetRateLimitPoliciesFile(filename string) error { - _, err := reloader.New(filename, ra.rlPolicies.LoadPolicies, ra.rateLimitPoliciesLoadError) - if err != nil { - return err +// ValidationProfileConfig is a config struct which can be used to create a +// ValidationProfile. +type ValidationProfileConfig struct { + // PendingAuthzLifetime defines how far in the future an authorization's + // "expires" timestamp is set when it is first created, i.e. how much + // time the applicant has to attempt the challenge. + PendingAuthzLifetime config.Duration `validate:"required"` + // ValidAuthzLifetime defines how far in the future an authorization's + // "expires" timestamp is set when one of its challenges is fulfilled, + // i.e. how long a validated authorization may be reused. + ValidAuthzLifetime config.Duration `validate:"required"` + // OrderLifetime defines how far in the future an order's "expires" + // timestamp is set when it is first created, i.e. how much time the + // applicant has to fulfill all challenges and finalize the order. This is + // a maximum time: if the order reuses an authorization and that authz + // expires earlier than this OrderLifetime would otherwise set, then the + // order's expiration is brought in to match that authorization. + OrderLifetime config.Duration `validate:"required"` + // MaxNames is the maximum number of subjectAltNames in a single cert. + // The value supplied MUST be greater than 0 and no more than 100. 
These + // limits are per section 7.1 of our combined CP/CPS, under "DV-SSL + // Subscriber Certificate". The value must be less than or equal to the + // global (i.e. not per-profile) value configured in the CA. + MaxNames int `validate:"omitempty,min=1,max=100"` + // AllowList specifies the path to a YAML file containing a list of + // account IDs permitted to use this profile. If no path is + // specified, the profile is open to all accounts. If the file + // exists but is empty, the profile is closed to all accounts. + AllowList string `validate:"omitempty"` + // IdentifierTypes is a list of identifier types that may be issued under + // this profile. + IdentifierTypes []identifier.IdentifierType `validate:"required,dive,oneof=dns ip"` +} + +// validationProfile holds the attributes of a given validation profile. +type validationProfile struct { + // pendingAuthzLifetime defines how far in the future an authorization's + // "expires" timestamp is set when it is first created, i.e. how much + // time the applicant has to attempt the challenge. + pendingAuthzLifetime time.Duration + // validAuthzLifetime defines how far in the future an authorization's + // "expires" timestamp is set when one of its challenges is fulfilled, + // i.e. how long a validated authorization may be reused. + validAuthzLifetime time.Duration + // orderLifetime defines how far in the future an order's "expires" + // timestamp is set when it is first created, i.e. how much time the + // applicant has to fulfill all challenges and finalize the order. This is + // a maximum time: if the order reuses an authorization and that authz + // expires earlier than this OrderLifetime would otherwise set, then the + // order's expiration is brought in to match that authorization. + orderLifetime time.Duration + // maxNames is the maximum number of subjectAltNames in a single cert. + maxNames int + // allowList holds the set of account IDs allowed to use this profile. If + // nil, the profile is open to all accounts (everyone is allowed). + allowList *allowlist.List[int64] + // identifierTypes is a list of identifier types that may be issued under + // this profile. + identifierTypes []identifier.IdentifierType +} + +// validationProfiles provides access to the set of configured profiles, +// including the default profile for orders/authzs which do not specify one. +type validationProfiles struct { + defaultName string + byName map[string]*validationProfile +} + +// NewValidationProfiles builds a new validationProfiles struct from the given +// configs and default name. It enforces that the given authorization lifetimes +// are within the bounds mandated by the Baseline Requirements. +func NewValidationProfiles(defaultName string, configs map[string]*ValidationProfileConfig) (*validationProfiles, error) { + if defaultName == "" { + return nil, errors.New("default profile name must be configured") } - return nil + profiles := make(map[string]*validationProfile, len(configs)) + + for name, config := range configs { + // The Baseline Requirements v1.8.1 state that validation tokens "MUST + // NOT be used for more than 30 days from its creation". If unconfigured + // or the configured value pendingAuthorizationLifetimeDays is greater + // than 29 days, bail out. 
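+		// For example (illustrative values, not part of this change): a
+		// profile configured with a PendingAuthzLifetime of "168h" (7 days)
+		// is accepted by the check below, while "720h" (30 days) exceeds
+		// the 29-day cap and causes profile construction to fail.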
+		if config.PendingAuthzLifetime.Duration <= 0 || config.PendingAuthzLifetime.Duration > 29*(24*time.Hour) {
+			return nil, fmt.Errorf("PendingAuthzLifetime value must be greater than 0 and less than 30d, but got %q", config.PendingAuthzLifetime.Duration)
+		}
+
+		// Baseline Requirements v1.8.1 section 4.2.1: "any reused data, document,
+		// or completed validation MUST be obtained no more than 398 days prior
+		// to issuing the Certificate". If unconfigured or the configured value is
+		// greater than 397 days, bail out.
+		if config.ValidAuthzLifetime.Duration <= 0 || config.ValidAuthzLifetime.Duration > 397*(24*time.Hour) {
+			return nil, fmt.Errorf("ValidAuthzLifetime value must be greater than 0 and less than 398d, but got %q", config.ValidAuthzLifetime.Duration)
+		}
+
+		if config.MaxNames <= 0 || config.MaxNames > 100 {
+			return nil, fmt.Errorf("MaxNames must be greater than 0 and at most 100")
+		}
+
+		var allowList *allowlist.List[int64]
+		if config.AllowList != "" {
+			data, err := os.ReadFile(config.AllowList)
+			if err != nil {
+				return nil, fmt.Errorf("reading allowlist: %w", err)
+			}
+			allowList, err = allowlist.NewFromYAML[int64](data)
+			if err != nil {
+				return nil, fmt.Errorf("parsing allowlist: %w", err)
+			}
+		}
+
+		profiles[name] = &validationProfile{
+			pendingAuthzLifetime: config.PendingAuthzLifetime.Duration,
+			validAuthzLifetime:   config.ValidAuthzLifetime.Duration,
+			orderLifetime:        config.OrderLifetime.Duration,
+			maxNames:             config.MaxNames,
+			allowList:            allowList,
+			identifierTypes:      config.IdentifierTypes,
+		}
+	}
+
+	_, ok := profiles[defaultName]
+	if !ok {
+		return nil, fmt.Errorf("no profile configured matching default profile name %q", defaultName)
+	}
+
+	return &validationProfiles{
+		defaultName: defaultName,
+		byName:      profiles,
+	}, nil
 }

-func (ra *RegistrationAuthorityImpl) rateLimitPoliciesLoadError(err error) {
-	ra.log.Errf("error reloading rate limit policy: %s", err)
+func (vp *validationProfiles) get(name string) (*validationProfile, error) {
+	if name == "" {
+		name = vp.defaultName
+	}
+	profile, ok := vp.byName[name]
+	if !ok {
+		return nil, berrors.InvalidProfileError("unrecognized profile name %q", name)
+	}
+	return profile, nil
 }

-// certificateRequestAuthz is a struct for holding information about a valid
-// authz referenced during a certificateRequestEvent. It holds both the
-// authorization ID and the challenge type that made the authorization valid. We
-// specifically include the challenge type that solved the authorization to make
-// some common analysis easier.
-type certificateRequestAuthz struct {
-	ID            string
-	ChallengeType core.AcmeChallenge
+// identifierLog is a struct for logging information about when and
+// how an identifier was validated. We include the challenge type that solved
+// the authorization and when the challenge was completed to make some common
+// analysis easier.
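+//
+// A single entry might marshal to the audit log roughly as follows
+// (illustrative values only):
+//
+//	{"Ident":{"type":"dns","value":"example.com"},"Authz":"abc123",
+//	 "Challenge":"http-01","Validated":"2006-01-02T15:04:05Z"}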
+type identifierLog struct { + Ident identifier.ACMEIdentifier + Authz string + Challenge core.AcmeChallenge + Validated time.Time } // certificateRequestEvent is a struct for holding information that is logged as @@ -264,21 +409,30 @@ type certificateRequestEvent struct { VerifiedFields []string `json:",omitempty"` // CommonName is the subject common name from the issued cert CommonName string `json:",omitempty"` - // Names are the DNS SAN entries from the issued cert - Names []string `json:",omitempty"` + // Identifiers are the identifiers and validation data from the issued cert + Identifiers []identifierLog `json:",omitempty"` // NotBefore is the starting timestamp of the issued cert's validity period - NotBefore time.Time `json:",omitempty"` + NotBefore time.Time // NotAfter is the ending timestamp of the issued cert's validity period - NotAfter time.Time `json:",omitempty"` + NotAfter time.Time // RequestTime and ResponseTime are for tracking elapsed time during issuance - RequestTime time.Time `json:",omitempty"` - ResponseTime time.Time `json:",omitempty"` + RequestTime time.Time + ResponseTime time.Time // Error contains any encountered errors Error string `json:",omitempty"` - // Authorizations is a map of identifier names to certificateRequestAuthz - // objects. It can be used to understand how the names in a certificate - // request were authorized. - Authorizations map[string]certificateRequestAuthz + // CertProfileName is a human readable name used to refer to the certificate + // profile. + CertProfileName string `json:",omitempty"` + // CertProfileHash is SHA256 sum over every exported field of an + // issuance.ProfileConfig, represented here as a hexadecimal string. + CertProfileHash string `json:",omitempty"` + // PreviousCertificateIssued is present when this certificate uses the same set + // of FQDNs as a previous certificate (from any account) and contains the + // notBefore of the most recent such certificate. + PreviousCertificateIssued time.Time + // UserAgent is the User-Agent header from the ACME client (provided to the + // RA via gRPC metadata). + UserAgent string } // certificateRevocationEvent is a struct for holding information that is logged @@ -289,13 +443,14 @@ type certificateRevocationEvent struct { // serial number. SerialNumber string `json:",omitempty"` // Reason is the integer representing the revocation reason used. - Reason int64 `json:",omitempty"` + Reason revocation.Reason `json:"reason"` // Method is the way in which revocation was requested. // It will be one of the strings: "applicant", "subscriber", "control", "key", or "admin". Method string `json:",omitempty"` - // RequesterID is the account ID of the requester. + // Requester is the account ID of the requester. // Will be zero for admin revocations. - RequesterID int64 `json:",omitempty"` + Requester int64 `json:",omitempty"` + CRLShard int64 // AdminName is the name of the admin requester. // Will be zero for subscriber revocations. AdminName string `json:",omitempty"` @@ -303,83 +458,24 @@ type certificateRevocationEvent struct { Error string `json:",omitempty"` } -// noRegistrationID is used for the regID parameter to GetThreshold when no -// registration-based overrides are necessary. -const noRegistrationID = -1 - -// registrationCounter is a type to abstract the use of `CountRegistrationsByIP` -// or `CountRegistrationsByIPRange` SA methods. 
-type registrationCounter func(context.Context, *sapb.CountRegistrationsByIPRequest, ...grpc.CallOption) (*sapb.Count, error) - -// checkRegistrationIPLimit checks a specific registraton limit by using the -// provided registrationCounter function to determine if the limit has been -// exceeded for a given IP or IP range -func (ra *RegistrationAuthorityImpl) checkRegistrationIPLimit(ctx context.Context, limit ratelimit.RateLimitPolicy, ip net.IP, counter registrationCounter) error { - if !limit.Enabled() { - return nil - } - - now := ra.clk.Now() - count, err := counter(ctx, &sapb.CountRegistrationsByIPRequest{ - Ip: ip, - Range: &sapb.Range{ - Earliest: limit.WindowBegin(now).UnixNano(), - Latest: now.UnixNano(), - }, - }) - if err != nil { - return err - } - - if count.Count >= limit.GetThreshold(ip.String(), noRegistrationID) { - return berrors.RateLimitError("too many registrations for this IP") - } - - return nil -} - -// checkRegistrationLimits enforces the RegistrationsPerIP and -// RegistrationsPerIPRange limits -func (ra *RegistrationAuthorityImpl) checkRegistrationLimits(ctx context.Context, ip net.IP) error { - // Check the registrations per IP limit using the CountRegistrationsByIP SA - // function that matches IP addresses exactly - exactRegLimit := ra.rlPolicies.RegistrationsPerIP() - err := ra.checkRegistrationIPLimit(ctx, exactRegLimit, ip, ra.SA.CountRegistrationsByIP) - if err != nil { - ra.rateLimitCounter.WithLabelValues("registrations_by_ip", "exceeded").Inc() - ra.log.Infof("Rate limit exceeded, RegistrationsByIP, IP: %s", ip) - return err - } - ra.rateLimitCounter.WithLabelValues("registrations_by_ip", "pass").Inc() - - // We only apply the fuzzy reg limit to IPv6 addresses. - // Per https://golang.org/pkg/net/#IP.To4 "If ip is not an IPv4 address, To4 - // returns nil" - if ip.To4() != nil { - return nil - } - - // Check the registrations per IP range limit using the - // CountRegistrationsByIPRange SA function that fuzzy-matches IPv6 addresses - // within a larger address range - fuzzyRegLimit := ra.rlPolicies.RegistrationsPerIPRange() - err = ra.checkRegistrationIPLimit(ctx, fuzzyRegLimit, ip, ra.SA.CountRegistrationsByIPRange) - if err != nil { - ra.rateLimitCounter.WithLabelValues("registrations_by_ip_range", "exceeded").Inc() - ra.log.Infof("Rate limit exceeded, RegistrationsByIPRange, IP: %s", ip) - // For the fuzzyRegLimit we use a new error message that specifically - // mentions that the limit being exceeded is applied to a *range* of IPs - return berrors.RateLimitError("too many registrations for this IP range") - } - ra.rateLimitCounter.WithLabelValues("registrations_by_ip_range", "pass").Inc() - - return nil +// finalizationCAACheckEvent is a struct for holding information logged as JSON +// to the info log as the result of an issuance event. It is logged when the RA +// performs the final CAA check of a certificate finalization request. +type finalizationCAACheckEvent struct { + // Requester is the associated account ID. + Requester int64 `json:",omitempty"` + // Reused is a count of Authz where the original CAA check was performed in + // the last 7 hours. + Reused int `json:",omitempty"` + // Rechecked is a count of Authz where a new CAA check was performed because + // the original check was older than 7 hours. + Rechecked int `json:",omitempty"` } // NewRegistration constructs a new Registration from a request. 
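// A minimal request needs only the account key and agreement; for example
// (illustrative only, with jwkJSON standing in for a JSON-encoded account
// public key):
//
//	reg, err := ra.NewRegistration(ctx, &corepb.Registration{
//		Key:       jwkJSON, // []byte, placeholder for a real JWK
//		Agreement: "https://example.invalid/terms",
//	})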
func (ra *RegistrationAuthorityImpl) NewRegistration(ctx context.Context, request *corepb.Registration) (*corepb.Registration, error) { // Error if the request is nil, there is no account key or IP address - if request == nil || len(request.Key) == 0 || len(request.InitialIP) == 0 { + if request == nil || len(request.Key) == 0 { return nil, errIncompleteGRPCRequest } @@ -394,35 +490,11 @@ func (ra *RegistrationAuthorityImpl) NewRegistration(ctx context.Context, reques return nil, berrors.MalformedError("invalid public key: %s", err.Error()) } - // Check IP address rate limits. - var ipAddr net.IP - err = ipAddr.UnmarshalText(request.InitialIP) - if err != nil { - return nil, berrors.InternalServerError("failed to unmarshal ip address: %s", err.Error()) - } - err = ra.checkRegistrationLimits(ctx, ipAddr) - if err != nil { - return nil, err - } - - // Check that contacts conform to our expectations. - err = validateContactsPresent(request.Contact, request.ContactsPresent) - if err != nil { - return nil, err - } - err = ra.validateContacts(ctx, request.Contact) - if err != nil { - return nil, err - } - // Don't populate ID or CreatedAt because those will be set by the SA. req := &corepb.Registration{ - Key: request.Key, - Contact: request.Contact, - ContactsPresent: request.ContactsPresent, - Agreement: request.Agreement, - InitialIP: request.InitialIP, - Status: string(core.StatusValid), + Key: request.Key, + Agreement: request.Agreement, + Status: string(core.StatusValid), } // Store the registration object, then return the version that got stored. @@ -445,7 +517,7 @@ func (ra *RegistrationAuthorityImpl) NewRegistration(ctx context.Context, reques // * A list containing a mailto contact that contains hfields // * A list containing a contact that has non-ascii characters // * A list containing a contact that doesn't pass `policy.ValidEmail` -func (ra *RegistrationAuthorityImpl) validateContacts(ctx context.Context, contacts []string) error { +func (ra *RegistrationAuthorityImpl) validateContacts(contacts []string) error { if len(contacts) == 0 { return nil // Nothing to validate } @@ -463,19 +535,19 @@ func (ra *RegistrationAuthorityImpl) validateContacts(ctx context.Context, conta } parsed, err := url.Parse(contact) if err != nil { - return berrors.InvalidEmailError("invalid contact") + return berrors.InvalidEmailError("unparsable contact") } if parsed.Scheme != "mailto" { - return berrors.InvalidEmailError("contact method %q is not supported", parsed.Scheme) + return berrors.UnsupportedContactError("only contact scheme 'mailto:' is supported") + } + if parsed.RawQuery != "" || contact[len(contact)-1] == '?' { + return berrors.InvalidEmailError("contact email contains a question mark") } - if parsed.RawQuery != "" { - return berrors.InvalidEmailError("contact email [%q] contains hfields", contact) + if parsed.Fragment != "" || contact[len(contact)-1] == '#' { + return berrors.InvalidEmailError("contact email contains a '#'") } if !core.IsASCII(contact) { - return berrors.InvalidEmailError( - "contact email [%q] contains non-ASCII characters", - contact, - ) + return berrors.InvalidEmailError("contact email contains non-ASCII characters") } err = policy.ValidEmail(parsed.Opaque) if err != nil { @@ -489,10 +561,7 @@ func (ra *RegistrationAuthorityImpl) validateContacts(ctx context.Context, conta // That means the largest marshalled JSON value we can store is 191 bytes. 
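// For example, ["mailto:admin@example.com"] marshals to 28 bytes and
// passes; any contact slice whose JSON encoding reaches 191 bytes is
// rejected by the length check below.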
const maxContactBytes = 191 if jsonBytes, err := json.Marshal(contacts); err != nil { - // This shouldn't happen with a simple []string but if it does we want the - // error to be logged internally but served as a 500 to the user so we - // return a bare error and not a berror here. - return fmt.Errorf("failed to marshal reg.Contact to JSON: %#v", contacts) + return fmt.Errorf("failed to marshal reg.Contact to JSON: %w", err) } else if len(jsonBytes) >= maxContactBytes { return berrors.InvalidEmailError( "too many/too long contact(s). Please use shorter or fewer email addresses") @@ -501,144 +570,43 @@ func (ra *RegistrationAuthorityImpl) validateContacts(ctx context.Context, conta return nil } -func (ra *RegistrationAuthorityImpl) checkPendingAuthorizationLimit(ctx context.Context, regID int64) error { - limit := ra.rlPolicies.PendingAuthorizationsPerAccount() - if limit.Enabled() { - countPB, err := ra.SA.CountPendingAuthorizations2(ctx, &sapb.RegistrationID{ - Id: regID, - }) - if err != nil { - return err - } - // Most rate limits have a key for overrides, but there is no meaningful key - // here. - noKey := "" - if countPB.Count >= limit.GetThreshold(noKey, regID) { - ra.rateLimitCounter.WithLabelValues("pending_authorizations_by_registration_id", "exceeded").Inc() - ra.log.Infof("Rate limit exceeded, PendingAuthorizationsByRegID, regID: %d", regID) - return berrors.RateLimitError("too many currently pending authorizations") - } - ra.rateLimitCounter.WithLabelValues("pending_authorizations_by_registration_id", "pass").Inc() +// matchesCSR tests the contents of a generated certificate to make sure +// that the PublicKey, CommonName, and identifiers match those provided in +// the CSR that was used to generate the certificate. It also checks the +// following fields for: +// - notBefore is not more than 24 hours ago +// - BasicConstraintsValid is true +// - IsCA is false +// - ExtKeyUsage only contains ExtKeyUsageServerAuth & ExtKeyUsageClientAuth +// - Subject only contains CommonName & Names +func (ra *RegistrationAuthorityImpl) matchesCSR(parsedCertificate *x509.Certificate, csr *x509.CertificateRequest) error { + if !core.KeyDigestEquals(parsedCertificate.PublicKey, csr.PublicKey) { + return berrors.InternalServerError("generated certificate public key doesn't match CSR public key") } - return nil -} -// checkInvalidAuthorizationLimits checks the failed validation limit for each -// of the provided hostnames. It returns the first error. -func (ra *RegistrationAuthorityImpl) checkInvalidAuthorizationLimits(ctx context.Context, regID int64, hostnames []string) error { - results := make(chan error, len(hostnames)) - for _, hostname := range hostnames { - go func(hostname string) { - results <- ra.checkInvalidAuthorizationLimit(ctx, regID, hostname) - }(hostname) - } - // We don't have to wait for all of the goroutines to finish because there's - // enough capacity in the chan for them all to write their result even if - // nothing is reading off the chan anymore. - for i := 0; i < len(hostnames); i++ { - err := <-results - if err != nil { - return err + csrIdents := identifier.FromCSR(csr) + if parsedCertificate.Subject.CommonName != "" { + // Only check that the issued common name matches one of the SANs if there + // is an issued CN at all: this allows flexibility on whether we include + // the CN. 
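+		// For example, an issued CN of "example.com" is accepted when the
+		// CSR identifiers include the DNS name "example.com", and rejected
+		// otherwise.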
+ if !slices.Contains(csrIdents, identifier.NewDNS(parsedCertificate.Subject.CommonName)) { + return berrors.InternalServerError("generated certificate CommonName doesn't match any CSR name") } } - return nil -} -func (ra *RegistrationAuthorityImpl) checkInvalidAuthorizationLimit(ctx context.Context, regID int64, hostname string) error { - limit := ra.rlPolicies.InvalidAuthorizationsPerAccount() - if !limit.Enabled() { - return nil + parsedIdents := identifier.FromCert(parsedCertificate) + if !slices.Equal(csrIdents, parsedIdents) { + return berrors.InternalServerError("generated certificate identifiers don't match CSR identifiers") } - latest := ra.clk.Now().Add(ra.pendingAuthorizationLifetime) - earliest := latest.Add(-limit.Window.Duration) - req := &sapb.CountInvalidAuthorizationsRequest{ - RegistrationID: regID, - Hostname: hostname, - Range: &sapb.Range{ - Earliest: earliest.UnixNano(), - Latest: latest.UnixNano(), - }, - } - count, err := ra.SA.CountInvalidAuthorizations2(ctx, req) - if err != nil { - return err - } - // Most rate limits have a key for overrides, but there is no meaningful key - // here. - noKey := "" - if count.Count >= int64(limit.GetThreshold(noKey, regID)) { - ra.log.Infof("Rate limit exceeded, InvalidAuthorizationsByRegID, regID: %d", regID) - return berrors.RateLimitError("too many failed authorizations recently") - } - return nil -} -// checkNewOrdersPerAccountLimit enforces the rlPolicies `NewOrdersPerAccount` -// rate limit. This rate limit ensures a client can not create more than the -// specified threshold of new orders within the specified time window. -func (ra *RegistrationAuthorityImpl) checkNewOrdersPerAccountLimit(ctx context.Context, acctID int64) error { - limit := ra.rlPolicies.NewOrdersPerAccount() - if !limit.Enabled() { - return nil - } - now := ra.clk.Now() - count, err := ra.SA.CountOrders(ctx, &sapb.CountOrdersRequest{ - AccountID: acctID, - Range: &sapb.Range{ - Earliest: now.Add(-limit.Window.Duration).UnixNano(), - Latest: now.UnixNano(), - }, - }) - if err != nil { - return err - } - // There is no meaningful override key to use for this rate limit - noKey := "" - if count.Count >= limit.GetThreshold(noKey, acctID) { - ra.rateLimitCounter.WithLabelValues("new_order_by_registration_id", "exceeded").Inc() - return berrors.RateLimitError("too many new orders recently") + if !slices.Equal(parsedCertificate.EmailAddresses, csr.EmailAddresses) { + return berrors.InternalServerError("generated certificate EmailAddresses don't match CSR EmailAddresses") } - ra.rateLimitCounter.WithLabelValues("new_order_by_registration_id", "pass").Inc() - return nil -} - -// MatchesCSR tests the contents of a generated certificate to make sure -// that the PublicKey, CommonName, and DNSNames match those provided in -// the CSR that was used to generate the certificate. 
It also checks the -// following fields for: -// * notBefore is not more than 24 hours ago -// * BasicConstraintsValid is true -// * IsCA is false -// * ExtKeyUsage only contains ExtKeyUsageServerAuth & ExtKeyUsageClientAuth -// * Subject only contains CommonName & Names -func (ra *RegistrationAuthorityImpl) MatchesCSR(parsedCertificate *x509.Certificate, csr *x509.CertificateRequest) error { - // Check issued certificate matches what was expected from the CSR - hostNames := make([]string, len(csr.DNSNames)) - copy(hostNames, csr.DNSNames) - if len(csr.Subject.CommonName) > 0 { - hostNames = append(hostNames, csr.Subject.CommonName) - } - hostNames = core.UniqueLowerNames(hostNames) - if !core.KeyDigestEquals(parsedCertificate.PublicKey, csr.PublicKey) { - return berrors.InternalServerError("generated certificate public key doesn't match CSR public key") - } - if parsedCertificate.Subject.CommonName != strings.ToLower(csr.Subject.CommonName) { - return berrors.InternalServerError("generated certificate CommonName doesn't match CSR CommonName") - } - // Sort both slices of names before comparison. - parsedNames := parsedCertificate.DNSNames - sort.Strings(parsedNames) - sort.Strings(hostNames) - if !reflect.DeepEqual(parsedNames, hostNames) { - return berrors.InternalServerError("generated certificate DNSNames don't match CSR DNSNames") - } - if !reflect.DeepEqual(parsedCertificate.IPAddresses, csr.IPAddresses) { - return berrors.InternalServerError("generated certificate IPAddresses don't match CSR IPAddresses") - } - if !reflect.DeepEqual(parsedCertificate.EmailAddresses, csr.EmailAddresses) { - return berrors.InternalServerError("generated certificate EmailAddresses don't match CSR EmailAddresses") + if !slices.Equal(parsedCertificate.URIs, csr.URIs) { + return berrors.InternalServerError("generated certificate URIs don't match CSR URIs") } + if len(parsedCertificate.Subject.Country) > 0 || len(parsedCertificate.Subject.Organization) > 0 || len(parsedCertificate.Subject.OrganizationalUnit) > 0 || len(parsedCertificate.Subject.Locality) > 0 || len(parsedCertificate.Subject.Province) > 0 || len(parsedCertificate.Subject.StreetAddress) > 0 || @@ -655,8 +623,13 @@ func (ra *RegistrationAuthorityImpl) MatchesCSR(parsedCertificate *x509.Certific if parsedCertificate.IsCA { return berrors.InternalServerError("generated certificate can sign other certificates") } - if !reflect.DeepEqual(parsedCertificate.ExtKeyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}) { - return berrors.InternalServerError("generated certificate doesn't have correct key usage extensions") + for _, eku := range parsedCertificate.ExtKeyUsage { + if eku != x509.ExtKeyUsageServerAuth && eku != x509.ExtKeyUsageClientAuth { + return berrors.InternalServerError("generated certificate has unacceptable EKU") + } + } + if !slices.Contains(parsedCertificate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) { + return berrors.InternalServerError("generated certificate doesn't have serverAuth EKU") } return nil @@ -669,9 +642,10 @@ func (ra *RegistrationAuthorityImpl) MatchesCSR(parsedCertificate *x509.Certific // will be of type BoulderError. 
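// The returned map is keyed by identifier, so callers can look up the
// authorization backing each name; for example (illustrative, with oID,
// acctID, and idents as hypothetical in-scope values):
//
//	authzs, err := ra.checkOrderAuthorizations(ctx, oID, acctID, idents, ra.clk.Now())
//	authz := authzs[identifier.NewDNS("example.com")]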
func (ra *RegistrationAuthorityImpl) checkOrderAuthorizations( ctx context.Context, - names []string, + orderID orderID, acctID accountID, - orderID orderID) (map[string]*core.Authorization, error) { + idents identifier.ACMEIdentifiers, + now time.Time) (map[identifier.ACMEIdentifier]*core.Authorization, error) { // Get all of the valid authorizations for this account/order req := &sapb.GetValidOrderAuthorizationsRequest{ Id: int64(orderID), @@ -685,11 +659,65 @@ func (ra *RegistrationAuthorityImpl) checkOrderAuthorizations( if err != nil { return nil, err } - // Ensure the names from the CSR are free of duplicates & lowercased. - names = core.UniqueLowerNames(names) - // Check the authorizations to ensure validity for the names required. - if err = ra.checkAuthorizationsCAA(ctx, names, authzs, int64(acctID), ra.clk.Now()); err != nil { - return nil, err + + // Ensure that every identifier has a matching authz, and vice-versa. + var missing []string + var invalid []string + var expired []string + for _, ident := range idents { + authz, ok := authzs[ident] + if !ok || authz == nil { + missing = append(missing, ident.Value) + continue + } + if authz.Status != core.StatusValid { + invalid = append(invalid, ident.Value) + continue + } + if authz.Expires.Before(now) { + expired = append(expired, ident.Value) + continue + } + err = ra.PA.CheckAuthzChallenges(authz) + if err != nil { + invalid = append(invalid, ident.Value) + continue + } + } + + if len(missing) > 0 { + return nil, berrors.UnauthorizedError( + "authorizations for these identifiers not found: %s", + strings.Join(missing, ", "), + ) + } + + if len(invalid) > 0 { + return nil, berrors.UnauthorizedError( + "authorizations for these identifiers not valid: %s", + strings.Join(invalid, ", "), + ) + } + if len(expired) > 0 { + return nil, berrors.UnauthorizedError( + "authorizations for these identifiers expired: %s", + strings.Join(expired, ", "), + ) + } + + // Even though this check is cheap, we do it after the more specific checks + // so that we can return more specific error messages. + if len(idents) != len(authzs) { + return nil, berrors.UnauthorizedError("incorrect number of identifiers requested for finalization") + } + + if !features.Get().CAARechecksFailOrder { + // Check that the authzs either don't need CAA rechecking, or do the + // necessary CAA rechecks right now. + err = ra.checkAuthorizationsCAA(ctx, int64(acctID), authzs, now) + if err != nil { + return nil, err + } } return authzs, nil @@ -700,27 +728,26 @@ func (ra *RegistrationAuthorityImpl) checkOrderAuthorizations( func validatedBefore(authz *core.Authorization, caaRecheckTime time.Time) (bool, error) { numChallenges := len(authz.Challenges) if numChallenges != 1 { - return false, fmt.Errorf("authorization has incorrect number of challenges. 1 expected, %d found for: id %s", numChallenges, authz.ID) + return false, berrors.InternalServerError("authorization has incorrect number of challenges. 
1 expected, %d found for: id %s", numChallenges, authz.ID)
	}
	if authz.Challenges[0].Validated == nil {
-		return false, fmt.Errorf("authorization's challenge has no validated timestamp for: id %s", authz.ID)
+		return false, berrors.InternalServerError("authorization's challenge has no validated timestamp for: id %s", authz.ID)
	}
	return authz.Challenges[0].Validated.Before(caaRecheckTime), nil
}

-// checkAuthorizationsCAA implements the common logic of validating a set of
-// authorizations against a set of names that is used by both
-// `checkAuthorizations` and `checkOrderAuthorizations`. If required CAA will be
-// rechecked for authorizations that are too old.
-// If it returns an error, it will be of type BoulderError.
+// checkAuthorizationsCAA ensures that we have sufficiently-recent CAA checks
+// for every input identifier/authz. If any authz was validated too long ago, it
+// kicks off a CAA recheck for that identifier. If it returns an error, it will
+// be of type BoulderError.
 func (ra *RegistrationAuthorityImpl) checkAuthorizationsCAA(
 	ctx context.Context,
-	names []string,
-	authzs map[string]*core.Authorization,
-	regID int64,
+	acctID int64,
+	authzs map[identifier.ACMEIdentifier]*core.Authorization,
 	now time.Time) error {
-	// badNames contains the names that were unauthorized
-	var badNames []string
+	if len(authzs) == 0 {
+		return berrors.MalformedError("order with no authorizations")
+	}
 	// recheckAuthzs is a list of authorizations that must have their CAA records rechecked
 	var recheckAuthzs []*core.Authorization
@@ -731,35 +758,20 @@ func (ra *RegistrationAuthorityImpl) checkAuthorizationsCAA(
 	// check to see if the authorized challenge `AttemptedAt`
 	// (`Validated`) value from the database is before our caaRecheckTime.
 	// Set the recheck time to 7 hours ago.
-	caaRecheckAfter := now.Add(-7 * time.Hour)
-
-	// Set a CAA recheck time based on the assumption of a 30 day authz
-	// lifetime. This has been deprecated in favor of a new check based
-	// off the Validated time stored in the database, but we want to check
-	// both for a time and increment a stat if this code path is hit for
-	// compliance safety.
-	caaRecheckTime := now.Add(ra.authorizationLifetime).Add(-7 * time.Hour)
-
-	for _, name := range names {
-		authz := authzs[name]
-		if authz == nil {
-			badNames = append(badNames, name)
-		} else if authz.Expires == nil {
-			return berrors.InternalServerError("found an authorization with a nil Expires field: id %s", authz.ID)
-		} else if authz.Expires.Before(now) {
-			badNames = append(badNames, name)
-		} else if staleCAA, err := validatedBefore(authz, caaRecheckAfter); err != nil {
-			return berrors.InternalServerError(err.Error())
+	caaRecheckAfter := now.Add(caaRecheckDuration)
+
+	for _, authz := range authzs {
+		if staleCAA, err := validatedBefore(authz, caaRecheckAfter); err != nil {
+			return err
 		} else if staleCAA {
-			// Ensure that CAA is rechecked for this name
-			recheckAuthzs = append(recheckAuthzs, authz)
-		} else if authz.Expires.Before(caaRecheckTime) {
-			// Ensure that CAA is rechecked for this name
-			recheckAuthzs = append(recheckAuthzs, authz)
-			// This codepath should not be used, but is here as a safety
-			// net until the new codepath is proven. Increment metric if
-			// it is used.
- ra.recheckCAAUsedAuthzLifetime.Add(1) + switch authz.Identifier.Type { + case identifier.TypeDNS: + // Ensure that CAA is rechecked for this name + recheckAuthzs = append(recheckAuthzs, authz) + case identifier.TypeIP: + default: + return berrors.MalformedError("invalid identifier type: %s", authz.Identifier.Type) + } } } @@ -770,17 +782,17 @@ func (ra *RegistrationAuthorityImpl) checkAuthorizationsCAA( } } - if len(badNames) > 0 { - return berrors.UnauthorizedError( - "authorizations for these names not found or expired: %s", - strings.Join(badNames, ", "), - ) + caaEvent := &finalizationCAACheckEvent{ + Requester: acctID, + Reused: len(authzs) - len(recheckAuthzs), + Rechecked: len(recheckAuthzs), } + ra.log.InfoObject("FinalizationCaaCheck", caaEvent) return nil } -// recheckCAA accepts a list of of names that need to have their CAA records +// recheckCAA accepts a list of names that need to have their CAA records // rechecked because their associated authorizations are sufficiently old and // performs the CAA checks required for each. If any of the rechecks fail an // error is returned. @@ -794,8 +806,6 @@ func (ra *RegistrationAuthorityImpl) recheckCAA(ctx context.Context, authzs []*c ch := make(chan authzCAAResult, len(authzs)) for _, authz := range authzs { go func(authz *core.Authorization) { - name := authz.Identifier.Value - // If an authorization has multiple valid challenges, // the type of the first valid challenge is used for // the purposes of CAA rechecking. @@ -811,24 +821,29 @@ func (ra *RegistrationAuthorityImpl) recheckCAA(ctx context.Context, authzs []*c authz: authz, err: berrors.InternalServerError( "Internal error determining validation method for authorization ID %v (%v)", - authz.ID, name), + authz.ID, authz.Identifier.Value), } return } - - resp, err := ra.caa.IsCAAValid(ctx, &vapb.IsCAAValidRequest{ - Domain: name, + var resp *vapb.IsCAAValidResponse + var err error + resp, err = ra.VA.DoCAA(ctx, &vapb.IsCAAValidRequest{ + Identifier: authz.Identifier.ToProto(), ValidationMethod: method, AccountURIID: authz.RegistrationID, }) if err != nil { - ra.log.AuditErrf("Rechecking CAA: %s", err) + ra.log.AuditErr("Rechecking CAA", err, map[string]any{ + "requester": authz.RegistrationID, + "identifier": authz.Identifier.Value, + "method": method, + }) err = berrors.InternalServerError( "Internal error rechecking CAA for authorization ID %v (%v)", - authz.ID, name, + authz.ID, authz.Identifier.Value, ) } else if resp.Problem != nil { - err = berrors.CAAError(resp.Problem.Detail) + err = berrors.CAAError("rechecking caa: %s", resp.Problem.Detail) } ch <- authzCAAResult{ authz: authz, @@ -838,7 +853,7 @@ func (ra *RegistrationAuthorityImpl) recheckCAA(ctx context.Context, authzs []*c } var subErrors []berrors.SubBoulderError // Read a recheckResult for each authz from the results channel - for i := 0; i < len(authzs); i++ { + for range len(authzs) { recheckResult := <-ch // If the result had a CAA boulder error, construct a suberror with the // identifier from the authorization that was checked. @@ -876,32 +891,43 @@ func (ra *RegistrationAuthorityImpl) recheckCAA(ctx context.Context, authzs []*c // failOrder marks an order as failed by setting the problem details field of // the order & persisting it through the SA. If an error occurs doing this we -// log it and return the order as-is. There aren't any alternatives if we can't -// add the error to the order. +// log it and don't modify the input order. 
There aren't any alternatives if we +// can't add the error to the order. This function MUST only be called when we +// are already returning an error for another reason. func (ra *RegistrationAuthorityImpl) failOrder( ctx context.Context, order *corepb.Order, - prob *probs.ProblemDetails) *corepb.Order { + prob *probs.ProblemDetails) { + // Use a separate context with its own timeout, since the error we encountered + // may have been a context cancellation or timeout, and these operations still + // need to succeed. + ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 1*time.Second) + defer cancel() // Convert the problem to a protobuf problem for the *corepb.Order field - pbProb, err := bgrpc.ProblemDetailsToPB(prob) - if err != nil { - ra.log.AuditErrf("Could not convert order error problem to PB: %q", err) - return order - } + pbProb := bgrpc.ProblemDetailsToPB(prob) // Assign the protobuf problem to the field and save it via the SA order.Error = pbProb - _, err = ra.SA.SetOrderError(ctx, &sapb.SetOrderErrorRequest{ + _, err := ra.SA.SetOrderError(ctx, &sapb.SetOrderErrorRequest{ Id: order.Id, Error: order.Error, }) if err != nil { - ra.log.AuditErrf("Could not persist order error: %q", err) + ra.log.AuditErr("Persisting failed order", err, map[string]any{ + "requester": order.RegistrationID, + "order": order.Id, + "prob": order.Error.String(), + }) } - return order } +// To help minimize the chance that an accountID would be used as an order ID +// (or vice versa) when calling functions that use both we define internal +// `accountID` and `orderID` types so that callers must explicitly cast. +type accountID int64 +type orderID int64 + // FinalizeOrder accepts a request to finalize an order object and, if possible, // issues a certificate to satisfy the order. If an order does not have valid, // unexpired authorizations for all of its associated names an error is @@ -910,262 +936,365 @@ func (ra *RegistrationAuthorityImpl) failOrder( // If successful the order will be returned in processing status for the client // to poll while awaiting finalization to occur. func (ra *RegistrationAuthorityImpl) FinalizeOrder(ctx context.Context, req *rapb.FinalizeOrderRequest) (*corepb.Order, error) { - if req == nil || req.Order == nil { + // Step 1: Set up logging/tracing and validate the Order + if req == nil || req.Order == nil || len(req.Csr) == 0 { return nil, errIncompleteGRPCRequest } - order := req.Order - - if order.Status != string(core.StatusReady) { - return nil, berrors.OrderNotReadyError( - "Order's status (%q) is not acceptable for finalization", - order.Status) - } - - // There should never be an order with 0 names at the stage the RA is - // processing the order but we check to be on the safe side, throwing an - // internal server error if this assumption is ever violated. - if len(order.Names) == 0 { - return nil, berrors.InternalServerError("Order has no associated names") - } - - // Parse the CSR from the request - csrOb, err := x509.ParseCertificateRequest(req.Csr) - if err != nil { - return nil, err + logEvent := certificateRequestEvent{ + ID: core.NewToken(), + OrderID: req.Order.Id, + Requester: req.Order.RegistrationID, + RequestTime: ra.clk.Now(), + UserAgent: web.UserAgent(ctx), } - - err = csrlib.VerifyCSR(ctx, csrOb, ra.maxNames, &ra.keyPolicy, ra.PA) + csr, authzs, err := ra.validateFinalizeRequest(ctx, req, &logEvent) if err != nil { - // VerifyCSR returns berror instances that can be passed through as-is - // without wrapping. 
return nil, err } - // Dedupe, lowercase and sort both the names from the CSR and the names in the - // order. - csrNames := core.UniqueLowerNames(csrOb.DNSNames) - orderNames := core.UniqueLowerNames(order.Names) - - // Immediately reject the request if the number of names differ - if len(orderNames) != len(csrNames) { - return nil, berrors.UnauthorizedError("Order includes different number of names than CSR specifies") - } - - // Check that the order names and the CSR names are an exact match - for i, name := range orderNames { - if name != csrNames[i] { - return nil, berrors.UnauthorizedError("CSR is missing Order domain %q", name) - } - } + // Observe the age of this order, so we know how quickly most clients complete + // issuance flows. + ra.orderAges.WithLabelValues("FinalizeOrder").Observe(ra.clk.Since(req.Order.Created.AsTime()).Seconds()) - // Update the order to be status processing - we issue synchronously at the - // present time so this is somewhat artificial/unnecessary but allows planning - // for the future. + // Step 2: Set the Order to Processing status + // + // We do this separately from the issuance process itself so that, when we + // switch to doing issuance asynchronously, we aren't lying to the client + // when we say that their order is already Processing. // // NOTE(@cpu): After this point any errors that are encountered must update // the state of the order to invalid by setting the order's error field. // Otherwise the order will be "stuck" in processing state. It can not be // finalized because it isn't pending, but we aren't going to process it // further because we already did and encountered an error. - _, err = ra.SA.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: order.Id}) + _, err = ra.SA.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: req.Order.Id}) if err != nil { // Fail the order with a server internal error - we weren't able to set the // status to processing and that's unexpected & weird. - ra.failOrder(ctx, order, probs.ServerInternal("Error setting order processing")) + ra.failOrder(ctx, req.Order, probs.ServerInternal("Error setting order processing")) return nil, err } - // Attempt issuance for the order. If the order isn't fully authorized this - // will return an error. - issueReq := core.CertificateRequest{ - Bytes: req.Csr, - CSR: csrOb, + // Update the order status locally since the SA doesn't return the updated + // order itself after setting the status + order := req.Order + order.Status = string(core.StatusProcessing) + + // Steps 3 (issuance) and 4 (cleanup) are done inside a helper function so + // that we can control whether or not that work happens asynchronously. + if features.Get().AsyncFinalize { + // We do this work in a goroutine so that we can better handle latency from + // getting SCTs and writing the (pre)certificate to the database. This lets + // us return the order in the Processing state to the client immediately, + // prompting them to poll the Order object and wait for it to be put into + // its final state. + // + // We track this goroutine's lifetime in a waitgroup global to this RA, so + // that it can wait for all goroutines to drain during shutdown. + ra.drainWG.Go(func() { + // The original context will be canceled in the RPC layer when FinalizeOrder returns, + // so split off a context that won't be canceled (and has its own timeout). 
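+			// context.WithoutCancel keeps the values stored in the original
+			// context (e.g. tracing metadata) but drops its cancelation and
+			// deadline, so the timeout below becomes the only limit.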
+ ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), ra.finalizeTimeout) + defer cancel() + + _, err := ra.issueCertificateOuter(ctx, proto.Clone(order).(*corepb.Order), csr, authzs, logEvent) + if err != nil { + // We only log here, because this is in a background goroutine with + // no parent goroutine waiting for it to receive the error. + ra.log.AuditErr("Asynchronous finalization failed", err, map[string]any{ + "requester": order.RegistrationID, + "order": order.Id, + }) + } + }) + return order, nil + } else { + return ra.issueCertificateOuter(ctx, order, csr, authzs, logEvent) } - // We use IssuerNameID 0 here because (as of now) only the v1 flow sets this - // field. This v2 flow allows the CA to select the issuer based on the CSR's - // PublicKeyAlgorithm. - cert, err := ra.issueCertificate(ctx, issueReq, accountID(order.RegistrationID), orderID(order.Id), issuance.IssuerNameID(0)) - if err != nil { - // Fail the order. The problem is computed using - // `web.ProblemDetailsForError`, the same function the WFE uses to convert - // between `berrors` and problems. This will turn normal expected berrors like - // berrors.UnauthorizedError into the correct - // `urn:ietf:params:acme:error:unauthorized` problem while not letting - // anything like a server internal error through with sensitive info. - ra.failOrder(ctx, order, web.ProblemDetailsForError(err, "Error finalizing order")) - return nil, err +} + +// containsMustStaple returns true if the provided set of extensions includes +// an entry whose OID and value both match the expected values for the OCSP +// Must-Staple (a.k.a. id-pe-tlsFeature) extension. +func containsMustStaple(extensions []pkix.Extension) bool { + // RFC 7633: id-pe-tlsfeature OBJECT IDENTIFIER ::= { id-pe 24 } + var mustStapleExtId = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} + // ASN.1 encoding of: + // SEQUENCE + // INTEGER 5 + // where "5" is the status_request feature (RFC 6066) + var mustStapleExtValue = []byte{0x30, 0x03, 0x02, 0x01, 0x05} + + for _, ext := range extensions { + if ext.Id.Equal(mustStapleExtId) && bytes.Equal(ext.Value, mustStapleExtValue) { + return true + } } + return false +} - // Parse the issued certificate to get the serial - parsedCertificate, err := x509.ParseCertificate([]byte(cert.DER)) - if err != nil { - // Fail the order with a server internal error. The certificate we failed - // to parse was from our own CA. Bad news! - ra.failOrder(ctx, order, probs.ServerInternal("Error parsing certificate DER")) - return nil, err +// validateFinalizeRequest checks that a FinalizeOrder request is fully correct +// and ready for issuance. +// +// Returns a CertificateRequest, a map of identifiers to authorizations, and an error. +func (ra *RegistrationAuthorityImpl) validateFinalizeRequest( + ctx context.Context, + req *rapb.FinalizeOrderRequest, + logEvent *certificateRequestEvent) ( + *x509.CertificateRequest, map[identifier.ACMEIdentifier]*core.Authorization, error) { + if req.Order.Id <= 0 { + return nil, nil, berrors.MalformedError("invalid order ID: %d", req.Order.Id) } - // Finalize the order with its new CertificateSerial - order.CertificateSerial = core.SerialToString(parsedCertificate.SerialNumber) - _, err = ra.SA.FinalizeOrder(ctx, &sapb.FinalizeOrderRequest{Id: order.Id, CertificateSerial: order.CertificateSerial}) - if err != nil { - // Fail the order with a server internal error. We weren't able to persist - // the certificate serial and that's unexpected & weird. 
- ra.failOrder(ctx, order, probs.ServerInternal("Error persisting finalized order")) - return nil, err + if req.Order.RegistrationID <= 0 { + return nil, nil, berrors.MalformedError("invalid account ID: %d", req.Order.RegistrationID) } - // Note how many names were in this finalized certificate order. - ra.namesPerCert.With( - prometheus.Labels{"type": "issued"}, - ).Observe(float64(len(order.Names))) + if core.AcmeStatus(req.Order.Status) != core.StatusReady { + return nil, nil, berrors.OrderNotReadyError( + "Order's status (%q) is not acceptable for finalization", + req.Order.Status) + } - // Update the order status locally since the SA doesn't return the updated - // order itself after setting the status - order.Status = string(core.StatusValid) - return order, nil -} + profile, err := ra.profiles.get(req.Order.CertificateProfileName) + if err != nil { + return nil, nil, err + } -// To help minimize the chance that an accountID would be used as an order ID -// (or vice versa) when calling `issueCertificate` we define internal -// `accountID` and `orderID` types so that callers must explicitly cast. -type accountID int64 -type orderID int64 + orderIdents := identifier.Normalize(identifier.FromProtoSlice(req.Order.Identifiers)) -// issueCertificate sets up a log event structure and captures any errors -// encountered during issuance, then calls issueCertificateInner. -// -// At this time, all callers of this function set issuerNameID to be zero, which -// allows the CA to pick the issuer based on the CSR's PublicKeyAlgorithm. -func (ra *RegistrationAuthorityImpl) issueCertificate( - ctx context.Context, - req core.CertificateRequest, - acctID accountID, - oID orderID, - issuerNameID issuance.IssuerNameID) (core.Certificate, error) { - // Construct the log event - logEvent := certificateRequestEvent{ - ID: core.NewToken(), - OrderID: int64(oID), - Requester: int64(acctID), - RequestTime: ra.clk.Now(), + // There should never be an order with 0 identifiers at the stage, but we check to + // be on the safe side, throwing an internal server error if this assumption + // is ever violated. + if len(orderIdents) == 0 { + return nil, nil, berrors.InternalServerError("Order has no associated identifiers") } - beeline.AddFieldToTrace(ctx, "issuance.id", logEvent.ID) - beeline.AddFieldToTrace(ctx, "order.id", oID) - beeline.AddFieldToTrace(ctx, "acct.id", acctID) - var result string - cert, err := ra.issueCertificateInner(ctx, req, acctID, oID, issuerNameID, &logEvent) + + // Parse the CSR from the request + csr, err := x509.ParseCertificateRequest(req.Csr) if err != nil { - logEvent.Error = err.Error() - beeline.AddFieldToTrace(ctx, "issuance.error", err) - result = "error" - } else { - result = "successful" + return nil, nil, berrors.BadCSRError("unable to parse CSR: %s", err.Error()) } - logEvent.ResponseTime = ra.clk.Now() - ra.log.AuditObject(fmt.Sprintf("Certificate request - %s", result), logEvent) - return cert, err -} -// issueCertificateInner handles the heavy lifting aspects of certificate -// issuance. -// -// This function is responsible for ensuring that we never try to issue a final -// certificate twice for the same precertificate, because that has the potential -// to create certificates with duplicate serials. For instance, this could -// happen if final certificates were created with different sets of SCTs. 
This -// function accomplishes that by bailing on issuance if there is any error in -// IssueCertificateForPrecertificate; there are no retries, and serials are -// generated in IssuePrecertificate, so serials with errors are dropped and -// never have final certificates issued for them (because there is a possibility -// that the certificate was actually issued but there was an error returning -// it). -func (ra *RegistrationAuthorityImpl) issueCertificateInner( - ctx context.Context, - req core.CertificateRequest, - acctID accountID, - oID orderID, - issuerNameID issuance.IssuerNameID, - logEvent *certificateRequestEvent) (core.Certificate, error) { - emptyCert := core.Certificate{} - if acctID <= 0 { - return emptyCert, berrors.MalformedError("invalid account ID: %d", acctID) + if containsMustStaple(csr.Extensions) { + return nil, nil, berrors.UnauthorizedError( + "OCSP must-staple extension is no longer available: see https://letsencrypt.org/2024/12/05/ending-ocsp", + ) + } + + err = csrlib.VerifyCSR(ctx, csr, profile.maxNames, &ra.keyPolicy, ra.PA) + if err != nil { + // VerifyCSR returns berror instances that can be passed through as-is + // without wrapping. + return nil, nil, err } - if oID <= 0 { - return emptyCert, berrors.MalformedError("invalid order ID: %d", oID) + // Dedupe, lowercase and sort both the names from the CSR and the names in the + // order. + csrIdents := identifier.FromCSR(csr) + // Check that the order names and the CSR names are an exact match + if !slices.Equal(csrIdents, orderIdents) { + return nil, nil, berrors.UnauthorizedError("CSR does not specify same identifiers as Order") } - regPB, err := ra.SA.GetRegistration(ctx, &sapb.RegistrationID{Id: int64(acctID)}) + // Get the originating account for use in the next check. + regPB, err := ra.SA.GetRegistration(ctx, &sapb.RegistrationID{Id: req.Order.RegistrationID}) if err != nil { - return emptyCert, err + return nil, nil, err } + account, err := bgrpc.PbToRegistration(regPB) if err != nil { - return emptyCert, err + return nil, nil, err + } + + // Make sure they're not using their account key as the certificate key too. + if core.KeyDigestEquals(csr.PublicKey, account.Key) { + return nil, nil, berrors.MalformedError("certificate public key must be different than account key") + } + + // Double-check that all authorizations on this order are valid, are also + // associated with the same account as the order itself, and have recent CAA. + authzs, err := ra.checkOrderAuthorizations( + ctx, orderID(req.Order.Id), accountID(req.Order.RegistrationID), csrIdents, ra.clk.Now()) + if err != nil { + // Pass through the error without wrapping it because the called functions + // return BoulderError and we don't want to lose the type. + return nil, nil, err + } + + // Collect up identifierLogs to log validation information for each identifier. + logIdents := make([]identifierLog, 0) + for ident, authz := range authzs { + // We know that at least one challenge is valid, because this was just + // confirmed by ra.checkOrderAuthorizations. 
+ var solvedChall core.Challenge + for _, chall := range authz.Challenges { + if chall.Status == core.StatusValid { + solvedChall = chall + break + } + } + logIdents = append(logIdents, identifierLog{ + Ident: ident, + Authz: authz.ID, + Challenge: solvedChall.Type, + Validated: *solvedChall.Validated, + }) + authzAge := (profile.validAuthzLifetime - authz.Expires.Sub(ra.clk.Now())).Seconds() + ra.authzAges.WithLabelValues("FinalizeOrder", string(authz.Status)).Observe(authzAge) } + logEvent.Identifiers = logIdents - csr := req.CSR - logEvent.CommonName = csr.Subject.CommonName - beeline.AddFieldToTrace(ctx, "csr.cn", csr.Subject.CommonName) - logEvent.Names = csr.DNSNames - beeline.AddFieldToTrace(ctx, "csr.dnsnames", csr.DNSNames) + // Mark that we verified the CN and SANs + logEvent.VerifiedFields = []string{"subject.commonName", "subjectAltName"} - // Validate that authorization key is authorized for all domains in the CSR - names := make([]string, len(csr.DNSNames)) - copy(names, csr.DNSNames) + return csr, authzs, nil +} - if core.KeyDigestEquals(csr.PublicKey, account.Key) { - return emptyCert, berrors.MalformedError("certificate public key must be different than account key") +func (ra *RegistrationAuthorityImpl) GetSCTs(ctx context.Context, sctRequest *rapb.SCTRequest) (*rapb.SCTResponse, error) { + scts, err := ra.getSCTs(ctx, sctRequest.PrecertDER) + if err != nil { + return nil, err } + return &rapb.SCTResponse{ + SctDER: scts, + }, nil +} - // Check rate limits before checking authorizations. If someone is unable to - // issue a cert due to rate limiting, we don't want to tell them to go get the - // necessary authorizations, only to later fail the rate limit check. - err = ra.checkLimits(ctx, names, account.ID) +// issueCertificateOuter exists solely to ensure that all calls to +// issueCertificateInner have their result handled uniformly, no matter what +// return path that inner function takes. It takes ownership of the logEvent, +// mutates it, and is responsible for outputting its final state. 
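+//
+// Illustrative contract for both the success and error paths:
+//
+//	order, err := ra.issueCertificateOuter(ctx, order, csr, authzs, logEvent)
+//	// order.Status is now StatusValid or StatusInvalid, and a
+//	// "Certificate request - successful|error" audit event has been logged.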
+func (ra *RegistrationAuthorityImpl) issueCertificateOuter( + ctx context.Context, + order *corepb.Order, + csr *x509.CertificateRequest, + authzs map[identifier.ACMEIdentifier]*core.Authorization, + logEvent certificateRequestEvent, +) (*corepb.Order, error) { + ra.inflightFinalizes.Inc() + defer ra.inflightFinalizes.Dec() + + idents := identifier.FromProtoSlice(order.Identifiers) + + isRenewal := false + timestamps, err := ra.SA.FQDNSetTimestampsForWindow(ctx, &sapb.CountFQDNSetsRequest{ + Identifiers: idents.ToProtoSlice(), + Window: durationpb.New(120 * 24 * time.Hour), + Limit: 1, + }) if err != nil { - return emptyCert, err + return nil, fmt.Errorf("checking if certificate is a renewal: %w", err) + } + if len(timestamps.Timestamps) > 0 { + isRenewal = true + logEvent.PreviousCertificateIssued = timestamps.Timestamps[0].AsTime() + } + + profileName := order.CertificateProfileName + if profileName == "" { + profileName = ra.profiles.defaultName } - // Check that this specific order is fully authorized and associated with - // the expected account ID - authzs, err := ra.checkOrderAuthorizations(ctx, names, acctID, oID) + // Step 3: Issue the Certificate + cert, err := ra.issueCertificateInner( + ctx, csr, authzs, isRenewal, profileName, accountID(order.RegistrationID), orderID(order.Id)) + + // Step 4: Fail the order if necessary, and update metrics and log fields + var result string if err != nil { - // Pass through the error without wrapping it because the called functions - // return BoulderError and we don't want to lose the type. - return emptyCert, err + // The problem is computed using `web.ProblemDetailsForError`, the same + // function the WFE uses to convert between `berrors` and problems. This + // will turn normal expected berrors like berrors.UnauthorizedError into the + // correct `urn:ietf:params:acme:error:unauthorized` problem while not + // letting anything like a server internal error through with sensitive + // info. + ra.failOrder(ctx, order, web.ProblemDetailsForError(err, "Error finalizing order")) + order.Status = string(core.StatusInvalid) + + logEvent.Error = err.Error() + result = "error" + } else { + order.CertificateSerial = core.SerialToString(cert.SerialNumber) + order.Status = string(core.StatusValid) + + ra.namesPerCert.With( + prometheus.Labels{"type": "issued"}, + ).Observe(float64(len(idents))) + + ra.newCertCounter.Inc() + + logEvent.SerialNumber = core.SerialToString(cert.SerialNumber) + logEvent.CommonName = cert.Subject.CommonName + logEvent.NotBefore = cert.NotBefore + logEvent.NotAfter = cert.NotAfter + logEvent.CertProfileName = profileName + + result = "successful" } - // Collect up a certificateRequestAuthz that stores the ID and challenge type - // of each of the valid authorizations we used for this issuance. - logEventAuthzs := make(map[string]certificateRequestAuthz, len(names)) - for name, authz := range authzs { - // If the authz has no solved by challenge type there has been an internal - // consistency violation worth logging a warning about. In this case the - // solvedByChallengeType will be logged as the empty string. 
-		solvedByChallengeType, err := authz.SolvedBy()
-		if err != nil || solvedByChallengeType == nil {
-			ra.log.Warningf("Authz %q has status %q but empty SolvedBy(): %s", authz.ID, authz.Status, err)
-		}
-		logEventAuthzs[name] = certificateRequestAuthz{
-			ID:            authz.ID,
-			ChallengeType: *solvedByChallengeType,
+	logEvent.ResponseTime = ra.clk.Now()
+	ra.log.AuditInfo(fmt.Sprintf("Certificate request - %s", result), logEvent)
+
+	return order, err
+}
+
+// countCertificateIssued increments the certificates (per domain and per
+// account) and duplicate certificate rate limits. There is no reason to
+// surface errors from this function to the Subscriber; spends against these
+// limits are best-effort.
+func (ra *RegistrationAuthorityImpl) countCertificateIssued(ctx context.Context, regId int64, orderIdents identifier.ACMEIdentifiers, isRenewal bool) {
+	var transactions []ratelimits.Transaction
+	if !isRenewal {
+		txns, err := ra.txnBuilder.CertificatesPerDomainSpendOnlyTransactions(regId, orderIdents)
+		if err != nil {
+			ra.log.Warningf("building rate limit transactions at finalize: %s", err)
		}
+		transactions = append(transactions, txns...)
	}
-	logEvent.Authorizations = logEventAuthzs
-	// Mark that we verified the CN and SANs
-	logEvent.VerifiedFields = []string{"subject.commonName", "subjectAltName"}
+	txn, err := ra.txnBuilder.CertificatesPerFQDNSetSpendOnlyTransaction(orderIdents)
+	if err != nil {
+		ra.log.Warningf("building rate limit transaction at finalize: %s", err)
+	}
+	transactions = append(transactions, txn)
-	// Create the certificate and log the result
-	issueReq := &capb.IssueCertificateRequest{
-		Csr:            csr.Raw,
-		RegistrationID: int64(acctID),
-		OrderID:        int64(oID),
-		IssuerNameID:   int64(issuerNameID),
+	_, err = ra.limiter.BatchSpend(ctx, transactions)
+	if err != nil {
+		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+			return
+		}
+		ra.log.Warningf("spending against rate limits at finalize: %s", err)
	}
+}
+// issueCertificateInner is part of the [issuance cycle].
+//
+// It gets a precertificate from the CA, submits it to CT logs to get SCTs,
+// then sends the precertificate and the SCTs to the CA to get a final certificate.
+//
+// This function is responsible for ensuring that we never try to issue a final
+// certificate twice for the same precertificate, because that has the potential
+// to create certificates with duplicate serials. For instance, this could
+// happen if final certificates were created with different sets of SCTs. This
+// function accomplishes that by bailing on issuance if there is any error in
+// IssueCertificateForPrecertificate; there are no retries, and serials are
+// generated in IssuePrecertificate, so serials with errors are dropped and
+// never have final certificates issued for them (because there is a possibility
+// that the certificate was actually issued but there was an error returning
+// it).
+//
+// [issuance cycle]: https://github.com/letsencrypt/boulder/blob/main/docs/ISSUANCE-CYCLE.md
+func (ra *RegistrationAuthorityImpl) issueCertificateInner(
+	ctx context.Context,
+	csr *x509.CertificateRequest,
+	authzs map[identifier.ACMEIdentifier]*core.Authorization,
+	isRenewal bool,
+	profileName string,
+	acctID accountID,
+	oID orderID) (*x509.Certificate, error) {
	// wrapError adds a prefix to an error. If the error is a boulder error then
	// the problem detail is updated with the prefix.
Otherwise a new error is // returned with the message prefixed using `fmt.Errorf` @@ -1177,74 +1306,70 @@ func (ra *RegistrationAuthorityImpl) issueCertificateInner( return fmt.Errorf("%s: %s", prefix, e) } - precert, err := ra.CA.IssuePrecertificate(ctx, issueReq) - if err != nil { - return emptyCert, wrapError(err, "issuing precertificate") - } - parsedPrecert, err := x509.ParseCertificate(precert.DER) - if err != nil { - return emptyCert, wrapError(err, "parsing precertificate") + if features.Get().CAARechecksFailOrder { + // Check that the authzs either don't need CAA rechecking, or do the + // necessary CAA rechecks right now. + err := ra.checkAuthorizationsCAA(ctx, int64(acctID), authzs, ra.clk.Now()) + if err != nil { + return nil, err + } } - scts, err := ra.getSCTs(ctx, precert.DER, parsedPrecert.NotAfter) - if err != nil { - return emptyCert, wrapError(err, "getting SCTs") + + issueReq := &capb.IssueCertificateRequest{ + Csr: csr.Raw, + RegistrationID: int64(acctID), + OrderID: int64(oID), + CertProfileName: profileName, } - cert, err := ra.CA.IssueCertificateForPrecertificate(ctx, &capb.IssueCertificateForPrecertificateRequest{ - DER: precert.DER, - SCTs: scts, - RegistrationID: int64(acctID), - OrderID: int64(oID), - }) + + resp, err := ra.CA.IssueCertificate(ctx, issueReq) if err != nil { - return emptyCert, wrapError(err, "issuing certificate for precertificate") + return nil, err } - parsedCertificate, err := x509.ParseCertificate([]byte(cert.Der)) + parsedCertificate, err := x509.ParseCertificate(resp.DER) if err != nil { - // berrors.InternalServerError because the certificate from the CA should be - // parseable. - return emptyCert, berrors.InternalServerError("failed to parse certificate: %s", err.Error()) + return nil, wrapError(err, "parsing final certificate") } + ra.countCertificateIssued(ctx, int64(acctID), identifier.FromCert(parsedCertificate), isRenewal) + // Asynchronously submit the final certificate to any configured logs - go ra.ctpolicy.SubmitFinalCert(cert.Der, parsedCertificate.NotAfter) + go ra.ctpolicy.SubmitFinalCert(resp.DER, parsedCertificate.NotAfter) - err = ra.MatchesCSR(parsedCertificate, csr) + err = ra.matchesCSR(parsedCertificate, csr) if err != nil { - return emptyCert, err + ra.certCSRMismatch.Inc() + return nil, err } - logEvent.SerialNumber = core.SerialToString(parsedCertificate.SerialNumber) - beeline.AddFieldToTrace(ctx, "cert.serial", core.SerialToString(parsedCertificate.SerialNumber)) - logEvent.CommonName = parsedCertificate.Subject.CommonName - beeline.AddFieldToTrace(ctx, "cert.cn", parsedCertificate.Subject.CommonName) - logEvent.NotBefore = parsedCertificate.NotBefore - beeline.AddFieldToTrace(ctx, "cert.not_before", parsedCertificate.NotBefore) - logEvent.NotAfter = parsedCertificate.NotAfter - beeline.AddFieldToTrace(ctx, "cert.not_after", parsedCertificate.NotAfter) - - ra.newCertCounter.Inc() - res, err := bgrpc.PBToCert(cert) + _, err = ra.SA.FinalizeOrder(ctx, &sapb.FinalizeOrderRequest{ + Id: int64(oID), + CertificateSerial: core.SerialToString(parsedCertificate.SerialNumber), + }) if err != nil { - return emptyCert, nil + return nil, wrapError(err, "persisting finalized order") } - return res, nil + + return parsedCertificate, nil } -func (ra *RegistrationAuthorityImpl) getSCTs(ctx context.Context, cert []byte, expiration time.Time) (core.SCTDERs, error) { +func (ra *RegistrationAuthorityImpl) getSCTs(ctx context.Context, precertDER []byte) (core.SCTDERs, error) { started := ra.clk.Now() - scts, err := 
ra.ctpolicy.GetSCTs(ctx, cert, expiration) + precert, err := x509.ParseCertificate(precertDER) + if err != nil { + return nil, fmt.Errorf("parsing precertificate: %w", err) + } + + scts, err := ra.ctpolicy.GetSCTs(ctx, precertDER, precert.NotAfter) took := ra.clk.Since(started) - // The final cert has already been issued so actually return it to the - // user even if this fails since we aren't actually doing anything with - // the SCTs yet. if err != nil { state := "failure" if err == context.DeadlineExceeded { state = "deadlineExceeded" // Convert the error to a missingSCTsError to communicate the timeout, // otherwise it will be a generic serverInternalError - err = berrors.MissingSCTsError(err.Error()) + err = berrors.MissingSCTsError("failed to get SCTs: %s", err.Error()) } ra.log.Warningf("ctpolicy.GetSCTs failed: %s", err) ra.ctpolicyResults.With(prometheus.Labels{"result": state}).Observe(took.Seconds()) @@ -1254,315 +1379,104 @@ func (ra *RegistrationAuthorityImpl) getSCTs(ctx context.Context, cert []byte, e return scts, nil } -// domainsForRateLimiting transforms a list of FQDNs into a list of eTLD+1's -// for the purpose of rate limiting. It also de-duplicates the output -// domains. Exact public suffix matches are included. -func domainsForRateLimiting(names []string) ([]string, error) { - var domains []string - for _, name := range names { - domain, err := publicsuffix.Domain(name) - if err != nil { - // The only possible errors are: - // (1) publicsuffix.Domain is giving garbage values - // (2) the public suffix is the domain itself - // We assume 2 and include the original name in the result. - domains = append(domains, name) - } else { - domains = append(domains, domain) - } - } - return core.UniqueLowerNames(domains), nil -} - -// enforceNameCounts uses the provided count RPC to find a count of certificates -// for each of the names. If the count for any of the names exceeds the limit -// for the given registration then the names out of policy are returned to be -// used for a rate limit error. -func (ra *RegistrationAuthorityImpl) enforceNameCounts(ctx context.Context, names []string, limit ratelimit.RateLimitPolicy, regID int64) ([]string, error) { - now := ra.clk.Now() - req := &sapb.CountCertificatesByNamesRequest{ - Names: names, - Range: &sapb.Range{ - Earliest: limit.WindowBegin(now).UnixNano(), - Latest: now.UnixNano(), - }, +// UpdateRegistrationKey updates an existing Registration's key. +func (ra *RegistrationAuthorityImpl) UpdateRegistrationKey(ctx context.Context, req *rapb.UpdateRegistrationKeyRequest) (*corepb.Registration, error) { + if core.IsAnyNilOrZero(req.RegistrationID, req.Jwk) { + return nil, errIncompleteGRPCRequest } - response, err := ra.SA.CountCertificatesByNames(ctx, req) + update, err := ra.SA.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{ + RegistrationID: req.RegistrationID, + Jwk: req.Jwk, + }) if err != nil { - return nil, err - } - - if len(response.Counts) == 0 { - return nil, errIncompleteGRPCResponse + return nil, fmt.Errorf("failed to update registration key: %w", err) } - var badNames []string - // Find the names that have counts at or over the threshold. Range - // over the names slice input to ensure the order of badNames will - // return the badNames in the same order they were input. 
-	for _, name := range names {
-		if response.Counts[name] >= limit.GetThreshold(name, regID) {
-			badNames = append(badNames, name)
-		}
-	}
-	return badNames, nil
+	return update, nil
}

-func (ra *RegistrationAuthorityImpl) checkCertificatesPerNameLimit(ctx context.Context, names []string, limit ratelimit.RateLimitPolicy, regID int64) error {
-	// check if there is already an existing certificate for
-	// the exact name set we are issuing for. If so bypass the
-	// the certificatesPerName limit.
-	exists, err := ra.SA.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Domains: names})
-	if err != nil {
-		return fmt.Errorf("checking renewal exemption for %q: %s", names, err)
-	}
-	if exists.Exists {
-		ra.rateLimitCounter.WithLabelValues("certificates_for_domain", "FQDN set bypass").Inc()
-		return nil
-	}
-
-	tldNames, err := domainsForRateLimiting(names)
+// countFailedValidations increments the FailedAuthorizationsPerDomainPerAccount
+// and FailedAuthorizationsForPausingPerDomainPerAccount limits.
+func (ra *RegistrationAuthorityImpl) countFailedValidations(ctx context.Context, regId int64, ident identifier.ACMEIdentifier) error {
+	txn, err := ra.txnBuilder.FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(regId, ident)
	if err != nil {
-		return err
-	}
-
-	namesOutOfLimit, err := ra.enforceNameCounts(ctx, tldNames, limit, regID)
-	if err != nil {
-		return fmt.Errorf("checking certificates per name limit for %q: %s",
-			names, err)
-	}
-
-	if len(namesOutOfLimit) > 0 {
-		// check if there is already an existing certificate for
-		// the exact name set we are issuing for. If so bypass the
-		// the certificatesPerName limit.
-		exists, err := ra.SA.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Domains: names})
-		if err != nil {
-			return fmt.Errorf("checking renewal exemption for %q: %s", names, err)
-		}
-		if exists.Exists {
-			ra.rateLimitCounter.WithLabelValues("certificates_for_domain", "FQDN set bypass").Inc()
-			return nil
-		}
-
-		ra.log.Infof("Rate limit exceeded, CertificatesForDomain, regID: %d, domains: %s", regID, strings.Join(namesOutOfLimit, ", "))
-		ra.rateLimitCounter.WithLabelValues("certificates_for_domain", "exceeded").Inc()
-		if len(namesOutOfLimit) > 1 {
-			var subErrors []berrors.SubBoulderError
-			for _, name := range namesOutOfLimit {
-				subErrors = append(subErrors, berrors.SubBoulderError{
-					Identifier:   identifier.DNSIdentifier(name),
-					BoulderError: berrors.RateLimitError("too many certificates already issued").(*berrors.BoulderError),
-				})
-			}
-			return berrors.RateLimitError("too many certificates already issued for multiple names (%s and %d others)", namesOutOfLimit[0], len(namesOutOfLimit)).(*berrors.BoulderError).WithSubErrors(subErrors)
-		}
-		return berrors.RateLimitError("too many certificates already issued for: %s", namesOutOfLimit[0])
+		return fmt.Errorf("building rate limit transaction for the %s rate limit: %w", ratelimits.FailedAuthorizationsPerDomainPerAccount, err)
	}
-	ra.rateLimitCounter.WithLabelValues("certificates_for_domain", "pass").Inc()
-
-	return nil
-}

-func (ra *RegistrationAuthorityImpl) checkCertificatesPerFQDNSetLimit(ctx context.Context, names []string, limit ratelimit.RateLimitPolicy, regID int64) error {
-	count, err := ra.SA.CountFQDNSets(ctx, &sapb.CountFQDNSetsRequest{
-		Domains: names,
-		Window:  limit.Window.Duration.Nanoseconds(),
-	})
+	_, err = ra.limiter.Spend(ctx, txn)
	if err != nil {
-		return fmt.Errorf("checking duplicate certificate limit for %q: %s", names, err)
+		return fmt.Errorf("spending against the %s rate limit: %w", ratelimits.FailedAuthorizationsPerDomainPerAccount, err)
	}
-	names = core.UniqueLowerNames(names)
-	threshold := limit.GetThreshold(strings.Join(names, ","), regID)
-	if count.Count >= threshold {
-		return berrors.RateLimitError(
-			"too many certificates (%d) already issued for this exact set of domains in the last %.0f hours: %s",
-			threshold, limit.Window.Duration.Hours(), strings.Join(names, ","),
-		)
-	}
-	return nil
-}

-func (ra *RegistrationAuthorityImpl) checkLimits(ctx context.Context, names []string, regID int64) error {
-	certNameLimits := ra.rlPolicies.CertificatesPerName()
-	if certNameLimits.Enabled() {
-		err := ra.checkCertificatesPerNameLimit(ctx, names, certNameLimits, regID)
+	if features.Get().AutomaticallyPauseZombieClients {
+		txn, err = ra.txnBuilder.FailedAuthorizationsForPausingPerDomainPerAccountTransaction(regId, ident)
		if err != nil {
-			return err
+			return fmt.Errorf("building rate limit transaction for the %s rate limit: %w", ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, err)
		}
-	}
-	fqdnFastLimits := ra.rlPolicies.CertificatesPerFQDNSetFast()
-	if fqdnFastLimits.Enabled() {
-		err := ra.checkCertificatesPerFQDNSetLimit(ctx, names, fqdnFastLimits, regID)
+		decision, err := ra.limiter.Spend(ctx, txn)
		if err != nil {
-			return err
+			return fmt.Errorf("spending against the %s rate limit: %w", ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, err)
		}
-	}
-	fqdnLimits := ra.rlPolicies.CertificatesPerFQDNSet()
-	if fqdnLimits.Enabled() {
-		err := ra.checkCertificatesPerFQDNSetLimit(ctx, names, fqdnLimits, regID)
-		if err != nil {
-			return err
+		if decision.Result(ra.clk.Now()) != nil {
+			resp, err := ra.SA.PauseIdentifiers(ctx, &sapb.PauseRequest{
+				RegistrationID: regId,
+				Identifiers:    []*corepb.Identifier{ident.ToProto()},
+			})
+			if err != nil {
+				return fmt.Errorf("failed to pause %d/%q: %w", regId, ident.Value, err)
+			}
+			ra.pauseCounter.With(prometheus.Labels{
+				"paused":   strconv.FormatBool(resp.Paused > 0),
+				"repaused": strconv.FormatBool(resp.Repaused > 0),
+				"grace":    strconv.FormatBool(resp.Paused <= 0 && resp.Repaused <= 0),
+			}).Inc()
		}
	}
	return nil
}

-// UpdateRegistration updates an existing Registration with new values. Caller
-// is responsible for making sure that update.Key is only different from base.Key
-// if it is being called from the WFE key change endpoint.
-// TODO(#5554): Split this into separate methods for updating Contacts vs Key.
-func (ra *RegistrationAuthorityImpl) UpdateRegistration(ctx context.Context, req *rapb.UpdateRegistrationRequest) (*corepb.Registration, error) {
-	// Error if the request is nil, there is no account key or IP address
-	if req.Base == nil || len(req.Base.Key) == 0 || len(req.Base.InitialIP) == 0 || req.Base.Id == 0 {
-		return nil, errIncompleteGRPCRequest
-	}
-
-	err := validateContactsPresent(req.Base.Contact, req.Base.ContactsPresent)
-	if err != nil {
-		return nil, err
-	}
-	err = validateContactsPresent(req.Update.Contact, req.Update.ContactsPresent)
-	if err != nil {
-		return nil, err
-	}
-	err = ra.validateContacts(ctx, req.Update.Contact)
+// resetAccountPausingLimit resets the bucket to maximum capacity for the given
+// account. There is no reason to surface errors from this function to the
+// Subscriber.
+func (ra *RegistrationAuthorityImpl) resetAccountPausingLimit(ctx context.Context, regId int64, ident identifier.ACMEIdentifier) {
+	txns, err := ra.txnBuilder.NewPausingResetTransactions(regId, ident)
	if err != nil {
-		return nil, err
-	}
-
-	update, changed := mergeUpdate(req.Base, req.Update)
-	if !changed {
-		// If merging the update didn't actually change the base then our work is
-		// done, we can return before calling ra.SA.UpdateRegistration since there's
-		// nothing for the SA to do
-		return req.Base, nil
+		ra.log.Warningf("building reset transaction for regID=[%d] identifier=[%s]: %s", regId, ident.Value, err)
+		return
	}
-	_, err = ra.SA.UpdateRegistration(ctx, update)
+	err = ra.limiter.BatchReset(ctx, txns)
	if err != nil {
-		// berrors.InternalServerError since the user-data was validated before being
-		// passed to the SA.
-		err = berrors.InternalServerError("Could not update registration: %s", err)
-		return nil, err
+		ra.log.Warningf("resetting bucket for regID=[%d] identifier=[%s]: %s", regId, ident.Value, err)
	}
-
-	return update, nil
}

-func contactsEqual(a []string, b []string) bool {
-	if len(a) != len(b) {
-		return false
+// checkDCVAndCAA performs DCV and CAA checks sequentially: DCV is performed first
+// and CAA is only checked if DCV is successful. Validation records from the DCV
+// check are returned even if the CAA check fails.
+func (ra *RegistrationAuthorityImpl) checkDCVAndCAA(ctx context.Context, dcvReq *vapb.PerformValidationRequest, caaReq *vapb.IsCAAValidRequest) (*corepb.ProblemDetails, []*corepb.ValidationRecord, error) {
+	doDCVRes, err := ra.VA.DoDCV(ctx, dcvReq)
+	if err != nil {
+		return nil, nil, err
	}
-
-	// If there is an existing contact slice and it has the same length as the
-	// new contact slice we need to look at each contact to determine if there
-	// is a change being made. Use `sort.Strings` here to ensure a consistent
-	// comparison
-	sort.Strings(a)
-	sort.Strings(b)
-	for i := 0; i < len(b); i++ {
-		// If the contact's string representation differs at any index they aren't
-		// equal
-		if a[i] != b[i] {
-			return false
-		}
+	if doDCVRes.Problem != nil {
+		return doDCVRes.Problem, doDCVRes.Records, nil
	}
-	// They are equal!
-	return true
-}
-
-// MergeUpdate returns a new corepb.Registration with the majority of its fields
-// copies from the base Registration, and a subset (Contact, Agreement, and Key)
-// copied from the update Registration. It also returns a boolean indicating
-// whether or not this operation resulted in a Registration which differs from
-// the base.
-func mergeUpdate(base *corepb.Registration, update *corepb.Registration) (*corepb.Registration, bool) {
-	var changed bool
-
-	// Start by copying all of the fields.
-	res := &corepb.Registration{
-		Id:              base.Id,
-		Key:             base.Key,
-		Contact:         base.Contact,
-		ContactsPresent: base.ContactsPresent,
-		Agreement:       base.Agreement,
-		InitialIP:       base.InitialIP,
-		CreatedAt:       base.CreatedAt,
-		Status:          base.Status,
-	}
-
-	// Note: we allow update.Contact to overwrite base.Contact even if the former
-	// is empty in order to allow users to remove the contact associated with
-	// a registration. If the update has ContactsPresent set to false, then we
-	// know it is not attempting to update the contacts field.
- if update.ContactsPresent && !contactsEqual(base.Contact, update.Contact) { - res.Contact = update.Contact - res.ContactsPresent = update.ContactsPresent - changed = true - } - - if len(update.Agreement) > 0 && update.Agreement != base.Agreement { - res.Agreement = update.Agreement - changed = true - } - - if len(update.Key) > 0 { - if len(update.Key) != len(base.Key) { - res.Key = update.Key - changed = true - } else { - for i := 0; i < len(base.Key); i++ { - if update.Key[i] != base.Key[i] { - res.Key = update.Key - changed = true - break - } - } + switch identifier.FromProto(dcvReq.Identifier).Type { + case identifier.TypeDNS: + doCAAResp, err := ra.VA.DoCAA(ctx, caaReq) + if err != nil { + return nil, nil, err } + return doCAAResp.Problem, doDCVRes.Records, nil + case identifier.TypeIP: + return nil, doDCVRes.Records, nil + default: + return nil, nil, berrors.MalformedError("invalid identifier type: %s", dcvReq.Identifier.Type) } - - return res, changed -} - -// recordValidation records an authorization validation event, -// it should only be used on v2 style authorizations. -func (ra *RegistrationAuthorityImpl) recordValidation(ctx context.Context, authID string, authExpires *time.Time, challenge *core.Challenge) error { - authzID, err := strconv.ParseInt(authID, 10, 64) - if err != nil { - return err - } - var expires int64 - if challenge.Status == core.StatusInvalid { - expires = authExpires.UnixNano() - } else { - expires = ra.clk.Now().Add(ra.authorizationLifetime).UnixNano() - } - vr, err := bgrpc.ValidationResultToPB(challenge.ValidationRecord, challenge.Error) - if err != nil { - return err - } - var validated int64 - if challenge.Validated != nil { - validated = challenge.Validated.UTC().UnixNano() - } - _, err = ra.SA.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{ - Id: authzID, - Status: string(challenge.Status), - Expires: expires, - Attempted: string(challenge.Type), - AttemptedAt: validated, - ValidationRecords: vr.Records, - ValidationError: vr.Problems, - }) - if err != nil { - return err - } - return nil } // PerformValidation initiates validation for a specific challenge associated @@ -1571,11 +1485,10 @@ func (ra *RegistrationAuthorityImpl) recordValidation(ctx context.Context, authI func (ra *RegistrationAuthorityImpl) PerformValidation( ctx context.Context, req *rapb.PerformValidationRequest) (*corepb.Authorization, error) { - // Clock for start of PerformValidation. 
vStart := ra.clk.Now() - if req.Authz == nil || req.Authz.Id == "" || req.Authz.Identifier == "" || req.Authz.Status == "" || req.Authz.Expires == 0 { + if core.IsAnyNilOrZero(req.Authz, req.Authz.Id, req.Authz.Identifier, req.Authz.Status, req.Authz.Expires) { return nil, errIncompleteGRPCRequest } @@ -1583,16 +1496,24 @@ func (ra *RegistrationAuthorityImpl) PerformValidation( if err != nil { return nil, err } + authzID, err := strconv.ParseInt(authz.ID, 10, 64) + if err != nil { + return nil, err + } // Refuse to update expired authorizations if authz.Expires == nil || authz.Expires.Before(ra.clk.Now()) { return nil, berrors.MalformedError("expired authorization") } + profile, err := ra.profiles.get(authz.CertificateProfileName) + if err != nil { + return nil, err + } + challIndex := int(req.ChallengeIndex) if challIndex >= len(authz.Challenges) { - return nil, - berrors.MalformedError("invalid challenge index '%d'", challIndex) + return nil, berrors.MalformedError("invalid challenge index '%d'", challIndex) } ch := &authz.Challenges[challIndex] @@ -1602,12 +1523,11 @@ func (ra *RegistrationAuthorityImpl) PerformValidation( return nil, berrors.MalformedError("challenge type %q no longer allowed", ch.Type) } - // When configured with `reuseValidAuthz` we can expect some clients to try - // and update a challenge for an authorization that is already valid. In this - // case we don't need to process the challenge update. It wouldn't be helpful, - // the overall authorization is already good! We increment a stat for this - // case and return early. - if ra.reuseValidAuthz && authz.Status == core.StatusValid { + // We expect some clients to try and update a challenge for an authorization + // that is already valid. In this case we don't need to process the + // challenge update. It wouldn't be helpful, the overall authorization is + // already good! We return early for the valid authz reuse case. + if authz.Status == core.StatusValid { return req.Authz, nil } @@ -1615,225 +1535,108 @@ func (ra *RegistrationAuthorityImpl) PerformValidation( return nil, berrors.MalformedError("authorization must be pending") } - // Look up the account key for this authorization + // Compute the key authorization field based on the registration key regPB, err := ra.SA.GetRegistration(ctx, &sapb.RegistrationID{Id: authz.RegistrationID}) if err != nil { - return nil, berrors.InternalServerError(err.Error()) + return nil, berrors.InternalServerError("getting acct for authorization: %s", err.Error()) } reg, err := bgrpc.PbToRegistration(regPB) if err != nil { - return nil, berrors.InternalServerError(err.Error()) + return nil, berrors.InternalServerError("getting acct for authorization: %s", err.Error()) } - - // Compute the key authorization field based on the registration key expectedKeyAuthorization, err := ch.ExpectedKeyAuthorization(reg.Key) if err != nil { return nil, berrors.InternalServerError("could not compute expected key authorization value") } - // Populate the ProvidedKeyAuthorization such that the VA can confirm the - // expected vs actual without needing the registration key. Historically this - // was done with the value from the challenge response and so the field name - // is called "ProvidedKeyAuthorization", in reality this is just - // "KeyAuthorization". 
- // TODO(@cpu): Rename ProvidedKeyAuthorization to KeyAuthorization - ch.ProvidedKeyAuthorization = expectedKeyAuthorization - // Double check before sending to VA - if cErr := ch.CheckConsistencyForValidation(); cErr != nil { - return nil, berrors.MalformedError(cErr.Error()) + if cErr := ch.CheckPending(); cErr != nil { + return nil, berrors.MalformedError("cannot validate challenge: %s", cErr.Error()) } // Dispatch to the VA for service - vaCtx := context.Background() - go func(authz core.Authorization) { - // We will mutate challenges later in this goroutine to change status and - // add error, but we also return a copy of authz immediately. To avoid a - // data race, make a copy of the challenges slice here for mutation. - challenges := make([]core.Challenge, len(authz.Challenges)) - copy(challenges, authz.Challenges) - authz.Challenges = challenges - chall, _ := bgrpc.ChallengeToPB(authz.Challenges[challIndex]) - - req := vapb.PerformValidationRequest{ - Domain: authz.Identifier.Value, - Challenge: chall, - Authz: &vapb.AuthzMeta{ - Id: authz.ID, - RegID: authz.RegistrationID, + ra.drainWG.Go(func() { + ctx := context.WithoutCancel(ctx) + + prob, records, err := ra.checkDCVAndCAA( + ctx, + &vapb.PerformValidationRequest{ + Identifier: authz.Identifier.ToProto(), + Challenge: &corepb.Challenge{Type: string(ch.Type), Status: string(ch.Status), Token: ch.Token}, + Authz: &vapb.AuthzMeta{Id: authz.ID, RegID: authz.RegistrationID}, + ExpectedKeyAuthorization: expectedKeyAuthorization, }, - } - res, err := ra.VA.PerformValidation(vaCtx, &req) - - challenge := &authz.Challenges[challIndex] - var prob *probs.ProblemDetails - + &vapb.IsCAAValidRequest{ + Identifier: authz.Identifier.ToProto(), + ValidationMethod: string(ch.Type), + AccountURIID: authz.RegistrationID, + AuthzID: authz.ID, + }, + ) if err != nil { - prob = probs.ServerInternal("Could not communicate with VA") - ra.log.AuditErrf("Could not communicate with VA: %s", err) - } else { - if res.Problems != nil { - prob, err = bgrpc.PBToProblemDetails(res.Problems) - if err != nil { - prob = probs.ServerInternal("Could not communicate with VA") - ra.log.AuditErrf("Could not communicate with VA: %s", err) - } - } - - // Save the updated records - records := make([]core.ValidationRecord, len(res.Records)) - for i, r := range res.Records { - records[i], err = bgrpc.PBToValidationRecord(r) - if err != nil { - prob = probs.ServerInternal("Records for validation corrupt") - } - } - challenge.ValidationRecord = records - } - - if !challenge.RecordsSane() && prob == nil { - prob = probs.ServerInternal("Records for validation failed sanity check") + prob = bgrpc.ProblemDetailsToPB(probs.ServerInternal("Could not communicate with VA")) + ra.log.Errf("Failed to communicate with VA: %s", err) } + var status core.AcmeStatus + var expires time.Time if prob != nil { - challenge.Status = core.StatusInvalid - challenge.Error = prob + status = core.StatusInvalid + expires = *authz.Expires + err := ra.countFailedValidations(ctx, authz.RegistrationID, authz.Identifier) + if err != nil { + ra.log.Warningf("incrementing failed validations: %s", err) + } } else { - challenge.Status = core.StatusValid - } - challenge.Validated = &vStart - authz.Challenges[challIndex] = *challenge - - err = ra.recordValidation(vaCtx, authz.ID, authz.Expires, challenge) - if err != nil { - ra.log.AuditErrf("Could not record updated validation: err=[%s] regID=[%d] authzID=[%s]", - err, authz.RegistrationID, authz.ID) + status = core.StatusValid + expires = 
ra.clk.Now().Add(profile.validAuthzLifetime) + if features.Get().AutomaticallyPauseZombieClients { + ra.resetAccountPausingLimit(ctx, authz.RegistrationID, authz.Identifier) + } } - }(authz) - return bgrpc.AuthzToPB(authz) -} -func revokeEvent(state, serial, cn string, names []string, revocationCode revocation.Reason) string { - return fmt.Sprintf( - "Revocation - State: %s, Serial: %s, CN: %s, DNS Names: %s, Reason: %s", - state, - serial, - cn, - names, - revocation.ReasonToString[revocationCode], - ) -} - -// deprecatedRevokeCertificate generates a revoked OCSP response for the given certificate, stores -// the revocation information, and purges OCSP request URLs from Akamai. -// DEPRECATED: Used only by RevokeCertificateWithReg, which is itself deprecated. -func (ra *RegistrationAuthorityImpl) deprecatedRevokeCertificate(ctx context.Context, cert *x509.Certificate, reason revocation.Reason, revokedBy int64, source string, comment string, skipBlockKey bool) error { - serial := core.SerialToString(cert.SerialNumber) - - var issuerID int64 - var issuer *issuance.Certificate - var ok bool - if cert.Raw == nil { - // We've been given a synthetic cert containing just a serial number, - // presumably because the cert we're revoking is so badly malformed that - // it is unparsable. We need to gather the relevant info using only the - // serial number. - if reason == ocsp.KeyCompromise { - return fmt.Errorf("cannot revoke for KeyCompromise without full cert") - } - - status, err := ra.SA.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) + _, err = ra.SA.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + Status: string(status), + Expires: timestamppb.New(expires), + Attempted: string(ch.Type), + AttemptedAt: timestamppb.New(vStart), + ValidationRecords: records, + ValidationError: prob, + }) if err != nil { - return fmt.Errorf("unable to confirm that serial %q was ever issued: %w", serial, err) - } - - issuerID = status.IssuerID - issuer, ok = ra.issuersByNameID[issuance.IssuerNameID(issuerID)] - if !ok { - // TODO(#5152): Remove this fallback to old-style IssuerIDs. - issuer, ok = ra.issuersByID[issuance.IssuerID(issuerID)] - if !ok { - return fmt.Errorf("unable to identify issuer of serial %q", serial) + if errors.Is(err, berrors.NotFound) { + // We log NotFound at a lower level because this is largely due to a + // parallel-validation race: a different validation attempt has already + // updated this authz, so we failed to find a *pending* authz with the + // given ID to update. 
+				ra.log.InfoObject("Failed to record validation (likely parallel validation race)", map[string]any{
+					"requester": authz.RegistrationID,
+					"authz":     authz.ID,
+					"error":     err.Error(),
+				})
+			} else {
+				ra.log.AuditErr("Failed to record validation", err, map[string]any{
+					"requester": authz.RegistrationID,
+					"authz":     authz.ID,
+				})
+			}
		}
-	} else {
-		issuerID = int64(issuance.GetIssuerNameID(cert))
-		issuer, ok = ra.issuersByNameID[issuance.IssuerNameID(issuerID)]
-		if !ok {
-			return fmt.Errorf("unable to identify issuer of cert with serial %q", serial)
-		}
-	}
-
-	revokedAt := ra.clk.Now().UnixNano()
-	ocspResponse, err := ra.CA.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{
-		Serial:    serial,
-		IssuerID:  issuerID,
-		Status:    string(core.OCSPStatusRevoked),
-		Reason:    int32(reason),
-		RevokedAt: revokedAt,
-	})
-	if err != nil {
-		return err
-	}
-
-	_, err = ra.SA.RevokeCertificate(ctx, &sapb.RevokeCertificateRequest{
-		Serial:   serial,
-		Reason:   int64(reason),
-		Date:     revokedAt,
-		Response: ocspResponse.Response,
-		IssuerID: issuerID,
	})
-	if err != nil {
-		return err
-	}
-	if reason == ocsp.KeyCompromise && !skipBlockKey {
-		digest, err := core.KeyDigest(cert.PublicKey)
-		if err != nil {
-			return err
-		}
-		req := &sapb.AddBlockedKeyRequest{
-			KeyHash: digest[:],
-			Added:   revokedAt,
-			Source:  source,
-		}
-		if comment != "" {
-			req.Comment = comment
-		}
-		if features.Enabled(features.StoreRevokerInfo) && revokedBy != 0 {
-			req.RevokedBy = revokedBy
-		}
-		if _, err = ra.SA.AddBlockedKey(ctx, req); err != nil {
-			return err
-		}
-	}
-
-	purgeURLs, err := akamai.GeneratePurgeURLs(cert, issuer.Certificate)
-	if err != nil {
-		return err
-	}
-	_, err = ra.purger.Purge(ctx, &akamaipb.PurgeRequest{Urls: purgeURLs})
-	if err != nil {
-		return err
-	}
-
-	return nil
+	// Because Authorizations do not have a "processing" state like Orders do,
+	// a client POSTing to a Challenge URL does not result in any state changes
+	// for the Authorization itself. Therefore we just return the exact same authz
+	// as we started with.
+	return req.Authz, nil
}

-// revokeCertificate generates a revoked OCSP response for the certificate with
-// the given serial and issuer and stores that response in the database.
-// TODO(#5152) make the issuerID argument an issuance.IssuerNameID
-func (ra *RegistrationAuthorityImpl) revokeCertificate(ctx context.Context, serial *big.Int, issuerID int64, reason revocation.Reason) error {
-	serialString := core.SerialToString(serial)
-	revokedAt := ra.clk.Now().UnixNano()
-
-	ocspResponse, err := ra.CA.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{
-		Serial:    serialString,
-		IssuerID:  int64(issuerID),
-		Status:    string(core.OCSPStatusRevoked),
-		Reason:    int32(reason),
-		RevokedAt: revokedAt,
-	})
+// revokeCertificate updates the database to mark the certificate as revoked,
+// with the given reason and current timestamp.
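A compressed sketch of the error triage in PerformValidation above, using a hypothetical errNotFound sentinel in place of berrors.NotFound: the known parallel-validation race is logged at info level, while anything else is treated as an audit-worthy failure.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("no pending authz found") // stand-in for berrors.NotFound

// logLevelFor mirrors the triage above: a NotFound from FinalizeAuthorization2
// usually means another validation attempt already finalized this authz, so it
// is informational; any other error is a real failure to record the result.
func logLevelFor(err error) string {
	if errors.Is(err, errNotFound) {
		return "info"
	}
	return "audit-error"
}

func main() {
	fmt.Println(logLevelFor(errNotFound))              // info
	fmt.Println(logLevelFor(errors.New("db timeout"))) // audit-error
}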
+func (ra *RegistrationAuthorityImpl) revokeCertificate(ctx context.Context, cert *x509.Certificate, reason revocation.Reason) error { + serialString := core.SerialToString(cert.SerialNumber) + issuerID := issuance.IssuerNameID(cert) + shardIdx, err := crlShard(cert) if err != nil { return err } @@ -1841,28 +1644,23 @@ func (ra *RegistrationAuthorityImpl) revokeCertificate(ctx context.Context, seri _, err = ra.SA.RevokeCertificate(ctx, &sapb.RevokeCertificateRequest{ Serial: serialString, Reason: int64(reason), - Date: revokedAt, - Response: ocspResponse.Response, + Date: timestamppb.New(ra.clk.Now()), IssuerID: int64(issuerID), + ShardIdx: shardIdx, }) if err != nil { return err } - ra.revocationReasonCounter.WithLabelValues(revocation.ReasonToString[reason]).Inc() + ra.revocationReasonCounter.WithLabelValues(reason.String()).Inc() return nil } -// updateRevocationForKeyCompromise generates a revoked OCSP response for the -// already-revoked certificate with the given serial and issuer, and stores that -// response in the database. This only works for certificates that were -// previously revoked for a reason other than keyCompromise, and which are now -// being updated to keyCompromise instead. -// TODO(#5152) make the issuerID argument an issuance.IssuerNameID -func (ra *RegistrationAuthorityImpl) updateRevocationForKeyCompromise(ctx context.Context, serial *big.Int, issuerID int64) error { - serialString := core.SerialToString(serial) - thisUpdate := ra.clk.Now().UnixNano() - +// updateRevocationForKeyCompromise updates the database to mark the certificate +// as revoked, with the given reason and current timestamp. This only works for +// certificates that were previously revoked for a reason other than +// keyCompromise, and which are now being updated to keyCompromise instead. +func (ra *RegistrationAuthorityImpl) updateRevocationForKeyCompromise(ctx context.Context, serialString string, issuerID issuance.NameID) error { status, err := ra.SA.GetCertificateStatus(ctx, &sapb.Serial{Serial: serialString}) if err != nil { return berrors.NotFoundError("unable to confirm that serial %q was ever issued: %s", serialString, err) @@ -1873,94 +1671,59 @@ func (ra *RegistrationAuthorityImpl) updateRevocationForKeyCompromise(ctx contex // unless the cert was already revoked. return fmt.Errorf("unable to re-revoke serial %q which is not currently revoked", serialString) } - if status.RevokedReason == ocsp.KeyCompromise { + if revocation.Reason(status.RevokedReason) == revocation.KeyCompromise { return berrors.AlreadyRevokedError("unable to re-revoke serial %q which is already revoked for keyCompromise", serialString) } - // The new OCSP response has to be back-dated to the original date. 
- ocspResponse, err := ra.CA.GenerateOCSP(ctx, &capb.GenerateOCSPRequest{ - Serial: serialString, - IssuerID: int64(issuerID), - Status: string(core.OCSPStatusRevoked), - Reason: int32(ocsp.KeyCompromise), - RevokedAt: status.RevokedDate, - }) + cert, err := ra.SA.GetCertificate(ctx, &sapb.Serial{Serial: serialString}) if err != nil { - return err + return berrors.NotFoundError("unable to confirm that serial %q was ever issued: %s", serialString, err) } - - _, err = ra.SA.UpdateRevokedCertificate(ctx, &sapb.RevokeCertificateRequest{ - Serial: serialString, - Reason: int64(ocsp.KeyCompromise), - Date: thisUpdate, - Backdate: status.RevokedDate, - Response: ocspResponse.Response, - IssuerID: int64(issuerID), - }) + x509Cert, err := x509.ParseCertificate(cert.Der) if err != nil { return err } - ra.revocationReasonCounter.WithLabelValues(revocation.ReasonToString[ocsp.KeyCompromise]).Inc() - return nil -} - -// purgeOCSPCache makes a request to akamai-purger to purge the cache entries -// for the given certificate. -// TODO(#5152) make the issuerID argument an issuance.IssuerNameID -func (ra *RegistrationAuthorityImpl) purgeOCSPCache(ctx context.Context, cert *x509.Certificate, issuerID int64) error { - issuer, ok := ra.issuersByNameID[issuance.IssuerNameID(issuerID)] - if !ok { - // TODO(#5152): Remove this fallback (which only gets used when revoking by - // serial, so the issuer ID had to be read from the db). - issuer, ok = ra.issuersByID[issuance.IssuerID(issuerID)] - if !ok { - return fmt.Errorf("unable to identify issuer of cert with serial %q", core.SerialToString(cert.SerialNumber)) - } - } - - purgeURLs, err := akamai.GeneratePurgeURLs(cert, issuer.Certificate) + shardIdx, err := crlShard(x509Cert) if err != nil { return err } - _, err = ra.purger.Purge(ctx, &akamaipb.PurgeRequest{Urls: purgeURLs}) + _, err = ra.SA.UpdateRevokedCertificate(ctx, &sapb.RevokeCertificateRequest{ + Serial: serialString, + Reason: int64(revocation.KeyCompromise), + Date: timestamppb.New(ra.clk.Now()), + Backdate: status.RevokedDate, + IssuerID: int64(issuerID), + ShardIdx: shardIdx, + }) if err != nil { return err } + ra.revocationReasonCounter.WithLabelValues(revocation.KeyCompromise.String()).Inc() return nil } // RevokeCertByApplicant revokes the certificate in question. It allows any // revocation reason from (0, 1, 3, 4, 5, 9), because Subscribers are allowed to // request any revocation reason for their own certificates. However, if the -// requesting RegID is an account which has authorizations for all names in the -// cert but is *not* the original subscriber, it overrides the revocation reason +// requesting account has authorizations for all names in the cert but +// is *not* the original subscriber, it overrides the revocation reason // to be 5 (cessationOfOperation), because that code is used to cover instances // where "the certificate subscriber no longer owns the domain names in the // certificate". It does not add the key to the blocked keys list, even if // reason 1 (keyCompromise) is requested, as it does not demonstrate said -// compromise. It attempts to purge the certificate from the Akamai cache, but -// it does not hard-fail if doing so is not successful, because the cache will -// drop the old OCSP response in less than 24 hours anyway. +// compromise. 
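To make the reason-override rule in the comment above concrete, here is a small sketch; the constants mirror the RFC 5280 reason codes named in the comment, and isOriginalSubscriber is a hypothetical stand-in for the account comparison performed below.

package main

import "fmt"

const (
	keyCompromise        = 1 // RFC 5280 reasonCode
	cessationOfOperation = 5
)

// effectiveReason mirrors the rule described above: a requester who merely
// controls all identifiers in the cert, but is not the original subscriber,
// always gets cessationOfOperation regardless of the reason requested.
func effectiveReason(requested int, isOriginalSubscriber bool) int {
	if !isOriginalSubscriber {
		return cessationOfOperation
	}
	return requested
}

func main() {
	fmt.Println(effectiveReason(keyCompromise, true))  // 1: subscribers keep their requested reason
	fmt.Println(effectiveReason(keyCompromise, false)) // 5: overridden to cessationOfOperation
}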
func (ra *RegistrationAuthorityImpl) RevokeCertByApplicant(ctx context.Context, req *rapb.RevokeCertByApplicantRequest) (*emptypb.Empty, error) { if req == nil || req.Cert == nil || req.RegID == 0 { return nil, errIncompleteGRPCRequest } - if _, present := revocation.UserAllowedReasons[revocation.Reason(req.Code)]; !present { + reasonCode := revocation.Reason(req.Code) + if !revocation.UserAllowedReason(reasonCode) { return nil, berrors.BadRevocationReasonError(req.Code) } - if !features.Enabled(features.MozRevocationReasons) { - // By our current policy, demonstrating key compromise is the only way to - // get a certificate revoked with reason key compromise. Upcoming Mozilla - // policy may require us to allow the original Subscriber to assert the - // keyCompromise revocation reason, even without demonstrating such. - if req.Code == ocsp.KeyCompromise { - return nil, berrors.BadRevocationReasonError(req.Code) - } - } cert, err := x509.ParseCertificate(req.Cert) if err != nil { @@ -1972,9 +1735,9 @@ func (ra *RegistrationAuthorityImpl) RevokeCertByApplicant(ctx context.Context, logEvent := certificateRevocationEvent{ ID: core.NewToken(), SerialNumber: serialString, - Reason: req.Code, + Reason: reasonCode, Method: "applicant", - RequesterID: req.RegID, + Requester: req.RegID, } // Below this point, do not re-declare `err` (i.e. type `err :=`) in a @@ -1984,7 +1747,7 @@ func (ra *RegistrationAuthorityImpl) RevokeCertByApplicant(ctx context.Context, if err != nil { logEvent.Error = err.Error() } - ra.log.AuditObject("Revocation request:", logEvent) + ra.log.AuditInfo("Revocation request", logEvent) }() metadata, err := ra.SA.GetSerialMetadata(ctx, &sapb.Serial{Serial: serialString}) @@ -2000,92 +1763,125 @@ func (ra *RegistrationAuthorityImpl) RevokeCertByApplicant(ctx context.Context, // authorizations for all names in the cert. logEvent.Method = "control" - var authzMapPB *sapb.Authorizations - authzMapPB, err = ra.SA.GetValidAuthorizations2(ctx, &sapb.GetValidAuthorizationsRequest{ + idents := identifier.FromCert(cert) + var authzPB *sapb.Authorizations + authzPB, err = ra.SA.GetValidAuthorizations2(ctx, &sapb.GetValidAuthorizationsRequest{ RegistrationID: req.RegID, - Domains: cert.DNSNames, - Now: ra.clk.Now().UnixNano(), + Identifiers: idents.ToProtoSlice(), + ValidUntil: timestamppb.New(ra.clk.Now()), }) if err != nil { return nil, err } - m := make(map[string]struct{}) - for _, authz := range authzMapPB.Authz { - m[authz.Domain] = struct{}{} + var authzMap map[identifier.ACMEIdentifier]*core.Authorization + authzMap, err = bgrpc.PBToAuthzMap(authzPB) + if err != nil { + return nil, err } - for _, name := range cert.DNSNames { - if _, present := m[name]; !present { - return nil, berrors.UnauthorizedError("requester does not control all names in cert with serial %q", serialString) + + for _, ident := range idents { + if _, present := authzMap[ident]; !present { + return nil, berrors.UnauthorizedError("requester does not control all identifiers in cert with serial %q", serialString) } } - if features.Enabled(features.MozRevocationReasons) { - // Applicants who are not the original Subscriber are not allowed to - // revoke for any reason other than cessationOfOperation, which covers - // circumstances where "the certificate subscriber no longer owns the - // domain names in the certificate". Override the reason code to match. 
-		req.Code = ocsp.CessationOfOperation
-		logEvent.Reason = req.Code
-	}
+		// Applicants who are not the original Subscriber are not allowed to
+		// revoke for any reason other than cessationOfOperation, which covers
+		// circumstances where "the certificate subscriber no longer owns the
+		// domain names in the certificate". Override the reason code to match.
+		reasonCode = revocation.CessationOfOperation
+		logEvent.Reason = reasonCode
	}

-	issuerID := issuance.GetIssuerNameID(cert)
-	err = ra.revokeCertificate(
-		ctx,
-		cert.SerialNumber,
-		int64(issuerID),
-		revocation.Reason(req.Code),
-	)
+	err = ra.revokeCertificate(ctx, cert, reasonCode)
	if err != nil {
		return nil, err
	}

-	// TODO(#5979): Check this error when it can't simply be due to a full queue.
-	_ = ra.purgeOCSPCache(ctx, cert, int64(issuerID))
-
	return &emptypb.Empty{}, nil
}

+// crlShard extracts the CRL shard from a certificate's CRLDistributionPoint.
+//
+// If there is no CRLDistributionPoint, returns 0.
+//
+// If there is more than one CRLDistributionPoint, returns an error.
+//
+// Assumes the shard number is represented in the URL as an integer that
+// occurs in the last path component, optionally followed by ".crl".
+//
+// Note: This assumes (a) the CA is generating well-formed, correct
+// CRLDistributionPoints and (b) an earlier component has verified the signature
+// on this certificate comes from one of our issuers.
+func crlShard(cert *x509.Certificate) (int64, error) {
+	if len(cert.CRLDistributionPoints) == 0 {
+		return 0, errors.New("no crlDistributionPoints in certificate")
+	}
+	if len(cert.CRLDistributionPoints) > 1 {
+		return 0, errors.New("too many crlDistributionPoints in certificate")
+	}
+
+	url := strings.TrimSuffix(cert.CRLDistributionPoints[0], ".crl")
+	lastIndex := strings.LastIndex(url, "/")
+	if lastIndex == -1 {
+		return 0, fmt.Errorf("malformed CRLDistributionPoint %q", url)
+	}
+	shardStr := url[lastIndex+1:]
+	shardIdx, err := strconv.Atoi(shardStr)
+	if err != nil {
+		return 0, fmt.Errorf("parsing CRLDistributionPoint: %s", err)
+	}
+
+	if shardIdx <= 0 {
+		return 0, fmt.Errorf("invalid shard in CRLDistributionPoint: %d", shardIdx)
+	}
+
+	return int64(shardIdx), nil
+}
+
+// addToBlockedKeys initiates a GRPC call to have the Base64-encoded SHA256
+// digest of a provided public key added to the blockedKeys table.
+func (ra *RegistrationAuthorityImpl) addToBlockedKeys(ctx context.Context, key crypto.PublicKey, src string, comment string) error {
+	var digest core.Sha256Digest
+	digest, err := core.KeyDigest(key)
+	if err != nil {
+		return err
+	}
+
+	// Add the public key to the blocked keys list.
+	_, err = ra.SA.AddBlockedKey(ctx, &sapb.AddBlockedKeyRequest{
+		KeyHash: digest[:],
+		Added:   timestamppb.New(ra.clk.Now()),
+		Source:  src,
+		Comment: comment,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // RevokeCertByKey revokes the certificate in question. It always uses
 // reason code 1 (keyCompromise). It ensures that the public key is added to
-// the blocked keys list, even if revocation otherwise fails. It attempts to
-// purge the certificate from the Akamai cache, but it does not hard-fail if
-// doing so is not successful, because the cache will drop the old OCSP response
-// in less than 24 hours anyway.
+// the blocked keys list, even if revocation otherwise fails.
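To illustrate the crlShard parsing rule above with concrete (made-up) URLs, this standalone sketch reimplements just the string-parsing step, without the certificate plumbing:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// shardFromURL applies the same rule as crlShard above: strip an optional
// ".crl" suffix, then parse the last path component as a positive integer.
func shardFromURL(u string) (int64, error) {
	u = strings.TrimSuffix(u, ".crl")
	i := strings.LastIndex(u, "/")
	if i == -1 {
		return 0, fmt.Errorf("malformed CRLDistributionPoint %q", u)
	}
	n, err := strconv.Atoi(u[i+1:])
	if err != nil || n <= 0 {
		return 0, fmt.Errorf("invalid shard in CRLDistributionPoint %q", u)
	}
	return int64(n), nil
}

func main() {
	fmt.Println(shardFromURL("http://c.example.com/123.crl")) // 123 <nil>
	fmt.Println(shardFromURL("http://c.example.com/0.crl"))   // 0 and an "invalid shard" error
}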
func (ra *RegistrationAuthorityImpl) RevokeCertByKey(ctx context.Context, req *rapb.RevokeCertByKeyRequest) (*emptypb.Empty, error) { if req == nil || req.Cert == nil { return nil, errIncompleteGRPCRequest } - var reason int64 - if features.Enabled(features.MozRevocationReasons) { - // Upcoming Mozilla policy may require that a certificate be revoked with - // reason keyCompromise if "the CA obtains verifiable evidence that the - // certificate subscriber’s private key corresponding to the public key in - // the certificate suffered a key compromise". Signing a JWS to an ACME - // server's revocation endpoint certainly counts, so override the reason. - reason = ocsp.KeyCompromise - } else { - if _, present := revocation.UserAllowedReasons[revocation.Reason(req.Code)]; !present { - return nil, berrors.BadRevocationReasonError(req.Code) - } - reason = req.Code - } - cert, err := x509.ParseCertificate(req.Cert) if err != nil { return nil, err } - issuerID := issuance.GetIssuerNameID(cert) - logEvent := certificateRevocationEvent{ ID: core.NewToken(), SerialNumber: core.SerialToString(cert.SerialNumber), - Reason: reason, + Reason: revocation.KeyCompromise, Method: "key", - RequesterID: 0, + Requester: 0, } // Below this point, do not re-declare `err` (i.e. type `err :=`) in a @@ -2095,198 +1891,155 @@ func (ra *RegistrationAuthorityImpl) RevokeCertByKey(ctx context.Context, req *r if err != nil { logEvent.Error = err.Error() } - ra.log.AuditObject("Revocation request:", logEvent) + ra.log.AuditInfo("Revocation request", logEvent) }() // We revoke the cert before adding it to the blocked keys list, to avoid a - // race between this and the bad-key-revoker. But we don't check the error on - // from this operation until after we add to the blocked keys list, since that - // add needs to happen no matter what. + // race between this and the bad-key-revoker. But we don't check the error + // from this operation until after we add the key to the blocked keys list, + // since that addition needs to happen no matter what. revokeErr := ra.revokeCertificate( ctx, - cert.SerialNumber, - int64(issuerID), - revocation.Reason(reason), + cert, + revocation.KeyCompromise, ) - // Now add the public key to the blocked keys list, and report the error if - // there is one. It's okay to error out here because failing to add the key - // to the blocked keys list is a worse failure than failing to revoke in the - // first place, because it means that bad-key-revoker won't revoke the cert - // anyway. - var shouldBlock bool - if features.Enabled(features.AllowReRevocation) { - // If we're allowing re-revocation, then block the key for all keyCompromise - // requests, no matter whether the revocation itself succeeded or failed. - shouldBlock = reason == ocsp.KeyCompromise - } else { - // Otherwise, only block the key if the revocation above succeeded, or - // failed for a reason other than "already revoked". - shouldBlock = (reason == ocsp.KeyCompromise && !errors.Is(revokeErr, berrors.AlreadyRevoked)) - } - if shouldBlock { - var digest core.Sha256Digest - digest, err = core.KeyDigest(cert.PublicKey) - if err != nil { - return nil, err - } - _, err = ra.SA.AddBlockedKey(ctx, &sapb.AddBlockedKeyRequest{ - KeyHash: digest[:], - Added: ra.clk.Now().UnixNano(), - Source: "API", - }) - if err != nil { - return nil, err - } + // Failing to add the key to the blocked keys list is a worse failure than + // failing to revoke in the first place, because it means that + // bad-key-revoker won't revoke the cert anyway. 
+ err = ra.addToBlockedKeys(ctx, cert.PublicKey, "API", "") + if err != nil { + return nil, err } - // Finally check the error from revocation itself. If it was an AlreadyRevoked - // error, try to re-revoke the cert, in case it is revoked for a reason other - // than keyCompromise. + issuerID := issuance.IssuerNameID(cert) + + // Check the error returned from revokeCertificate itself. err = revokeErr - if err != nil { - // Immediately error out, rather than trying re-revocation, if the error was - // anything other than AlreadyRevoked, if the requested reason is anything - // other than keyCompromise, or if we're not yet using the new logic. - if !errors.Is(err, berrors.AlreadyRevoked) || - reason != ocsp.KeyCompromise || - !features.Enabled(features.AllowReRevocation) { - return nil, err - } - err = ra.updateRevocationForKeyCompromise(ctx, cert.SerialNumber, int64(issuerID)) + if errors.Is(err, berrors.AlreadyRevoked) { + // If it was an AlreadyRevoked error, try to re-revoke the cert in case + // it was revoked for a reason other than keyCompromise. + err = ra.updateRevocationForKeyCompromise(ctx, core.SerialToString(cert.SerialNumber), issuerID) if err != nil { return nil, err } - } - - // TODO(#5979): Check this error when it can't simply be due to a full queue. - _ = ra.purgeOCSPCache(ctx, cert, int64(issuerID)) - - return &emptypb.Empty{}, nil -} - -// RevokeCertificateWithReg terminates trust in the certificate provided. -// DEPRECATED: use RevokeCertBySubscriber, RevokeCertByController, or -// RevokeCertByKey instead. -func (ra *RegistrationAuthorityImpl) RevokeCertificateWithReg(ctx context.Context, req *rapb.RevokeCertificateWithRegRequest) (*emptypb.Empty, error) { - if req == nil || req.Cert == nil { - return nil, errIncompleteGRPCRequest - } - - cert, err := x509.ParseCertificate(req.Cert) - if err != nil { - return nil, err - } - - serialString := core.SerialToString(cert.SerialNumber) - revocationCode := revocation.Reason(req.Code) - - err = ra.deprecatedRevokeCertificate(ctx, cert, revocationCode, req.RegID, "API", "", false) - - state := "Failure" - defer func() { - // Needed: - // Serial - // CN - // DNS names - // Revocation reason - // Registration ID of requester; may be 0 if request is signed with cert key - // Error (if there was one) - ra.log.AuditInfof("%s, Request by registration ID: %d", - revokeEvent(state, serialString, cert.Subject.CommonName, cert.DNSNames, revocationCode), - req.RegID) - }() - - if err != nil { - state = fmt.Sprintf("Failure -- %s", err) + return &emptypb.Empty{}, nil + } else if err != nil { + // Error out if the error was anything other than AlreadyRevoked. return nil, err } - ra.revocationReasonCounter.WithLabelValues(revocation.ReasonToString[revocationCode]).Inc() - state = "Success" return &emptypb.Empty{}, nil } // AdministrativelyRevokeCertificate terminates trust in the certificate // provided and does not require the registration ID of the requester since this -// method is only called from the admin-revoker tool. It trusts that the admin +// method is only called from the `admin` tool. It trusts that the admin // is doing the right thing, so if the requested reason is keyCompromise, it // blocks the key from future issuance even though compromise has not been -// demonstrated here. It purges the certificate from the Akamai cache, and -// returns an error if that purge fails, since this method may be called late -// in the BRs-mandated revocation timeframe. +// demonstrated here. 
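The up-front request checks in the method below form a small consistency matrix; this sketch covers the main rules (field names borrowed from the request message, helper name hypothetical) and returns the first violated rule, if any.

package main

import (
	"errors"
	"fmt"
)

const keyCompromise = 1 // RFC 5280 reasonCode, as used below

// validateAdminRevocation mirrors the core checks at the top of
// AdministrativelyRevokeCertificate: an explicit CRL shard is required for,
// and only allowed with, malformed certificates; keyCompromise is
// incompatible with malformed certificates; and key blocking may only be
// skipped for keyCompromise.
func validateAdminRevocation(serial string, crlShard int64, malformed, skipBlockKey bool, reason int) error {
	switch {
	case serial == "":
		return errors.New("serial is required")
	case crlShard != 0 && !malformed:
		return errors.New("non-zero CrlShard is only allowed for malformed certificates")
	case malformed && crlShard == 0:
		return errors.New("CrlShard is required for malformed certificates")
	case skipBlockKey && reason != keyCompromise:
		return errors.New("cannot skip key blocking for reasons other than KeyCompromise")
	case reason == keyCompromise && malformed:
		return errors.New("cannot revoke malformed certificate for KeyCompromise")
	}
	return nil
}

func main() {
	fmt.Println(validateAdminRevocation("04:aa", 7, true, false, 4))  // <nil>: malformed cert with explicit shard
	fmt.Println(validateAdminRevocation("04:aa", 7, false, false, 4)) // rejected: shard without malformed
}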
func (ra *RegistrationAuthorityImpl) AdministrativelyRevokeCertificate(ctx context.Context, req *rapb.AdministrativelyRevokeCertificateRequest) (*emptypb.Empty, error) { if req == nil || req.AdminName == "" { return nil, errIncompleteGRPCRequest } - if req.Cert == nil && req.Serial == "" { + if req.Serial == "" { return nil, errIncompleteGRPCRequest } - - reasonCode := revocation.Reason(req.Code) - if reasonCode == ocsp.KeyCompromise && req.Cert == nil && !req.SkipBlockKey { - return nil, fmt.Errorf("cannot revoke and block for KeyCompromise by serial alone") + if req.CrlShard != 0 && !req.Malformed { + return nil, errors.New("non-zero CRLShard is only allowed for malformed certificates (shard is automatic for well formed certificates)") } - if req.SkipBlockKey && reasonCode != ocsp.KeyCompromise { - return nil, fmt.Errorf("cannot skip key blocking for reasons other than KeyCompromise") + if req.Malformed && req.CrlShard == 0 { + return nil, errors.New("CRLShard is required for malformed certificates") } - if _, present := revocation.AdminAllowedReasons[reasonCode]; !present { + reasonCode := revocation.Reason(req.Code) + if !revocation.AdminAllowedReason(reasonCode) { return nil, fmt.Errorf("cannot revoke for reason %d", reasonCode) } - - // If we don't have a real cert, we create a fake cert (containing just the - // serial number, which is all we need) and look up the IssuerID from the db. - // We could instead look up and parse the certificate itself, but we avoid - // that in case we are administratively revoking the certificate because it is - // so badly malformed that it can't be parsed. - var cert *x509.Certificate - var issuerID int64 // TODO(#5152) make this an issuance.IssuerNameID - var err error - if req.Cert == nil { - cert = nil - serial, err := core.StringToSerial(req.Serial) - if err != nil { - return nil, err - } - cert = &x509.Certificate{ - SerialNumber: serial, - } - - status, err := ra.SA.GetCertificateStatus(ctx, &sapb.Serial{Serial: req.Serial}) - if err != nil { - return nil, fmt.Errorf("unable to confirm that serial %q was ever issued: %w", serial, err) - } - issuerID = status.IssuerID - } else { - cert, err = x509.ParseCertificate(req.Cert) - if err != nil { - return nil, err - } - issuerID = int64(issuance.GetIssuerNameID(cert)) + if req.SkipBlockKey && reasonCode != revocation.KeyCompromise { + return nil, fmt.Errorf("cannot skip key blocking for reasons other than KeyCompromise") + } + if reasonCode == revocation.KeyCompromise && req.Malformed { + return nil, fmt.Errorf("cannot revoke malformed certificate for KeyCompromise") } logEvent := certificateRevocationEvent{ ID: core.NewToken(), - Method: "key", + SerialNumber: req.Serial, + Reason: reasonCode, + CRLShard: req.CrlShard, + Method: "admin", AdminName: req.AdminName, - SerialNumber: core.SerialToString(cert.SerialNumber), } // Below this point, do not re-declare `err` (i.e. type `err :=`) in a // nested scope. Doing so will create a new `err` variable that is not // captured by this closure. + var err error defer func() { if err != nil { logEvent.Error = err.Error() } - ra.log.AuditObject("Revocation request:", logEvent) + ra.log.AuditInfo("Revocation request", logEvent) }() - err = ra.revokeCertificate(ctx, cert.SerialNumber, issuerID, revocation.Reason(req.Code)) + var cert *x509.Certificate + var issuerID issuance.NameID + var shard int64 + if req.Cert != nil { + // If the incoming request includes a certificate body, just use that and + // avoid doing any database queries. 
This code path is deprecated and will + // be removed when req.Cert is removed. + cert, err = x509.ParseCertificate(req.Cert) + if err != nil { + return nil, err + } + issuerID = issuance.IssuerNameID(cert) + shard, err = crlShard(cert) + if err != nil { + return nil, err + } + } else if !req.Malformed { + // As long as we don't believe the cert will be malformed, we should + // get the precertificate so we can block its pubkey if necessary. + var certPB *corepb.Certificate + certPB, err = ra.SA.GetLintPrecertificate(ctx, &sapb.Serial{Serial: req.Serial}) + if err != nil { + return nil, err + } + // Note that, although the thing we're parsing here is actually a linting + // precertificate, it has identical issuer info (and therefore an identical + // issuer NameID) to the real thing. + cert, err = x509.ParseCertificate(certPB.Der) + if err != nil { + return nil, err + } + issuerID = issuance.IssuerNameID(cert) + shard, err = crlShard(cert) + if err != nil { + return nil, err + } + } else { + // But if the cert is malformed, we at least still need its IssuerID. + var status *corepb.CertificateStatus + status, err = ra.SA.GetCertificateStatus(ctx, &sapb.Serial{Serial: req.Serial}) + if err != nil { + return nil, fmt.Errorf("unable to confirm that serial %q was ever issued: %w", req.Serial, err) + } + issuerID = issuance.NameID(status.IssuerID) + shard = req.CrlShard + } + + _, err = ra.SA.RevokeCertificate(ctx, &sapb.RevokeCertificateRequest{ + Serial: req.Serial, + Reason: int64(reasonCode), + Date: timestamppb.New(ra.clk.Now()), + IssuerID: int64(issuerID), + ShardIdx: shard, + }) if err != nil { - if req.Code == ocsp.KeyCompromise && errors.Is(err, berrors.AlreadyRevoked) { - err = ra.updateRevocationForKeyCompromise(ctx, cert.SerialNumber, issuerID) + if reasonCode == revocation.KeyCompromise && errors.Is(err, berrors.AlreadyRevoked) { + err = ra.updateRevocationForKeyCompromise(ctx, req.Serial, issuerID) if err != nil { return nil, err } @@ -2294,49 +2047,38 @@ func (ra *RegistrationAuthorityImpl) AdministrativelyRevokeCertificate(ctx conte return nil, err } - if req.Code == ocsp.KeyCompromise && !req.SkipBlockKey { - var digest core.Sha256Digest - digest, err = core.KeyDigest(cert.PublicKey) - if err != nil { - return nil, err + if reasonCode == revocation.KeyCompromise && !req.SkipBlockKey { + if cert == nil { + return nil, errors.New("revoking for key compromise requires providing the certificate's DER") } - _, err = ra.SA.AddBlockedKey(ctx, &sapb.AddBlockedKeyRequest{ - KeyHash: digest[:], - Added: ra.clk.Now().UnixNano(), - Source: "admin-revoker", - Comment: fmt.Sprintf("revoked by %s", req.AdminName), - }) + err = ra.addToBlockedKeys(ctx, cert.PublicKey, "admin-revoker", fmt.Sprintf("revoked by %s", req.AdminName)) if err != nil { return nil, err } } - err = ra.purgeOCSPCache(ctx, cert, int64(issuerID)) - if err != nil { - return nil, err - } - return &emptypb.Empty{}, nil } // DeactivateRegistration deactivates a valid registration -func (ra *RegistrationAuthorityImpl) DeactivateRegistration(ctx context.Context, reg *corepb.Registration) (*emptypb.Empty, error) { - if reg == nil || reg.Id == 0 { +func (ra *RegistrationAuthorityImpl) DeactivateRegistration(ctx context.Context, req *rapb.DeactivateRegistrationRequest) (*corepb.Registration, error) { + if req == nil || req.RegistrationID == 0 { return nil, errIncompleteGRPCRequest } - if reg.Status != string(core.StatusValid) { - return nil, berrors.MalformedError("only valid registrations can be deactivated") - } - _, err := 
ra.SA.DeactivateRegistration(ctx, &sapb.RegistrationID{Id: reg.Id})
+
+	updatedAcct, err := ra.SA.DeactivateRegistration(ctx, &sapb.RegistrationID{Id: req.RegistrationID})
 	if err != nil {
-		return nil, berrors.InternalServerError(err.Error())
+		return nil, err
 	}
-	return &emptypb.Empty{}, nil
+
+	return updatedAcct, nil
 }

 // DeactivateAuthorization deactivates a currently valid authorization
 func (ra *RegistrationAuthorityImpl) DeactivateAuthorization(ctx context.Context, req *corepb.Authorization) (*emptypb.Empty, error) {
-	if req == nil || req.Id == "" || req.Status == "" {
+	ident := identifier.FromProto(req.Identifier)
+
+	if core.IsAnyNilOrZero(req, req.Id, ident, req.Status, req.RegistrationID) {
 		return nil, errIncompleteGRPCRequest
 	}
 	authzID, err := strconv.ParseInt(req.Id, 10, 64)
@@ -2346,23 +2088,18 @@ func (ra *RegistrationAuthorityImpl) DeactivateAuthorization(ctx context.Context
 	if _, err := ra.SA.DeactivateAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}); err != nil {
 		return nil, err
 	}
-	return &emptypb.Empty{}, nil
-}
-
-// checkOrderNames validates that the RA's policy authority allows issuing for
-// each of the names in an order. If any of the names are unacceptable a
-// malformed or rejectedIdentifier error with suberrors for each rejected
-// identifier is returned.
-func (ra *RegistrationAuthorityImpl) checkOrderNames(names []string) error {
-	idents := make([]identifier.ACMEIdentifier, len(names))
-	for i, name := range names {
-		idents[i] = identifier.DNSIdentifier(name)
-	}
-	err := ra.PA.WillingToIssueWildcards(idents)
-	if err != nil {
-		return err
+	if req.Status == string(core.StatusPending) {
+		// Some clients deactivate pending authorizations without attempting them.
+		// We're not sure exactly when this happens, but it's most likely due to
+		// internal errors in the client. From our perspective this consumes
+		// storage resources much as failed authorizations do, so we increment
+		// the failed authorizations limit.
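+		// (Deactivating an authorization in any other state, e.g. one that is
+		// already valid, skips this branch and is not counted against the
+		// limit.)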
+ err = ra.countFailedValidations(ctx, req.RegistrationID, ident) + if err != nil { + return nil, fmt.Errorf("failed to update rate limits: %w", err) + } } - return nil + return &emptypb.Empty{}, nil } // NewOrder creates a new order object @@ -2371,23 +2108,39 @@ func (ra *RegistrationAuthorityImpl) NewOrder(ctx context.Context, req *rapb.New return nil, errIncompleteGRPCRequest } - newOrder := &sapb.NewOrderRequest{ - RegistrationID: req.RegistrationID, - Names: core.UniqueLowerNames(req.Names), + idents := identifier.Normalize(identifier.FromProtoSlice(req.Identifiers)) + + profile, err := ra.profiles.get(req.CertificateProfileName) + if err != nil { + return nil, err + } + + if profile.allowList != nil && !profile.allowList.Contains(req.RegistrationID) { + return nil, berrors.UnauthorizedError("account ID %d is not permitted to use certificate profile %q", + req.RegistrationID, + req.CertificateProfileName, + ) } - if len(newOrder.Names) > ra.maxNames { + if len(idents) > profile.maxNames { return nil, berrors.MalformedError( - "Order cannot contain more than %d DNS names", ra.maxNames) + "Order cannot contain more than %d identifiers", profile.maxNames) + } + + for _, ident := range idents { + if !slices.Contains(profile.identifierTypes, ident.Type) { + return nil, berrors.RejectedIdentifierError("Profile %q does not permit %s type identifiers", req.CertificateProfileName, ident.Type) + } } - // Validate that our policy allows issuing for each of the names in the order - err := ra.checkOrderNames(newOrder.Names) + // Validate that our policy allows issuing for each of the identifiers in + // the order + err = ra.PA.WillingToIssue(idents) if err != nil { return nil, err } - err = wildcardOverlap(newOrder.Names) + err = wildcardOverlap(idents) if err != nil { return nil, err } @@ -2395,8 +2148,8 @@ func (ra *RegistrationAuthorityImpl) NewOrder(ctx context.Context, req *rapb.New // See if there is an existing unexpired pending (or ready) order that can be reused // for this account existingOrder, err := ra.SA.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: newOrder.RegistrationID, - Names: newOrder.Names, + AcctID: req.RegistrationID, + Identifiers: idents.ToProtoSlice(), }) // If there was an error and it wasn't an acceptable "NotFound" error, return // immediately @@ -2408,28 +2161,16 @@ func (ra *RegistrationAuthorityImpl) NewOrder(ctx context.Context, req *rapb.New // Error if an incomplete order is returned. if existingOrder != nil { // Check to see if the expected fields of the existing order are set. - if existingOrder.Id == 0 || existingOrder.Created == 0 || existingOrder.Status == "" || existingOrder.RegistrationID == 0 || existingOrder.Expires == 0 || len(existingOrder.Names) == 0 { + if core.IsAnyNilOrZero(existingOrder.Id, existingOrder.Status, existingOrder.RegistrationID, existingOrder.Identifiers, existingOrder.Created, existingOrder.Expires) { return nil, errIncompleteGRPCResponse } - return existingOrder, nil - } - // Check if there is rate limit space for a new order within the current window - err = ra.checkNewOrdersPerAccountLimit(ctx, newOrder.RegistrationID) - if err != nil { - return nil, err - } - // Check if there is rate limit space for issuing a certificate for the new - // order's names. If there isn't then it doesn't make sense to allow creating - // an order - it will just fail when finalization checks the same limits. 
-	err = ra.checkLimits(ctx, newOrder.Names, newOrder.RegistrationID)
-	if err != nil {
-		return nil, err
-	}
-	if features.Enabled(features.CheckFailedAuthorizationsFirst) {
-		err := ra.checkInvalidAuthorizationLimits(ctx, newOrder.RegistrationID, newOrder.Names)
-		if err != nil {
-			return nil, err
+		// Only reuse the order if the profile matches, even if the profile is
+		// just the empty string (leaving us to choose a default profile).
+		if existingOrder.CertificateProfileName == req.CertificateProfileName {
+			// Track how often we reuse an existing order and how old that order is.
+			ra.orderAges.WithLabelValues("NewOrder").Observe(ra.clk.Since(existingOrder.Created.AsTime()).Seconds())
+			return existingOrder, nil
 		}
 	}

@@ -2438,134 +2179,148 @@ func (ra *RegistrationAuthorityImpl) NewOrder(ctx context.Context, req *rapb.New
 	// `sa.GetAuthorizations` returned an authorization that was very close to
 	// expiry. The resulting pending order that references it would itself end up
 	// expiring very soon.
-	// To prevent this we only return authorizations that are at least 1 day away
-	// from expiring.
-	authzExpiryCutoff := ra.clk.Now().AddDate(0, 0, 1).UnixNano()
-
-	getAuthReq := &sapb.GetAuthorizationsRequest{
-		RegistrationID: newOrder.RegistrationID,
-		Now:            authzExpiryCutoff,
-		Domains:        newOrder.Names,
+	// What is considered "very soon" scales with the associated order's lifetime,
+	// up to a point.
+	minTimeToExpiry := profile.orderLifetime / 8
+	if minTimeToExpiry < time.Hour {
+		minTimeToExpiry = time.Hour
+	} else if minTimeToExpiry > 24*time.Hour {
+		minTimeToExpiry = 24 * time.Hour
 	}
-	existingAuthz, err := ra.SA.GetAuthorizations2(ctx, getAuthReq)
+	authzExpiryCutoff := ra.clk.Now().Add(minTimeToExpiry)
+
+	existingAuthz, err := ra.SA.GetValidAuthorizations2(ctx, &sapb.GetValidAuthorizationsRequest{
+		RegistrationID: req.RegistrationID,
+		ValidUntil:     timestamppb.New(authzExpiryCutoff),
+		Identifiers:    idents.ToProtoSlice(),
+		Profile:        req.CertificateProfileName,
+	})
 	if err != nil {
 		return nil, err
 	}

-	// Collect up the authorizations we found into a map keyed by the domains the
-	// authorizations correspond to
-	nameToExistingAuthz := make(map[string]*corepb.Authorization, len(newOrder.Names))
-	for _, v := range existingAuthz.Authz {
-		// Don't reuse a valid authorization if the reuseValidAuthz flag is
-		// disabled.
-		if v.Authz.Status == string(core.StatusValid) && !ra.reuseValidAuthz {
-			continue
-		}
-		nameToExistingAuthz[v.Domain] = v.Authz
+	identToExistingAuthz, err := bgrpc.PBToAuthzMap(existingAuthz)
+	if err != nil {
+		return nil, err
 	}

-	// For each of the names in the order, if there is an acceptable
-	// existing authz, append it to the order to reuse it. Otherwise track
-	// that there is a missing authz for that name.
-	var missingAuthzNames []string
-	for _, name := range newOrder.Names {
+	// For each of the identifiers in the order, if there is an acceptable
+	// existing authz, append it to the order to reuse it. Otherwise track that
+	// there is a missing authz for that identifier.
+	var newOrderAuthzs []int64
+	var missingAuthzIdents identifier.ACMEIdentifiers
+	for _, ident := range idents {
 		// If there isn't an existing authz, note that it's missing and continue
-		if _, exists := nameToExistingAuthz[name]; !exists {
-			missingAuthzNames = append(missingAuthzNames, name)
+		authz, exists := identToExistingAuthz[ident]
+		if !exists {
+			// There is no reusable authz for this identifier, so mark it as
+			// requiring a new pending authz.
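+			// (The new pending authorization itself is created further below,
+			// in the loop over missingAuthzIdents.)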
+ missingAuthzIdents = append(missingAuthzIdents, ident) continue } - authz := nameToExistingAuthz[name] - // If the identifier is a wildcard and the existing authz only has one - // DNS-01 type challenge we can reuse it. In theory we will - // never get back an authorization for a domain with a wildcard prefix - // that doesn't meet this criteria from SA.GetAuthorizations but we verify - // again to be safe. - if strings.HasPrefix(name, "*.") && - len(authz.Challenges) == 1 && core.AcmeChallenge(authz.Challenges[0].Type) == core.ChallengeTypeDNS01 { - authzID, err := strconv.ParseInt(authz.Id, 10, 64) - if err != nil { - return nil, err - } - newOrder.V2Authorizations = append(newOrder.V2Authorizations, authzID) - continue - } else if !strings.HasPrefix(name, "*.") { - // If the identifier isn't a wildcard, we can reuse any authz - authzID, err := strconv.ParseInt(authz.Id, 10, 64) - if err != nil { - return nil, err - } - newOrder.V2Authorizations = append(newOrder.V2Authorizations, authzID) + + // If the authz is associated with the wrong profile, don't reuse it. + if authz.CertificateProfileName != req.CertificateProfileName { + missingAuthzIdents = append(missingAuthzIdents, ident) + // Delete the authz from the identToExistingAuthz map since we are not reusing it. + delete(identToExistingAuthz, ident) continue } - // Delete the authz from the nameToExistingAuthz map since we are not reusing it. - delete(nameToExistingAuthz, name) - // If we reached this point then the existing authz was not acceptable for - // reuse and we need to mark the name as requiring a new pending authz - missingAuthzNames = append(missingAuthzNames, name) - } + // This is only used for our metrics. + authzAge := (profile.validAuthzLifetime - authz.Expires.Sub(ra.clk.Now())).Seconds() + if authz.Status == core.StatusPending { + authzAge = (profile.pendingAuthzLifetime - authz.Expires.Sub(ra.clk.Now())).Seconds() + } + + // If the identifier is a wildcard DNS name, all challenges must be + // DNS-01 or DNS-Account-01. The PA guarantees this at order creation + // time, but we verify again to be safe. + if ident.Type == identifier.TypeDNS && strings.HasPrefix(ident.Value, "*.") { + for _, chall := range authz.Challenges { + if chall.Type != core.ChallengeTypeDNS01 && !(features.Get().DNSAccount01Enabled && chall.Type == core.ChallengeTypeDNSAccount01) { + return nil, berrors.InternalServerError( + "SA.GetAuthorizations returned a DNS wildcard authz (%s) with invalid challenge(s)", + authz.ID, + ) + } + } + } - // If the order isn't fully authorized we need to check that the client has - // rate limit room for more pending authorizations - if len(missingAuthzNames) > 0 { - err := ra.checkPendingAuthorizationLimit(ctx, newOrder.RegistrationID) + // If we reached this point then the existing authz was acceptable for + // reuse. + authzID, err := strconv.ParseInt(authz.ID, 10, 64) if err != nil { return nil, err } - if !features.Enabled(features.CheckFailedAuthorizationsFirst) { - err := ra.checkInvalidAuthorizationLimits(ctx, newOrder.RegistrationID, missingAuthzNames) - if err != nil { - return nil, err - } - } + newOrderAuthzs = append(newOrderAuthzs, authzID) + ra.authzAges.WithLabelValues("NewOrder", string(authz.Status)).Observe(authzAge) } - // Loop through each of the names missing authzs and create a new pending - // authorization for each. 
-	var newAuthzs []*corepb.Authorization
-	for _, name := range missingAuthzNames {
-		pb, err := ra.createPendingAuthz(ctx, newOrder.RegistrationID, identifier.ACMEIdentifier{
-			Type:  identifier.DNS,
-			Value: name,
-		})
+	// Loop through each of the identifiers missing authzs and create a new
+	// pending authorization for each.
+	var newAuthzs []*sapb.NewAuthzRequest
+	for _, ident := range missingAuthzIdents {
+		challTypes, err := ra.PA.ChallengeTypesFor(ident)
 		if err != nil {
 			return nil, err
 		}
-		newAuthzs = append(newAuthzs, pb)
+
+		var challStrs []string
+		for _, t := range challTypes {
+			challStrs = append(challStrs, string(t))
+		}
+
+		newAuthzs = append(newAuthzs, &sapb.NewAuthzRequest{
+			Identifier:     ident.ToProto(),
+			RegistrationID: req.RegistrationID,
+			Expires:        timestamppb.New(ra.clk.Now().Add(profile.pendingAuthzLifetime).Truncate(time.Second)),
+			ChallengeTypes: challStrs,
+			Token:          core.NewToken(),
+		})
+
+		ra.authzAges.WithLabelValues("NewOrder", string(core.StatusPending)).Observe(0)
 	}

 	// Start with the order's own expiry as the minExpiry. We only care
 	// about authz expiries that are sooner than the order's expiry.
-	minExpiry := ra.clk.Now().Add(ra.orderLifetime)
+	minExpiry := ra.clk.Now().Add(profile.orderLifetime)

 	// Check the reused authorizations to see if any have an expiry before the
 	// minExpiry (the order's lifetime)
-	for _, authz := range nameToExistingAuthz {
+	for _, authz := range identToExistingAuthz {
 		// An authz without an expiry is an unexpected internal server event
-		if authz.Expires == 0 {
+		if core.IsAnyNilOrZero(authz.Expires) {
 			return nil, berrors.InternalServerError(
 				"SA.GetAuthorizations returned an authz (%s) with zero expiry",
-				authz.Id)
+				authz.ID)
 		}
 		// If the reused authorization expires before the minExpiry, its expiry
 		// is the new minExpiry.
-		authzExpiry := time.Unix(0, authz.Expires)
-		if authzExpiry.Before(minExpiry) {
-			minExpiry = authzExpiry
+		if authz.Expires.Before(minExpiry) {
+			minExpiry = *authz.Expires
 		}
 	}

 	// If the newly created pending authzs have an expiry sooner than the
 	// minExpiry, the minExpiry is the pending authz expiry.
 	if len(newAuthzs) > 0 {
-		newPendingAuthzExpires := ra.clk.Now().Add(ra.pendingAuthorizationLifetime)
+		newPendingAuthzExpires := ra.clk.Now().Add(profile.pendingAuthzLifetime)
 		if newPendingAuthzExpires.Before(minExpiry) {
 			minExpiry = newPendingAuthzExpires
 		}
 	}
-	// Set the order's expiry to the minimum expiry. The db doesn't store
-	// sub-second values, so truncate here.
-	newOrder.Expires = minExpiry.Truncate(time.Second).UnixNano()
+	newOrder := &sapb.NewOrderRequest{
+		RegistrationID:         req.RegistrationID,
+		Identifiers:            idents.ToProtoSlice(),
+		CertificateProfileName: req.CertificateProfileName,
+		Replaces:               req.Replaces,
+		ReplacesSerial:         req.ReplacesSerial,
+		// Set the order's expiry to the minimum expiry. The db doesn't store
+		// sub-second values, so truncate here.
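+		// (Truncation only moves the expiry earlier, so the stored order can
+		// never outlive its soonest-expiring authorization.)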
+ Expires: timestamppb.New(minExpiry.Truncate(time.Second)), + V2Authorizations: newOrderAuthzs, + } newOrderAndAuthzsReq := &sapb.NewOrderAndAuthzsRequest{ NewOrder: newOrder, NewAuthzs: newAuthzs, @@ -2574,59 +2329,26 @@ func (ra *RegistrationAuthorityImpl) NewOrder(ctx context.Context, req *rapb.New if err != nil { return nil, err } - if storedOrder.Id == 0 || storedOrder.Created == 0 || storedOrder.Status == "" || storedOrder.RegistrationID == 0 || storedOrder.Expires == 0 || len(storedOrder.Names) == 0 { + + if core.IsAnyNilOrZero(storedOrder.Id, storedOrder.Status, storedOrder.RegistrationID, storedOrder.Identifiers, storedOrder.Created, storedOrder.Expires) { return nil, errIncompleteGRPCResponse } + ra.orderAges.WithLabelValues("NewOrder").Observe(0) - // Note how many names are being requested in this certificate order. - ra.namesPerCert.With(prometheus.Labels{"type": "requested"}).Observe(float64(len(storedOrder.Names))) + // Note how many identifiers are being requested in this certificate order. + ra.namesPerCert.With(prometheus.Labels{"type": "requested"}).Observe(float64(len(storedOrder.Identifiers))) return storedOrder, nil } -// createPendingAuthz checks that a name is allowed for issuance and creates the -// necessary challenges for it and puts this and all of the relevant information -// into a corepb.Authorization for transmission to the SA to be stored -func (ra *RegistrationAuthorityImpl) createPendingAuthz(ctx context.Context, reg int64, identifier identifier.ACMEIdentifier) (*corepb.Authorization, error) { - authz := &corepb.Authorization{ - Identifier: identifier.Value, - RegistrationID: reg, - Status: string(core.StatusPending), - Expires: ra.clk.Now().Add(ra.pendingAuthorizationLifetime).Truncate(time.Second).UnixNano(), - } - - // Create challenges. The WFE will update them with URIs before sending them out. - challenges, err := ra.PA.ChallengesFor(identifier) - if err != nil { - // The only time ChallengesFor errors it is a fatal configuration error - // where challenges required by policy for an identifier are not enabled. We - // want to treat this as an internal server error. - return nil, berrors.InternalServerError(err.Error()) - } - // Check each challenge for sanity. - for _, challenge := range challenges { - err := challenge.CheckConsistencyForClientOffer() - if err != nil { - // berrors.InternalServerError because we generated these challenges, they should - // be OK. - err = berrors.InternalServerError("challenge didn't pass sanity check: %+v", challenge) - return nil, err - } - challPB, err := bgrpc.ChallengeToPB(challenge) - if err != nil { - return nil, err - } - authz.Challenges = append(authz.Challenges, challPB) - } - return authz, nil -} - -// wildcardOverlap takes a slice of domain names and returns an error if any of +// wildcardOverlap takes a slice of identifiers and returns an error if any of // them is a non-wildcard FQDN that overlaps with a wildcard domain in the map. 
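+//
+// For example (an illustration, not an exhaustive description): an order
+// containing both *.example.com and www.example.com is rejected, since the
+// wildcard already covers the non-wildcard name.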
-func wildcardOverlap(dnsNames []string) error {
-	nameMap := make(map[string]bool, len(dnsNames))
-	for _, v := range dnsNames {
-		nameMap[v] = true
+func wildcardOverlap(idents identifier.ACMEIdentifiers) error {
+	nameMap := make(map[string]bool, len(idents))
+	for _, v := range idents {
+		if v.Type == identifier.TypeDNS {
+			nameMap[v.Value] = true
+		}
 	}
 	for name := range nameMap {
 		if name[0] == '*' {
@@ -2642,16 +2364,85 @@ func wildcardOverlap(dnsNames []string) error {
 	return nil
 }

-// validateContactsPresent will return an error if the contacts []string
-// len is greater than zero and the contactsPresent bool is false. We
-// don't care about any other cases. If the length of the contacts is zero
-// and contactsPresent is true, it seems like a mismatch but we have to
-// assume that the client is requesting to update the contacts field with
-// by removing the existing contacts value so we don't want to return an
-// error here.
-func validateContactsPresent(contacts []string, contactsPresent bool) error {
-	if len(contacts) > 0 && !contactsPresent {
-		return berrors.InternalServerError("account contacts present but contactsPresent false")
+// UnpauseAccount receives a validated account unpause request from the SFE and
+// instructs the SA to unpause that account. If the account cannot be unpaused,
+// an error is returned.
+func (ra *RegistrationAuthorityImpl) UnpauseAccount(ctx context.Context, request *rapb.UnpauseAccountRequest) (*rapb.UnpauseAccountResponse, error) {
+	if core.IsAnyNilOrZero(request.RegistrationID) {
+		return nil, errIncompleteGRPCRequest
+	}
+
+	count, err := ra.SA.UnpauseAccount(ctx, &sapb.RegistrationID{
+		Id: request.RegistrationID,
+	})
+	if err != nil {
+		return nil, berrors.InternalServerError("failed to unpause account ID %d", request.RegistrationID)
+	}
+
+	return &rapb.UnpauseAccountResponse{Count: count.Count}, nil
+}
+
+func (ra *RegistrationAuthorityImpl) GetAuthorization(ctx context.Context, req *rapb.GetAuthorizationRequest) (*corepb.Authorization, error) {
+	if core.IsAnyNilOrZero(req, req.Id) {
+		return nil, errIncompleteGRPCRequest
 	}
-	return nil
+
+	authz, err := ra.SA.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: req.Id})
+	if err != nil {
+		return nil, fmt.Errorf("getting authz from SA: %w", err)
+	}
+
+	// Filter out any challenges which are currently disabled, so that the client
+	// doesn't attempt them.
+	challs := []*corepb.Challenge{}
+	for _, chall := range authz.Challenges {
+		if ra.PA.ChallengeTypeEnabled(core.AcmeChallenge(chall.Type)) {
+			challs = append(challs, chall)
+		}
+	}
+
+	authz.Challenges = challs
+	return authz, nil
+}
+
+// AddRateLimitOverride dispatches an SA RPC to add a rate limit override to the
+// database. If the override already exists, it will be updated. If the override
+// does not exist, it will be inserted and enabled. If the override exists but
+// has been disabled, it will be updated but not re-enabled. The status of the
+// override is returned in the Enabled field of the response. To re-enable an
+// override, use sa.EnableRateLimitOverride.
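+//
+// As a hypothetical illustration (field values invented for this example, not
+// recommendations): to loosen a single account's limit, a caller would set
+// LimitEnum to the limit being overridden, BucketKey to that account's bucket
+// key (see the ratelimits package for how keys are derived), and Count, Burst,
+// and Period to the replacement limit values, plus a Comment explaining why
+// the override exists.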
+func (ra *RegistrationAuthorityImpl) AddRateLimitOverride(ctx context.Context, req *rapb.AddRateLimitOverrideRequest) (*rapb.AddRateLimitOverrideResponse, error) { + if core.IsAnyNilOrZero(req, req.LimitEnum, req.BucketKey, req.Count, req.Burst, req.Period, req.Comment) { + return nil, errIncompleteGRPCRequest + } + + resp, err := ra.SA.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{ + Override: &sapb.RateLimitOverride{ + LimitEnum: req.LimitEnum, + BucketKey: req.BucketKey, + Comment: req.Comment, + Period: req.Period, + Count: req.Count, + Burst: req.Burst, + }, + }) + if err != nil { + return nil, fmt.Errorf("adding rate limit override: %w", err) + } + + return &rapb.AddRateLimitOverrideResponse{ + Inserted: resp.Inserted, + Enabled: resp.Enabled, + }, nil +} + +// Drain blocks until all detached goroutines are done. +// +// The RA runs detached goroutines for challenge validation and finalization, +// so that ACME responses can be returned to the user promptly while work continues. +// +// The main goroutine should call this before exiting to avoid canceling the work +// being done in detached goroutines. +func (ra *RegistrationAuthorityImpl) Drain() { + ra.drainWG.Wait() } diff --git a/ra/ra_test.go b/ra/ra_test.go index 9233d3ea8ba..5d49264db07 100644 --- a/ra/ra_test.go +++ b/ra/ra_test.go @@ -7,18 +7,18 @@ import ( "crypto/elliptic" "crypto/rand" "crypto/rsa" - "crypto/sha256" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" "encoding/json" "encoding/pem" "errors" "fmt" - "io/ioutil" + "math" "math/big" - mrand "math/rand" - "net" - "os" + mrand "math/rand/v2" + "net/netip" "regexp" "strconv" "strings" @@ -26,17 +26,23 @@ import ( "testing" "time" - ctasn1 "github.com/google/certificate-transparency-go/asn1" - ctx509 "github.com/google/certificate-transparency-go/x509" - ctpkix "github.com/google/certificate-transparency-go/x509/pkix" + "github.com/go-jose/go-jose/v4" "github.com/jmhodges/clock" - akamaipb "github.com/letsencrypt/boulder/akamai/proto" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/allowlist" capb "github.com/letsencrypt/boulder/ca/proto" - "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" "github.com/letsencrypt/boulder/ctpolicy" - "github.com/letsencrypt/boulder/ctpolicy/ctconfig" + "github.com/letsencrypt/boulder/ctpolicy/loglist" berrors "github.com/letsencrypt/boulder/errors" "github.com/letsencrypt/boulder/features" "github.com/letsencrypt/boulder/goodkey" @@ -49,67 +55,91 @@ import ( "github.com/letsencrypt/boulder/policy" pubpb "github.com/letsencrypt/boulder/publisher/proto" rapb "github.com/letsencrypt/boulder/ra/proto" - "github.com/letsencrypt/boulder/ratelimit" + "github.com/letsencrypt/boulder/ratelimits" + "github.com/letsencrypt/boulder/revocation" "github.com/letsencrypt/boulder/sa" sapb "github.com/letsencrypt/boulder/sa/proto" "github.com/letsencrypt/boulder/test" isa "github.com/letsencrypt/boulder/test/inmem/sa" "github.com/letsencrypt/boulder/test/vars" + "github.com/letsencrypt/boulder/va" vapb "github.com/letsencrypt/boulder/va/proto" - "github.com/prometheus/client_golang/prometheus" - 
"github.com/weppos/publicsuffix-go/publicsuffix" - "golang.org/x/crypto/ocsp" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - jose "gopkg.in/square/go-jose.v2" ) -func createPendingAuthorization(t *testing.T, sa sapb.StorageAuthorityClient, domain string, exp time.Time) *corepb.Authorization { +// randomDomain creates a random domain name for testing. +// +// panics if crypto/rand.Rand.Read fails. +func randomDomain() string { + var bytes [4]byte + _, err := rand.Read(bytes[:]) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x.example.com", bytes[:]) +} + +// randomIPv6 creates a random IPv6 netip.Addr for testing. It uses a real IPv6 +// address range, not a test/documentation range. +// +// panics if crypto/rand.Rand.Read or netip.AddrFromSlice fails. +func randomIPv6() netip.Addr { + var ipBytes [10]byte + _, err := rand.Read(ipBytes[:]) + if err != nil { + panic(err) + } + ipPrefix, err := hex.DecodeString("2602080a600f") + if err != nil { + panic(err) + } + ip, ok := netip.AddrFromSlice(bytes.Join([][]byte{ipPrefix, ipBytes[:]}, nil)) + if !ok { + panic("Couldn't parse random IP to netip.Addr") + } + return ip +} + +func createPendingAuthorization(t *testing.T, sa sapb.StorageAuthorityClient, regID int64, ident identifier.ACMEIdentifier, exp time.Time) *corepb.Authorization { t.Helper() - authz := core.Authorization{ - Identifier: identifier.DNSIdentifier(domain), - RegistrationID: Registration.Id, - Status: "pending", - Expires: &exp, - Challenges: []core.Challenge{ - { - Token: core.NewToken(), - Type: core.ChallengeTypeHTTP01, - Status: core.StatusPending, - }, - { - Token: core.NewToken(), - Type: core.ChallengeTypeDNS01, - Status: core.StatusPending, + res, err := sa.NewOrderAndAuthzs( + context.Background(), + &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: regID, + Expires: timestamppb.New(exp), + Identifiers: []*corepb.Identifier{ident.ToProto()}, }, - { - Token: core.NewToken(), - Type: core.ChallengeTypeTLSALPN01, - Status: core.StatusPending, + NewAuthzs: []*sapb.NewAuthzRequest{ + { + Identifier: ident.ToProto(), + RegistrationID: regID, + Expires: timestamppb.New(exp), + ChallengeTypes: []string{ + string(core.ChallengeTypeHTTP01), + string(core.ChallengeTypeDNS01), + string(core.ChallengeTypeTLSALPN01)}, + Token: core.NewToken(), + }, }, }, - } - authzPB, err := bgrpc.AuthzToPB(authz) - test.AssertNotError(t, err, "AuthzToPB failed") - ids, err := sa.NewAuthorizations2(context.Background(), &sapb.AddPendingAuthorizationsRequest{ - Authz: []*corepb.Authorization{authzPB}, - }) - test.AssertNotError(t, err, "sa.NewAuthorizations2 failed") - return getAuthorization(t, fmt.Sprint(ids.Ids[0]), sa) + ) + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") + + return getAuthorization(t, fmt.Sprint(res.V2Authorizations[0]), sa) } -func createFinalizedAuthorization(t *testing.T, sa sapb.StorageAuthorityClient, domain string, exp time.Time, status string, attemptedAt time.Time) int64 { +func createFinalizedAuthorization(t *testing.T, saClient sapb.StorageAuthorityClient, regID int64, ident identifier.ACMEIdentifier, exp time.Time, chall core.AcmeChallenge, attemptedAt time.Time) int64 { t.Helper() - pending := createPendingAuthorization(t, sa, domain, exp) + pending := createPendingAuthorization(t, saClient, regID, ident, exp) pendingID, err := strconv.ParseInt(pending.Id, 10, 64) test.AssertNotError(t, err, "strconv.ParseInt failed") - _, err = sa.FinalizeAuthorization2(context.Background(), 
&sapb.FinalizeAuthorizationRequest{ + _, err = saClient.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ Id: pendingID, - Status: status, - Expires: exp.UnixNano(), - Attempted: string(core.ChallengeTypeHTTP01), - AttemptedAt: attemptedAt.UnixNano(), + Status: "valid", + Expires: timestamppb.New(exp), + Attempted: string(chall), + AttemptedAt: timestamppb.New(attemptedAt), }) test.AssertNotError(t, err, "sa.FinalizeAuthorizations2 failed") return pendingID @@ -124,19 +154,19 @@ func getAuthorization(t *testing.T, id string, sa sapb.StorageAuthorityClient) * return dbAuthz } -func challTypeIndex(t *testing.T, challenges []*corepb.Challenge, typ core.AcmeChallenge) int64 { +func dnsChallIdx(t *testing.T, challenges []*corepb.Challenge) int64 { t.Helper() var challIdx int64 var set bool for i, ch := range challenges { - if core.AcmeChallenge(ch.Type) == typ { + if core.AcmeChallenge(ch.Type) == core.ChallengeTypeDNS01 { challIdx = int64(i) set = true break } } if !set { - t.Errorf("challTypeIndex didn't find challenge of type: %s", typ) + t.Errorf("dnsChallIdx didn't find challenge of type DNS-01") } return challIdx } @@ -145,15 +175,58 @@ func numAuthorizations(o *corepb.Order) int { return len(o.V2Authorizations) } +// def is a test-only helper that returns the default validation profile +// and is guaranteed to succeed because the validationProfile constructor +// ensures that the default name has a corresponding profile. +func (vp *validationProfiles) def() *validationProfile { + return vp.byName[vp.defaultName] +} + type DummyValidationAuthority struct { - request chan *vapb.PerformValidationRequest - ResultError error - ResultReturn *vapb.ValidationResult + doDCVRequest chan *vapb.PerformValidationRequest + doDCVError error + doDCVResult *vapb.ValidationResult + + doCAARequest chan *vapb.IsCAAValidRequest + doCAAError error + doCAAResponse *vapb.IsCAAValidResponse } func (dva *DummyValidationAuthority) PerformValidation(ctx context.Context, req *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) { - dva.request <- req - return dva.ResultReturn, dva.ResultError + dcvRes, err := dva.DoDCV(ctx, req) + if err != nil { + return nil, err + } + if dcvRes.Problem != nil { + return dcvRes, nil + } + caaResp, err := dva.DoCAA(ctx, &vapb.IsCAAValidRequest{ + Identifier: req.Identifier, + ValidationMethod: req.Challenge.Type, + AccountURIID: req.Authz.RegID, + AuthzID: req.Authz.Id, + }) + if err != nil { + return nil, err + } + return &vapb.ValidationResult{ + Records: dcvRes.Records, + Problem: caaResp.Problem, + }, nil +} + +func (dva *DummyValidationAuthority) IsCAAValid(ctx context.Context, req *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) { + return nil, status.Error(codes.Unimplemented, "IsCAAValid not implemented") +} + +func (dva *DummyValidationAuthority) DoDCV(ctx context.Context, req *vapb.PerformValidationRequest, _ ...grpc.CallOption) (*vapb.ValidationResult, error) { + dva.doDCVRequest <- req + return dva.doDCVResult, dva.doDCVError +} + +func (dva *DummyValidationAuthority) DoCAA(ctx context.Context, req *vapb.IsCAAValidRequest, _ ...grpc.CallOption) (*vapb.IsCAAValidResponse, error) { + dva.doCAARequest <- req + return dva.doCAAResponse, dva.doCAAError } var ( @@ -205,89 +278,14 @@ var ( ExampleCSR = &x509.CertificateRequest{} - Registration = &corepb.Registration{Id: 1} - Identifier = "not-example.com" log = blog.UseMock() ) -var testKeyPolicy = goodkey.KeyPolicy{ - AllowRSA: true, 
- AllowECDSANISTP256: true, - AllowECDSANISTP384: true, -} - var ctx = context.Background() -// dummyRateLimitConfig satisfies the ratelimit.RateLimitConfig interface while -// allowing easy mocking of the individual RateLimitPolicy's -type dummyRateLimitConfig struct { - CertificatesPerNamePolicy ratelimit.RateLimitPolicy - RegistrationsPerIPPolicy ratelimit.RateLimitPolicy - RegistrationsPerIPRangePolicy ratelimit.RateLimitPolicy - PendingAuthorizationsPerAccountPolicy ratelimit.RateLimitPolicy - PendingOrdersPerAccountPolicy ratelimit.RateLimitPolicy - NewOrdersPerAccountPolicy ratelimit.RateLimitPolicy - InvalidAuthorizationsPerAccountPolicy ratelimit.RateLimitPolicy - CertificatesPerFQDNSetPolicy ratelimit.RateLimitPolicy - CertificatesPerFQDNSetFastPolicy ratelimit.RateLimitPolicy -} - -func (r *dummyRateLimitConfig) CertificatesPerName() ratelimit.RateLimitPolicy { - return r.CertificatesPerNamePolicy -} - -func (r *dummyRateLimitConfig) RegistrationsPerIP() ratelimit.RateLimitPolicy { - return r.RegistrationsPerIPPolicy -} - -func (r *dummyRateLimitConfig) RegistrationsPerIPRange() ratelimit.RateLimitPolicy { - return r.RegistrationsPerIPRangePolicy -} - -func (r *dummyRateLimitConfig) PendingAuthorizationsPerAccount() ratelimit.RateLimitPolicy { - return r.PendingAuthorizationsPerAccountPolicy -} - -func (r *dummyRateLimitConfig) PendingOrdersPerAccount() ratelimit.RateLimitPolicy { - return r.PendingOrdersPerAccountPolicy -} - -func (r *dummyRateLimitConfig) NewOrdersPerAccount() ratelimit.RateLimitPolicy { - return r.NewOrdersPerAccountPolicy -} - -func (r *dummyRateLimitConfig) InvalidAuthorizationsPerAccount() ratelimit.RateLimitPolicy { - return r.InvalidAuthorizationsPerAccountPolicy -} - -func (r *dummyRateLimitConfig) CertificatesPerFQDNSet() ratelimit.RateLimitPolicy { - return r.CertificatesPerFQDNSetPolicy -} - -func (r *dummyRateLimitConfig) CertificatesPerFQDNSetFast() ratelimit.RateLimitPolicy { - return r.CertificatesPerFQDNSetFastPolicy -} - -func (r *dummyRateLimitConfig) LoadPolicies(contents []byte) error { - return nil // NOP - unrequired behaviour for this mock -} - -func parseAndMarshalIP(t *testing.T, ip string) []byte { - ipBytes, err := net.ParseIP(ip).MarshalText() - test.AssertNotError(t, err, "failed to marshal ip") - return ipBytes -} - -func newAcctKey(t *testing.T) []byte { - key := &jose.JSONWebKey{Key: testKey()} - acctKey, err := key.MarshalJSON() - test.AssertNotError(t, err, "failed to marshal account key") - return acctKey -} - -func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAuthorityClient, *RegistrationAuthorityImpl, clock.FakeClock, func()) { +func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAuthorityClient, *RegistrationAuthorityImpl, ratelimits.Source, clock.FakeClock, *corepb.Registration, func()) { err := json.Unmarshal(AccountKeyJSONA, &AccountKeyA) test.AssertNotError(t, err, "Failed to unmarshal public JWK") err = json.Unmarshal(AccountKeyJSONB, &AccountKeyB) @@ -303,29 +301,39 @@ func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAutho fc := clock.NewFake() // Set to some non-zero time. 
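+	// (A fixed, fake clock keeps the expiry and rate limit calculations in
+	// these tests deterministic.)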
-	fc.Set(time.Date(2015, 3, 4, 5, 0, 0, 0, time.UTC))
+	fc.Set(time.Date(2020, 3, 4, 5, 0, 0, 0, time.UTC))

-	dbMap, err := sa.NewDbMap(vars.DBConnSA, sa.DbSettings{})
+	dbMap, err := sa.DBMapForTest(vars.DBConnSA)
 	if err != nil {
 		t.Fatalf("Failed to create dbMap: %s", err)
 	}

-	ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, nil, fc, log, metrics.NoopRegisterer, 1)
+	ssa, err := sa.NewSQLStorageAuthority(dbMap, dbMap, nil, 1, 0, fc, log, metrics.NoopRegisterer)
 	if err != nil {
 		t.Fatalf("Failed to create SA: %s", err)
 	}
 	sa := &isa.SA{Impl: ssa}
-	saDBCleanUp := test.ResetSATestDatabase(t)
+	saDBCleanUp := test.ResetBoulderTestDatabase(t)

-	va := &DummyValidationAuthority{request: make(chan *vapb.PerformValidationRequest, 1)}
+	dummyVA := &DummyValidationAuthority{
+		doDCVRequest: make(chan *vapb.PerformValidationRequest, 1),
+		doCAARequest: make(chan *vapb.IsCAAValidRequest, 1),
+	}
+	va := va.RemoteClients{VAClient: dummyVA, CAAClient: dummyVA}

-	pa, err := policy.New(map[core.AcmeChallenge]bool{
-		core.ChallengeTypeHTTP01: true,
-		core.ChallengeTypeDNS01:  true,
-	})
+	pa, err := policy.New(
+		map[identifier.IdentifierType]bool{
+			identifier.TypeDNS: true,
+			identifier.TypeIP:  true,
+		},
+		map[core.AcmeChallenge]bool{
+			core.ChallengeTypeHTTP01: true,
+			core.ChallengeTypeDNS01:  true,
+		},
+		blog.NewMock())
 	test.AssertNotError(t, err, "Couldn't create PA")
-	err = pa.SetHostnamePolicyFile("../test/hostname-policy.yaml")
-	test.AssertNotError(t, err, "Couldn't set hostname policy")
+	err = pa.LoadIdentPolicyFile("../test/ident-policy.yaml")
+	test.AssertNotError(t, err, "Couldn't set identifier policy")

 	stats := metrics.NoopRegisterer

@@ -339,31 +347,51 @@ func initAuthorities(t *testing.T) (*DummyValidationAuthority, sapb.StorageAutho
 	block, _ := pem.Decode(CSRPEM)
 	ExampleCSR, _ = x509.ParseCertificateRequest(block.Bytes)

-	initialIP, err := net.ParseIP("3.2.3.3").MarshalText()
-	test.AssertNotError(t, err, "Couldn't create initial IP")
-	Registration, _ = ssa.NewRegistration(ctx, &corepb.Registration{
-		Key:       AccountKeyJSONA,
-		InitialIP: initialIP,
-		Status:    string(core.StatusValid),
+	registration, err := sa.NewRegistration(ctx, &corepb.Registration{
+		Key:    AccountKeyJSONA,
+		Status: string(core.StatusValid),
 	})
-
-	ctp := ctpolicy.New(&mocks.PublisherClient{}, nil, nil, log, metrics.NoopRegisterer)
-
-	ra := NewRegistrationAuthorityImpl(fc,
-		log,
-		stats,
-		1, testKeyPolicy, 100, true, 300*24*time.Hour, 7*24*time.Hour, nil, noopCAA{}, 0, ctp, nil, nil)
+	test.AssertNotError(t, err, "Failed to create initial registration")
+
+	ctp := ctpolicy.New(&mocks.PublisherClient{}, loglist.List{
+		{Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")},
+		{Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
+	}, nil, nil, 0, log, metrics.NoopRegisterer)
+
+	rlSource := ratelimits.NewInmemSource()
+	limiter, err := ratelimits.NewLimiter(fc, rlSource, stats)
+	test.AssertNotError(t, err, "making limiter")
+	txnBuilder, err := ratelimits.NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, log)
+	test.AssertNotError(t, err, "making transaction composer")
+
+	testKeyPolicy, err := goodkey.NewPolicy(nil, nil)
+	test.AssertNotError(t, err, "making keypolicy")
+
+	profiles := &validationProfiles{
+		defaultName: "test",
+		byName: map[string]*validationProfile{"test": {
+			pendingAuthzLifetime: 7 * 24 * time.Hour,
+			validAuthzLifetime:   300 * 24 * time.Hour,
+			orderLifetime:        7 * 24 * time.Hour,
+			maxNames:             100,
+
identifierTypes: []identifier.IdentifierType{identifier.TypeDNS}, + }}, + } + + ra := NewRegistrationAuthorityImpl( + fc, log, stats, + 1, testKeyPolicy, limiter, txnBuilder, 100, + profiles, nil, 5*time.Minute, ctp, nil) ra.SA = sa ra.VA = va ra.CA = ca ra.PA = pa - ra.reuseValidAuthz = true - - return va, sa, ra, fc, cleanUp + return dummyVA, sa, ra, rlSource, fc, registration, cleanUp } func TestValidateContacts(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() ansible := "ansible:earth.sol.milkyway.laniakea/letsencrypt" @@ -374,73 +402,78 @@ func TestValidateContacts(t *testing.T) { unparsable := "mailto:a@email.com, b@email.com" forbidden := "mailto:a@example.org" - err := ra.validateContacts(context.Background(), []string{}) + err := ra.validateContacts([]string{}) test.AssertNotError(t, err, "No Contacts") - err = ra.validateContacts(context.Background(), []string{validEmail, otherValidEmail}) + err = ra.validateContacts([]string{validEmail, otherValidEmail}) test.AssertError(t, err, "Too Many Contacts") - err = ra.validateContacts(context.Background(), []string{validEmail}) + err = ra.validateContacts([]string{validEmail}) test.AssertNotError(t, err, "Valid Email") - err = ra.validateContacts(context.Background(), []string{malformedEmail}) + err = ra.validateContacts([]string{malformedEmail}) test.AssertError(t, err, "Malformed Email") - err = ra.validateContacts(context.Background(), []string{ansible}) + err = ra.validateContacts([]string{ansible}) test.AssertError(t, err, "Unknown scheme") - err = ra.validateContacts(context.Background(), []string{""}) + err = ra.validateContacts([]string{""}) test.AssertError(t, err, "Empty URL") - err = ra.validateContacts(context.Background(), []string{nonASCII}) + err = ra.validateContacts([]string{nonASCII}) test.AssertError(t, err, "Non ASCII email") - err = ra.validateContacts(context.Background(), []string{unparsable}) + err = ra.validateContacts([]string{unparsable}) test.AssertError(t, err, "Unparsable email") - err = ra.validateContacts(context.Background(), []string{forbidden}) + err = ra.validateContacts([]string{forbidden}) test.AssertError(t, err, "Forbidden email") - err = ra.validateContacts(context.Background(), []string{"mailto:admin@localhost"}) + err = ra.validateContacts([]string{"mailto:admin@localhost"}) test.AssertError(t, err, "Forbidden email") - err = ra.validateContacts(context.Background(), []string{"mailto:admin@example.not.a.iana.suffix"}) + err = ra.validateContacts([]string{"mailto:admin@example.not.a.iana.suffix"}) test.AssertError(t, err, "Forbidden email") - err = ra.validateContacts(context.Background(), []string{"mailto:admin@1.2.3.4"}) + err = ra.validateContacts([]string{"mailto:admin@1.2.3.4"}) test.AssertError(t, err, "Forbidden email") - err = ra.validateContacts(context.Background(), []string{"mailto:admin@[1.2.3.4]"}) + err = ra.validateContacts([]string{"mailto:admin@[1.2.3.4]"}) test.AssertError(t, err, "Forbidden email") - err = ra.validateContacts(context.Background(), []string{"mailto:admin@a.com?no-reminder-emails"}) + err = ra.validateContacts([]string{"mailto:admin@a.com?no-reminder-emails"}) test.AssertError(t, err, "No hfields in email") + err = ra.validateContacts([]string{"mailto:example@a.com?"}) + test.AssertError(t, err, "No hfields in email") + + err = ra.validateContacts([]string{"mailto:example@a.com#"}) + test.AssertError(t, err, "No fragment") + + err = 
ra.validateContacts([]string{"mailto:example@a.com#optional"}) + test.AssertError(t, err, "No fragment") + // The registrations.contact field is VARCHAR(191). 175 'a' characters plus // the prefix "mailto:" and the suffix "@a.com" makes exactly 191 bytes of // encoded JSON. The correct size to hit our maximum DB field length. var longStringBuf strings.Builder longStringBuf.WriteString("mailto:") - for i := 0; i < 175; i++ { + for range 175 { longStringBuf.WriteRune('a') } longStringBuf.WriteString("@a.com") - err = ra.validateContacts(context.Background(), []string{longStringBuf.String()}) + err = ra.validateContacts([]string{longStringBuf.String()}) test.AssertError(t, err, "Too long contacts") } func TestNewRegistration(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) + _, sa, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - mailto := "mailto:foo@letsencrypt.org" acctKeyB, err := AccountKeyB.MarshalJSON() test.AssertNotError(t, err, "failed to marshal account key") input := &corepb.Registration{ - Contact: []string{mailto}, - ContactsPresent: true, - Key: acctKeyB, - InitialIP: parseAndMarshalIP(t, "7.6.6.5"), + Key: acctKeyB, } result, err := ra.NewRegistration(ctx, input) @@ -448,8 +481,6 @@ func TestNewRegistration(t *testing.T) { t.Fatalf("could not create new registration: %s", err) } test.AssertByteEquals(t, result.Key, acctKeyB) - test.Assert(t, len(result.Contact) == 1, "Wrong number of contacts") - test.Assert(t, mailto == (result.Contact)[0], "Contact didn't match") test.Assert(t, result.Agreement == "", "Agreement didn't default empty") reg, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: result.Id}) @@ -457,72 +488,8 @@ func TestNewRegistration(t *testing.T) { test.AssertByteEquals(t, reg.Key, acctKeyB) } -func TestNewRegistrationContactsPresent(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - testCases := []struct { - Name string - Reg *corepb.Registration - ExpectedErr error - }{ - { - Name: "No contacts provided by client ContactsPresent false", - Reg: &corepb.Registration{ - Key: newAcctKey(t), - InitialIP: parseAndMarshalIP(t, "7.6.6.5"), - }, - ExpectedErr: nil, - }, - { - Name: "Empty contact provided by client ContactsPresent true", - Reg: &corepb.Registration{ - Contact: []string{}, - ContactsPresent: true, - Key: newAcctKey(t), - InitialIP: parseAndMarshalIP(t, "7.6.6.4"), - }, - ExpectedErr: nil, - }, - { - Name: "Valid contact provided by client ContactsPresent true", - Reg: &corepb.Registration{ - Contact: []string{"mailto:foo@letsencrypt.org"}, - ContactsPresent: true, - Key: newAcctKey(t), - InitialIP: parseAndMarshalIP(t, "7.6.4.3"), - }, - ExpectedErr: nil, - }, - { - Name: "Valid contact provided by client ContactsPresent false", - Reg: &corepb.Registration{ - Contact: []string{"mailto:foo@letsencrypt.org"}, - ContactsPresent: false, - Key: newAcctKey(t), - InitialIP: parseAndMarshalIP(t, "7.6.6.2"), - }, - ExpectedErr: fmt.Errorf("account contacts present but contactsPresent false"), - }, - } - // For each test case we check that the NewRegistration works as - // intended with variations of Contact and ContactsPresent fields - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - // Create new registration - _, err := ra.NewRegistration(ctx, tc.Reg) - // Check error output - if tc.ExpectedErr == nil { - test.AssertNotError(t, err, "expected no error for NewRegistration") - } else { - test.AssertError(t, err, "expected error for NewRegistration") - test.AssertEquals(t, 
err.Error(), tc.ExpectedErr.Error()) - } - }) - } -} - type mockSAFailsNewRegistration struct { - mocks.StorageAuthority + sapb.StorageAuthorityClient } func (sa *mockSAFailsNewRegistration) NewRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*corepb.Registration, error) { @@ -530,16 +497,13 @@ func (sa *mockSAFailsNewRegistration) NewRegistration(_ context.Context, _ *core } func TestNewRegistrationSAFailure(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() ra.SA = &mockSAFailsNewRegistration{} acctKeyB, err := AccountKeyB.MarshalJSON() test.AssertNotError(t, err, "failed to marshal account key") input := corepb.Registration{ - Contact: []string{"mailto:test@example.com"}, - ContactsPresent: true, - Key: acctKeyB, - InitialIP: parseAndMarshalIP(t, "7.6.6.5"), + Key: acctKeyB, } result, err := ra.NewRegistration(ctx, &input) if err == nil { @@ -548,18 +512,14 @@ func TestNewRegistrationSAFailure(t *testing.T) { } func TestNewRegistrationNoFieldOverwrite(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - mailto := "mailto:foo@letsencrypt.org" acctKeyC, err := AccountKeyC.MarshalJSON() test.AssertNotError(t, err, "failed to marshal account key") input := &corepb.Registration{ - Id: 23, - Key: acctKeyC, - Contact: []string{mailto}, - ContactsPresent: true, - Agreement: "I agreed", - InitialIP: parseAndMarshalIP(t, "5.0.5.0"), + Id: 23, + Key: acctKeyC, + Agreement: "I agreed", } result, err := ra.NewRegistration(ctx, input) @@ -570,151 +530,22 @@ func TestNewRegistrationNoFieldOverwrite(t *testing.T) { } func TestNewRegistrationBadKey(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - mailto := "mailto:foo@letsencrypt.org" shortKey, err := ShortKey.MarshalJSON() test.AssertNotError(t, err, "failed to marshal account key") input := &corepb.Registration{ - Contact: []string{mailto}, - ContactsPresent: true, - Key: shortKey, + Key: shortKey, } _, err = ra.NewRegistration(ctx, input) test.AssertError(t, err, "Should have rejected authorization with short key") } -// testKey returns a random 2048 bit RSA public key for test registrations -func testKey() *rsa.PublicKey { - key, _ := rsa.GenerateKey(rand.Reader, 2048) - return &key.PublicKey -} - -func TestNewRegistrationRateLimit(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - // Specify a dummy rate limit policy that allows 1 registration per exact IP - // match, and 2 per range. 
- ra.rlPolicies = &dummyRateLimitConfig{ - RegistrationsPerIPPolicy: ratelimit.RateLimitPolicy{ - Threshold: 1, - Window: cmd.ConfigDuration{Duration: 24 * 90 * time.Hour}, - }, - RegistrationsPerIPRangePolicy: ratelimit.RateLimitPolicy{ - Threshold: 2, - Window: cmd.ConfigDuration{Duration: 24 * 90 * time.Hour}, - }, - } - - // Create one registration for an IPv4 address - mailto := "mailto:foo@letsencrypt.org" - reg := &corepb.Registration{ - Contact: []string{mailto}, - ContactsPresent: true, - Key: newAcctKey(t), - InitialIP: parseAndMarshalIP(t, "7.6.6.5"), - } - // There should be no errors - it is within the RegistrationsPerIP rate limit - _, err := ra.NewRegistration(ctx, reg) - test.AssertNotError(t, err, "Unexpected error adding new IPv4 registration") - - // Create another registration for the same IPv4 address by changing the key - reg.Key = newAcctKey(t) - - // There should be an error since a 2nd registration will exceed the - // RegistrationsPerIP rate limit - _, err = ra.NewRegistration(ctx, reg) - test.AssertError(t, err, "No error adding duplicate IPv4 registration") - test.AssertEquals(t, err.Error(), "too many registrations for this IP: see https://letsencrypt.org/docs/rate-limits/") - - // Create a registration for an IPv6 address - reg.Key = newAcctKey(t) - reg.InitialIP = parseAndMarshalIP(t, "2001:cdba:1234:5678:9101:1121:3257:9652") - - // There should be no errors - it is within the RegistrationsPerIP rate limit - _, err = ra.NewRegistration(ctx, reg) - test.AssertNotError(t, err, "Unexpected error adding a new IPv6 registration") - - // Create a 2nd registration for the IPv6 address by changing the key - reg.Key = newAcctKey(t) - - // There should be an error since a 2nd reg for the same IPv6 address will - // exceed the RegistrationsPerIP rate limit - _, err = ra.NewRegistration(ctx, reg) - test.AssertError(t, err, "No error adding duplicate IPv6 registration") - test.AssertEquals(t, err.Error(), "too many registrations for this IP: see https://letsencrypt.org/docs/rate-limits/") - - // Create a registration for an IPv6 address in the same /48 - reg.Key = newAcctKey(t) - reg.InitialIP = parseAndMarshalIP(t, "2001:cdba:1234:5678:9101:1121:3257:9653") - - // There should be no errors since two IPv6 addresses in the same /48 is - // within the RegistrationsPerIPRange limit - _, err = ra.NewRegistration(ctx, reg) - test.AssertNotError(t, err, "Unexpected error adding second IPv6 registration in the same /48") - - // Create a registration for yet another IPv6 address in the same /48 - reg.Key = newAcctKey(t) - reg.InitialIP = parseAndMarshalIP(t, "2001:cdba:1234:5678:9101:1121:3257:9654") - - // There should be an error since three registrations within the same IPv6 - // /48 is outside of the RegistrationsPerIPRange limit - _, err = ra.NewRegistration(ctx, reg) - test.AssertError(t, err, "No error adding a third IPv6 registration in the same /48") - test.AssertEquals(t, err.Error(), "too many registrations for this IP range: see https://letsencrypt.org/docs/rate-limits/") -} - -type NoUpdateSA struct { - mocks.StorageAuthority -} - -func (sa NoUpdateSA) UpdateRegistration(_ context.Context, _ *corepb.Registration, _ ...grpc.CallOption) (*emptypb.Empty, error) { - return nil, fmt.Errorf("UpdateRegistration() is mocked to always error") -} - -func TestUpdateRegistrationSame(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - mailto := "mailto:foo@letsencrypt.org" - - // Make a new registration with AccountKeyC and a Contact - acctKeyC, err 
:= AccountKeyC.MarshalJSON() - test.AssertNotError(t, err, "failed to marshal account key") - reg := &corepb.Registration{ - Key: acctKeyC, - Contact: []string{mailto}, - ContactsPresent: true, - Agreement: "I agreed", - InitialIP: parseAndMarshalIP(t, "5.0.5.0"), - } - result, err := ra.NewRegistration(ctx, reg) - test.AssertNotError(t, err, "Could not create new registration") - - // Switch to a mock SA that will always error if UpdateRegistration() is called - ra.SA = &NoUpdateSA{} - - // Make an update to the registration with the same Contact & Agreement values. - updateSame := &corepb.Registration{ - Id: result.Id, - Key: acctKeyC, - Contact: []string{mailto}, - ContactsPresent: true, - Agreement: "I agreed", - } - - // The update operation should *not* error, even with the NoUpdateSA because - // UpdateRegistration() should not be called when the update content doesn't - // actually differ from the existing content - _, err = ra.UpdateRegistration(ctx, &rapb.UpdateRegistrationRequest{Base: result, Update: updateSame}) - test.AssertNotError(t, err, "Error updating registration") -} - func TestPerformValidationExpired(t *testing.T) { - _, sa, ra, fc, cleanUp := initAuthorities(t) + _, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - authz := createPendingAuthorization(t, sa, Identifier, fc.Now().Add(-2*time.Hour)) + authz := createPendingAuthorization(t, sa, registration.Id, identifier.NewDNS("example.com"), fc.Now().Add(-2*time.Hour)) _, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ Authz: authz, @@ -724,16 +555,15 @@ func TestPerformValidationExpired(t *testing.T) { } func TestPerformValidationAlreadyValid(t *testing.T) { - va, _, ra, _, cleanUp := initAuthorities(t) + va, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.reuseValidAuthz = false // Create a finalized authorization exp := ra.clk.Now().Add(365 * 24 * time.Hour) authz := core.Authorization{ ID: "1337", - Identifier: identifier.DNSIdentifier("not-example.com"), - RegistrationID: 1, + Identifier: identifier.NewDNS("not-example.com"), + RegistrationID: registration.Id, Status: "valid", Expires: &exp, Challenges: []core.Challenge{ @@ -747,7 +577,7 @@ func TestPerformValidationAlreadyValid(t *testing.T) { authzPB, err := bgrpc.AuthzToPB(authz) test.AssertNotError(t, err, "bgrpc.AuthzToPB failed") - va.ResultReturn = &vapb.ValidationResult{ + va.doDCVResult = &vapb.ValidationResult{ Records: []*corepb.ValidationRecord{ { AddressUsed: []byte("192.168.0.1"), @@ -756,87 +586,264 @@ func TestPerformValidationAlreadyValid(t *testing.T) { Url: "http://example.com/", }, }, - Problems: nil, + Problem: nil, } + va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil} - // A subsequent call to perform validation should return the expected error - _, err = ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ + // A subsequent call to perform validation should return nil due + // to being short-circuited because of valid authz reuse. 
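+	// ("Short-circuited" here means the RA never calls out to the VA; the
+	// already-valid authz is simply returned with its status intact.)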
+ val, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ Authz: authzPB, ChallengeIndex: int64(ResponseIndex), }) - test.AssertErrorIs(t, err, berrors.Malformed) + test.Assert(t, core.AcmeStatus(val.Status) == core.StatusValid, "Validation should have been valid") + test.AssertNotError(t, err, "Error was not nil, but should have been nil") } func TestPerformValidationSuccess(t *testing.T) { - va, sa, ra, fc, cleanUp := initAuthorities(t) + va, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("example.com"), + identifier.NewIP(netip.MustParseAddr("192.168.0.1")), + } + + for _, ident := range idents { + // We know this is OK because of TestNewAuthorization + authzPB := createPendingAuthorization(t, sa, registration.Id, ident, fc.Now().Add(12*time.Hour)) + + va.doDCVResult = &vapb.ValidationResult{ + Records: []*corepb.ValidationRecord{ + { + AddressUsed: []byte("192.168.0.1"), + Hostname: "example.com", + Port: "8080", + Url: "http://example.com/", + ResolverAddrs: []string{"rebound"}, + }, + }, + Problem: nil, + } + va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil} + + now := fc.Now() + challIdx := dnsChallIdx(t, authzPB.Challenges) + authzPB, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ + Authz: authzPB, + ChallengeIndex: challIdx, + }) + test.AssertNotError(t, err, "PerformValidation failed") + + var vaRequest *vapb.PerformValidationRequest + select { + case r := <-va.doDCVRequest: + vaRequest = r + case <-time.After(time.Second): + t.Fatal("Timed out waiting for DummyValidationAuthority.PerformValidation to complete") + } + + // Verify that the VA got the request, and it's the same as the others + test.AssertEquals(t, authzPB.Challenges[challIdx].Type, vaRequest.Challenge.Type) + test.AssertEquals(t, authzPB.Challenges[challIdx].Token, vaRequest.Challenge.Token) + + // Sleep so the RA has a chance to write to the SA + time.Sleep(100 * time.Millisecond) + + dbAuthzPB := getAuthorization(t, authzPB.Id, sa) + t.Log("dbAuthz:", dbAuthzPB) + + // Verify that the responses are reflected + challIdx = dnsChallIdx(t, dbAuthzPB.Challenges) + challenge, err := bgrpc.PBToChallenge(dbAuthzPB.Challenges[challIdx]) + test.AssertNotError(t, err, "Failed to marshall corepb.Challenge to core.Challenge.") + + test.AssertNotNil(t, vaRequest.Challenge, "Request passed to VA has no challenge") + test.Assert(t, challenge.Status == core.StatusValid, "challenge was not marked as valid") + + // The DB authz's expiry should be equal to the current time plus the + // configured authorization lifetime + test.AssertEquals(t, dbAuthzPB.Expires.AsTime(), now.Add(ra.profiles.def().validAuthzLifetime)) + + // Check that validated timestamp was recorded, stored, and retrieved + expectedValidated := fc.Now() + test.AssertEquals(t, *challenge.Validated, expectedValidated) + } +} + +// mockSAWithSyncPause is a mock sapb.StorageAuthorityClient that forwards all +// method calls to an inner SA, but also performs a blocking write to a channel +// when PauseIdentifiers is called to allow the tests to synchronize. 
+type mockSAWithSyncPause struct { + sapb.StorageAuthorityClient + out chan<- *sapb.PauseRequest +} + +func (msa mockSAWithSyncPause) PauseIdentifiers(ctx context.Context, req *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) { + res, err := msa.StorageAuthorityClient.PauseIdentifiers(ctx, req) + msa.out <- req + return res, err +} + +func TestPerformValidation_FailedValidationsTriggerPauseIdentifiersRatelimit(t *testing.T) { + va, sa, ra, rl, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - // We know this is OK because of TestNewAuthorization - authzPB := createPendingAuthorization(t, sa, Identifier, fc.Now().Add(12*time.Hour)) + features.Set(features.Config{AutomaticallyPauseZombieClients: true}) + defer features.Reset() + + // Replace the SA with one that will block when PauseIdentifiers is called. + pauseChan := make(chan *sapb.PauseRequest) + defer close(pauseChan) + ra.SA = mockSAWithSyncPause{ + StorageAuthorityClient: ra.SA, + out: pauseChan, + } + + // Set the default ratelimits to only allow one failed validation per 24 + // hours before pausing. + txnBuilder, err := ratelimits.NewTransactionBuilder(ratelimits.LimitConfigs{ + ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount.String(): &ratelimits.LimitConfig{ + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Hour * 24}}, + }, nil, metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "making transaction composer") + ra.txnBuilder = txnBuilder + + // Set up a fake domain, authz, and bucket key to care about. + domain := randomDomain() + ident := identifier.NewDNS(domain) + authzPB := createPendingAuthorization(t, sa, registration.Id, ident, fc.Now().Add(12*time.Hour)) + bucketKey, err := ratelimits.BuildBucketKey(ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, authzPB.RegistrationID, ident, nil, netip.Addr{}) + test.AssertNotError(t, err, "building bucket key") + + // Set the stored TAT to indicate that this bucket has exhausted its quota. + err = rl.BatchSet(context.Background(), map[string]time.Time{ + bucketKey: fc.Now().Add(25 * time.Hour), + }) + test.AssertNotError(t, err, "updating rate limit bucket") - va.ResultReturn = &vapb.ValidationResult{ + // Now a failed validation should result in the identifier being paused + // due to the strict ratelimit. 
+ va.doDCVResult = &vapb.ValidationResult{ Records: []*corepb.ValidationRecord{ { - AddressUsed: []byte("192.168.0.1"), - Hostname: "example.com", - Port: "8080", - Url: "http://example.com/", + AddressUsed: []byte("192.168.0.1"), + Hostname: domain, + Port: "8080", + Url: fmt.Sprintf("http://%s/", domain), + ResolverAddrs: []string{"rebound"}, }, }, - Problems: nil, + Problem: nil, + } + va.doCAAResponse = &vapb.IsCAAValidResponse{ + Problem: &corepb.ProblemDetails{ + Detail: fmt.Sprintf("CAA invalid for %s", domain), + }, } - challIdx := challTypeIndex(t, authzPB.Challenges, core.ChallengeTypeDNS01) - authzPB, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ + _, err = ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ Authz: authzPB, - ChallengeIndex: challIdx, + ChallengeIndex: dnsChallIdx(t, authzPB.Challenges), }) test.AssertNotError(t, err, "PerformValidation failed") - var vaRequest *vapb.PerformValidationRequest - select { - case r := <-va.request: - vaRequest = r - case <-time.After(time.Second): - t.Fatal("Timed out waiting for DummyValidationAuthority.PerformValidation to complete") + // Wait for the RA to finish processing the validation, and ensure that the paused + // account+identifier is what we expect. + paused := <-pauseChan + test.AssertEquals(t, len(paused.Identifiers), 1) + test.AssertEquals(t, paused.Identifiers[0].Value, domain) +} + +// mockRLSourceWithSyncDelete is a mock ratelimits.Source that forwards all +// method calls to an inner Source, but also performs a blocking write to a +// channel when BatchDelete is called to allow the tests to synchronize. +type mockRLSourceWithSyncDelete struct { + ratelimits.Source + out chan<- string +} + +func (rl mockRLSourceWithSyncDelete) BatchDelete(ctx context.Context, bucketKeys []string) error { + err := rl.Source.BatchDelete(ctx, bucketKeys) + for _, bucketKey := range bucketKeys { + rl.out <- bucketKey } + return err +} - // Verify that the VA got the request, and it's the same as the others - test.AssertEquals(t, string(authzPB.Challenges[challIdx].Type), vaRequest.Challenge.Type) - test.AssertEquals(t, authzPB.Challenges[challIdx].Token, vaRequest.Challenge.Token) +func TestPerformValidation_FailedThenSuccessfulValidationResetsPauseIdentifiersRatelimit(t *testing.T) { + va, sa, ra, rl, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() - // Sleep so the RA has a chance to write to the SA - time.Sleep(100 * time.Millisecond) + features.Set(features.Config{AutomaticallyPauseZombieClients: true}) + defer features.Reset() - dbAuthzPB := getAuthorization(t, authzPB.Id, sa) - t.Log("dbAuthz:", dbAuthzPB) + // Replace the rate limit source with one that will block when Delete is called. + keyChan := make(chan string) + defer close(keyChan) + limiter, err := ratelimits.NewLimiter(fc, mockRLSourceWithSyncDelete{ + Source: rl, + out: keyChan, + }, metrics.NoopRegisterer) + test.AssertNotError(t, err, "creating mock limiter") + ra.limiter = limiter + + // Set up a fake domain, authz, and bucket key to care about. + domain := randomDomain() + ident := identifier.NewDNS(domain) + authzPB := createPendingAuthorization(t, sa, registration.Id, ident, fc.Now().Add(12*time.Hour)) + bucketKey, err := ratelimits.BuildBucketKey(ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount, authzPB.RegistrationID, ident, nil, netip.Addr{}) + test.AssertNotError(t, err, "building bucket key") + + // Set a stored TAT so that we can tell when it's been reset. 
+ err = rl.BatchSet(context.Background(), map[string]time.Time{ + bucketKey: fc.Now().Add(25 * time.Hour), + }) + test.AssertNotError(t, err, "updating rate limit bucket") - // Verify that the responses are reflected - challIdx = challTypeIndex(t, dbAuthzPB.Challenges, core.ChallengeTypeDNS01) - challenge, err := bgrpc.PBToChallenge(dbAuthzPB.Challenges[challIdx]) - test.AssertNotError(t, err, "Failed to marshall corepb.Challenge to core.Challenge.") + va.doDCVResult = &vapb.ValidationResult{ + Records: []*corepb.ValidationRecord{ + { + AddressUsed: []byte("192.168.0.1"), + Hostname: domain, + Port: "8080", + Url: fmt.Sprintf("http://%s/", domain), + ResolverAddrs: []string{"rebound"}, + }, + }, + Problem: nil, + } + va.doCAAResponse = &vapb.IsCAAValidResponse{Problem: nil} - test.AssertNotNil(t, vaRequest.Challenge, "Request passed to VA has no challenge") - test.Assert(t, challenge.Status == core.StatusValid, "challenge was not marked as valid") + _, err = ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ + Authz: authzPB, + ChallengeIndex: dnsChallIdx(t, authzPB.Challenges), + }) + test.AssertNotError(t, err, "PerformValidation failed") - // The DB authz's expiry should be equal to the current time plus the - // configured authorization lifetime - test.AssertEquals(t, time.Unix(0, dbAuthzPB.Expires).String(), fc.Now().Add(ra.authorizationLifetime).String()) + // Wait for the RA to finish processesing the validation, and ensure that + // the reset bucket key is what we expect. + reset := <-keyChan + test.AssertEquals(t, reset, bucketKey) - // Check that validated timestamp was recorded, stored, and retrieved - expectedValidated := fc.Now() - test.Assert(t, *challenge.Validated == expectedValidated, "Validated timestamp incorrect or missing") + // Verify that the bucket no longer exists (because the limiter reset has + // deleted it). This indicates the accountID:identifier bucket has regained + // capacity avoiding being inadvertently paused. 
+ _, err = rl.Get(ctx, bucketKey) + test.AssertErrorIs(t, err, ratelimits.ErrBucketNotFound) } func TestPerformValidationVAError(t *testing.T) { - va, sa, ra, fc, cleanUp := initAuthorities(t) + va, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - authzPB := createPendingAuthorization(t, sa, Identifier, fc.Now().Add(12*time.Hour)) + authzPB := createPendingAuthorization(t, sa, registration.Id, identifier.NewDNS("example.com"), fc.Now().Add(12*time.Hour)) - va.ResultError = fmt.Errorf("Something went wrong") + va.doDCVError = fmt.Errorf("Something went wrong") - challIdx := challTypeIndex(t, authzPB.Challenges, core.ChallengeTypeDNS01) + challIdx := dnsChallIdx(t, authzPB.Challenges) authzPB, err := ra.PerformValidation(ctx, &rapb.PerformValidationRequest{ Authz: authzPB, ChallengeIndex: challIdx, @@ -846,14 +853,14 @@ func TestPerformValidationVAError(t *testing.T) { var vaRequest *vapb.PerformValidationRequest select { - case r := <-va.request: + case r := <-va.doDCVRequest: vaRequest = r case <-time.After(time.Second): t.Fatal("Timed out waiting for DummyValidationAuthority.PerformValidation to complete") } // Verify that the VA got the request, and it's the same as the others - test.AssertEquals(t, string(authzPB.Challenges[challIdx].Type), vaRequest.Challenge.Type) + test.AssertEquals(t, authzPB.Challenges[challIdx].Type, vaRequest.Challenge.Type) test.AssertEquals(t, authzPB.Challenges[challIdx].Token, vaRequest.Challenge.Token) // Sleep so the RA has a chance to write to the SA @@ -863,31 +870,33 @@ func TestPerformValidationVAError(t *testing.T) { t.Log("dbAuthz:", dbAuthzPB) // Verify that the responses are reflected - challIdx = challTypeIndex(t, dbAuthzPB.Challenges, core.ChallengeTypeDNS01) + challIdx = dnsChallIdx(t, dbAuthzPB.Challenges) challenge, err := bgrpc.PBToChallenge(dbAuthzPB.Challenges[challIdx]) test.AssertNotError(t, err, "Failed to marshall corepb.Challenge to core.Challenge.") test.Assert(t, challenge.Status == core.StatusInvalid, "challenge was not marked as invalid") - test.AssertContains(t, challenge.Error.Error(), "Could not communicate with VA") + test.AssertContains(t, challenge.Error.String(), "Could not communicate with VA") test.Assert(t, challenge.ValidationRecord == nil, "challenge had a ValidationRecord") // Check that validated timestamp was recorded, stored, and retrieved expectedValidated := fc.Now() - test.Assert(t, *challenge.Validated == expectedValidated, "Validated timestamp incorrect or missing") + test.AssertEquals(t, *challenge.Validated, expectedValidated) } func TestCertificateKeyNotEqualAccountKey(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() exp := ra.clk.Now().Add(365 * 24 * time.Hour) - authzID := createFinalizedAuthorization(t, sa, "www.example.com", exp, "valid", ra.clk.Now()) + authzID := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("www.example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: []string{"www.example.com"}, - V2Authorizations: []int64{authzID}, + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), + Identifiers: []*corepb.Identifier{identifier.NewDNS("www.example.com").ToProto()}, + 
V2Authorizations: []int64{authzID}, + }, }) test.AssertNotError(t, err, "Could not add test order with finalized authz IDs, ready status") @@ -902,9 +911,9 @@ func TestCertificateKeyNotEqualAccountKey(t *testing.T) { _, err = ra.FinalizeOrder(ctx, &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ Status: string(core.StatusReady), - Names: []string{"www.example.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("www.example.com").ToProto()}, Id: order.Id, - RegistrationID: Registration.Id, + RegistrationID: registration.Id, }, Csr: csrBytes, }) @@ -912,644 +921,114 @@ func TestCertificateKeyNotEqualAccountKey(t *testing.T) { test.AssertEquals(t, err.Error(), "certificate public key must be different than account key") } -func TestNewOrderRateLimiting(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) +func TestDeactivateAuthorization(t *testing.T) { + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = 5 * 24 * time.Hour - - rateLimitDuration := 5 * time.Minute - // Create a dummy rate limit config that sets a NewOrdersPerAccount rate - // limit with a very low threshold/short window - ra.rlPolicies = &dummyRateLimitConfig{ - NewOrdersPerAccountPolicy: ratelimit.RateLimitPolicy{ - Threshold: 1, - Window: cmd.ConfigDuration{Duration: rateLimitDuration}, - }, - } - - orderOne := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"first.example.com"}, - } - orderTwo := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"second.example.com"}, - } - - // To start, it should be possible to create a new order - _, err := ra.NewOrder(ctx, orderOne) - test.AssertNotError(t, err, "NewOrder for orderOne failed") - - // Advance the clock 1s to separate the orders in time - fc.Add(time.Second) + exp := ra.clk.Now().Add(365 * 24 * time.Hour) + authzID := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("not-example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) + dbAuthzPB := getAuthorization(t, fmt.Sprint(authzID), sa) + _, err := ra.DeactivateAuthorization(ctx, dbAuthzPB) + test.AssertNotError(t, err, "Could not deactivate authorization") + deact, err := sa.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}) + test.AssertNotError(t, err, "Could not get deactivated authorization with ID "+dbAuthzPB.Id) + test.AssertEquals(t, deact.Status, string(core.StatusDeactivated)) +} - // Creating an order immediately after the first with different names - // should fail - _, err = ra.NewOrder(ctx, orderTwo) - test.AssertError(t, err, "NewOrder for orderTwo succeeded, should have been ratelimited") +type mockSARecordingPauses struct { + sapb.StorageAuthorityClient + recv *sapb.PauseRequest +} - // Creating the first order again should succeed because of order reuse, no - // new pending order is produced. 
- _, err = ra.NewOrder(ctx, orderOne) - test.AssertNotError(t, err, "Reuse of orderOne failed") +func (sa *mockSARecordingPauses) PauseIdentifiers(ctx context.Context, req *sapb.PauseRequest, _ ...grpc.CallOption) (*sapb.PauseIdentifiersResponse, error) { + sa.recv = req + return &sapb.PauseIdentifiersResponse{Paused: int64(len(req.Identifiers))}, nil +} - // Advancing the clock by 2 * the rate limit duration should allow orderTwo to - // succeed - fc.Add(2 * rateLimitDuration) - _, err = ra.NewOrder(ctx, orderTwo) - test.AssertNotError(t, err, "NewOrder for orderTwo failed after advancing clock") +func (sa *mockSARecordingPauses) DeactivateAuthorization2(_ context.Context, _ *sapb.AuthorizationID2, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return nil, nil } -// TestEarlyOrderRateLimiting tests that NewOrder applies the certificates per -// name/per FQDN rate limits against the order names. -func TestEarlyOrderRateLimiting(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) +func TestDeactivateAuthorization_Pausing(t *testing.T) { + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = 5 * 24 * time.Hour - - rateLimitDuration := 5 * time.Minute - - domain := "early-ratelimit-example.com" - - // Set a mock RL policy with a CertificatesPerName threshold for the domain - // name so low if it were enforced it would prevent a new order for any names. - ra.rlPolicies = &dummyRateLimitConfig{ - CertificatesPerNamePolicy: ratelimit.RateLimitPolicy{ - Threshold: 10, - Window: cmd.ConfigDuration{Duration: rateLimitDuration}, - // Setting the Threshold to 0 skips applying the rate limit. Setting an - // override to 0 does the trick. - Overrides: map[string]int64{ - domain: 0, - }, - }, - NewOrdersPerAccountPolicy: ratelimit.RateLimitPolicy{ - Threshold: 10, - Window: cmd.ConfigDuration{Duration: rateLimitDuration}, - }, - } - // Request an order for the test domain - newOrder := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{domain}, + if ra.limiter == nil { + t.Skip("no redis limiter configured") } - // With the feature flag enabled the NewOrder request should fail because of - // the CertificatesPerNamePolicy. 
- _, err := ra.NewOrder(ctx, newOrder) - test.AssertError(t, err, "NewOrder did not apply cert rate limits with feature flag enabled") + msa := mockSARecordingPauses{} + ra.SA = &msa - // The err should be the expected rate limit error - expectedErrPrefix := "too many certificates already issued for: " + - "early-ratelimit-example.com" - test.Assert(t, - strings.HasPrefix(err.Error(), expectedErrPrefix), - fmt.Sprintf("expected error to have prefix %q got %q", expectedErrPrefix, err)) -} + features.Set(features.Config{AutomaticallyPauseZombieClients: true}) + defer features.Reset() -func TestAuthzFailedRateLimitingNewOrder(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - ra.rlPolicies = &dummyRateLimitConfig{ - InvalidAuthorizationsPerAccountPolicy: ratelimit.RateLimitPolicy{ - Threshold: 1, - Window: cmd.ConfigDuration{Duration: 1 * time.Hour}, - }, - } - - testcase := func() { - ra.SA = &mockInvalidAuthorizationsAuthority{domainWithFailures: "all.i.do.is.lose.com"} - err := ra.checkInvalidAuthorizationLimits(ctx, Registration.Id, - []string{"charlie.brown.com", "all.i.do.is.lose.com"}) - test.AssertError(t, err, "checkInvalidAuthorizationLimits did not encounter expected rate limit error") - test.AssertEquals(t, err.Error(), "too many failed authorizations recently: see https://letsencrypt.org/docs/rate-limits/") - } - - testcase() -} - -func TestDomainsForRateLimiting(t *testing.T) { - domains, err := domainsForRateLimiting([]string{}) - test.AssertNotError(t, err, "failed on empty") - test.AssertEquals(t, len(domains), 0) - - domains, err = domainsForRateLimiting([]string{"www.example.com", "example.com"}) - test.AssertNotError(t, err, "failed on example.com") - test.AssertDeepEquals(t, domains, []string{"example.com"}) - - domains, err = domainsForRateLimiting([]string{"www.example.com", "example.com", "www.example.co.uk"}) - test.AssertNotError(t, err, "failed on example.co.uk") - test.AssertDeepEquals(t, domains, []string{"example.co.uk", "example.com"}) - - domains, err = domainsForRateLimiting([]string{"www.example.com", "example.com", "www.example.co.uk", "co.uk"}) - test.AssertNotError(t, err, "should not fail on public suffix") - test.AssertDeepEquals(t, domains, []string{"co.uk", "example.co.uk", "example.com"}) - - domains, err = domainsForRateLimiting([]string{"foo.bar.baz.www.example.com", "baz.example.com"}) - test.AssertNotError(t, err, "failed on foo.bar.baz") - test.AssertDeepEquals(t, domains, []string{"example.com"}) - - domains, err = domainsForRateLimiting([]string{"github.io", "foo.github.io", "bar.github.io"}) - test.AssertNotError(t, err, "failed on public suffix private domain") - test.AssertDeepEquals(t, domains, []string{"bar.github.io", "foo.github.io", "github.io"}) -} - -func TestRateLimitLiveReload(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - // We'll work with a temporary file as the reloader monitored rate limit - // policy file - policyFile, tempErr := ioutil.TempFile("", "rate-limit-policies.yml") - test.AssertNotError(t, tempErr, "should not fail to create TempFile") - filename := policyFile.Name() - defer os.Remove(filename) - - // Start with bodyOne in the temp file - bodyOne, readErr := ioutil.ReadFile("../test/rate-limit-policies.yml") - test.AssertNotError(t, readErr, "should not fail to read ../test/rate-limit-policies.yml") - writeErr := ioutil.WriteFile(filename, bodyOne, 0644) - test.AssertNotError(t, writeErr, "should not fail to write temp file") - - // Configure the RA 
to use the monitored temp file as the policy file - err := ra.SetRateLimitPoliciesFile(filename) - test.AssertNotError(t, err, "failed to SetRateLimitPoliciesFile") - - // Test some fields of the initial policy to ensure it loaded correctly - test.AssertEquals(t, ra.rlPolicies.CertificatesPerName().Overrides["le.wtf"], int64(10000)) - test.AssertEquals(t, ra.rlPolicies.RegistrationsPerIP().Overrides["127.0.0.1"], int64(1000000)) - test.AssertEquals(t, ra.rlPolicies.PendingAuthorizationsPerAccount().Threshold, int64(150)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSet().Threshold, int64(6)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSet().Overrides["le.wtf"], int64(10000)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSetFast().Threshold, int64(2)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSetFast().Overrides["le.wtf"], int64(100)) - - // Write a different policy YAML to the monitored file, expect a reload. - // Sleep a few milliseconds before writing so the timestamp isn't identical to - // when we wrote bodyOne to the file earlier. - bodyTwo, readErr := ioutil.ReadFile("../test/rate-limit-policies-b.yml") - test.AssertNotError(t, readErr, "should not fail to read ../test/rate-limit-policies-b.yml") - time.Sleep(1 * time.Second) - writeErr = ioutil.WriteFile(filename, bodyTwo, 0644) - test.AssertNotError(t, writeErr, "should not fail to write temp file") - - // Sleep to allow the reloader a chance to catch that an update occurred - time.Sleep(2 * time.Second) - - // Test fields of the policy to make sure writing the new policy to the monitored file - // resulted in the runtime values being updated - test.AssertEquals(t, ra.rlPolicies.CertificatesPerName().Overrides["le.wtf"], int64(9999)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerName().Overrides["le4.wtf"], int64(9999)) - test.AssertEquals(t, ra.rlPolicies.RegistrationsPerIP().Overrides["127.0.0.1"], int64(999990)) - test.AssertEquals(t, ra.rlPolicies.PendingAuthorizationsPerAccount().Threshold, int64(999)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSet().Overrides["le.wtf"], int64(9999)) - test.AssertEquals(t, ra.rlPolicies.CertificatesPerFQDNSet().Threshold, int64(99999)) -} - -type mockSAWithNameCounts struct { - mocks.StorageAuthority - nameCounts *sapb.CountByNames - t *testing.T - clk clock.FakeClock -} - -func (m mockSAWithNameCounts) CountCertificatesByNames(ctx context.Context, req *sapb.CountCertificatesByNamesRequest, _ ...grpc.CallOption) (*sapb.CountByNames, error) { - expectedLatest := m.clk.Now().UnixNano() - if req.Range.Latest != expectedLatest { - m.t.Errorf("incorrect latest: got '%d', expected '%d'", req.Range.Latest, expectedLatest) - } - expectedEarliest := m.clk.Now().Add(-23 * time.Hour).UnixNano() - if req.Range.Earliest != expectedEarliest { - m.t.Errorf("incorrect earliest: got '%d', expected '%d'", req.Range.Earliest, expectedEarliest) - } - counts := make(map[string]int64) - for _, name := range req.Names { - if count, ok := m.nameCounts.Counts[name]; ok { - counts[name] = count - } - } - return &sapb.CountByNames{Counts: counts}, nil -} - -func TestCheckCertificatesPerNameLimit(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) - defer cleanUp() - - rlp := ratelimit.RateLimitPolicy{ - Threshold: 3, - Window: cmd.ConfigDuration{Duration: 23 * time.Hour}, - Overrides: map[string]int64{ - "bigissuer.com": 100, - "smallissuer.co.uk": 1, - }, - } - - mockSA := &mockSAWithNameCounts{ - nameCounts: &sapb.CountByNames{Counts: 
map[string]int64{"example.com": 1}}, - clk: fc, - t: t, - } - - ra.SA = mockSA - - // One base domain, below threshold - err := ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "example.com"}, rlp, 99) - test.AssertNotError(t, err, "rate limited example.com incorrectly") - - // Two base domains, one above threshold, one below - mockSA.nameCounts.Counts["example.com"] = 10 - mockSA.nameCounts.Counts["good-example.com"] = 1 - err = ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "example.com", "good-example.com"}, rlp, 99) - test.AssertError(t, err, "incorrectly failed to rate limit example.com") - test.AssertErrorIs(t, err, berrors.RateLimit) - // Verify it has no sub errors as there is only one bad name - test.AssertEquals(t, err.Error(), "too many certificates already issued for: example.com: see https://letsencrypt.org/docs/rate-limits/") - var bErr *berrors.BoulderError - test.AssertErrorWraps(t, err, &bErr) - test.AssertEquals(t, len(bErr.SubErrors), 0) - - // Three base domains, two above threshold, one below - mockSA.nameCounts.Counts["example.com"] = 10 - mockSA.nameCounts.Counts["other-example.com"] = 10 - mockSA.nameCounts.Counts["good-example.com"] = 1 - err = ra.checkCertificatesPerNameLimit(ctx, []string{"example.com", "other-example.com", "good-example.com"}, rlp, 99) - test.AssertError(t, err, "incorrectly failed to rate limit example.com, other-example.com") - test.AssertErrorIs(t, err, berrors.RateLimit) - // Verify it has two sub errors as there are two bad names - test.AssertEquals(t, err.Error(), "too many certificates already issued for multiple names (example.com and 2 others): see https://letsencrypt.org/docs/rate-limits/") - test.AssertErrorWraps(t, err, &bErr) - test.AssertEquals(t, len(bErr.SubErrors), 2) - - // SA misbehaved and didn't send back a count for every input name - err = ra.checkCertificatesPerNameLimit(ctx, []string{"zombo.com", "www.example.com", "example.com"}, rlp, 99) - test.AssertError(t, err, "incorrectly failed to error on misbehaving SA") - - // Two base domains, one above threshold but with an override. - mockSA.nameCounts.Counts["example.com"] = 0 - mockSA.nameCounts.Counts["bigissuer.com"] = 50 - err = ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "subdomain.bigissuer.com"}, rlp, 99) - test.AssertNotError(t, err, "incorrectly rate limited bigissuer") - - // Two base domains, one above its override - mockSA.nameCounts.Counts["example.com"] = 10 - mockSA.nameCounts.Counts["bigissuer.com"] = 100 - err = ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "subdomain.bigissuer.com"}, rlp, 99) - test.AssertError(t, err, "incorrectly failed to rate limit bigissuer") - test.AssertErrorIs(t, err, berrors.RateLimit) - - // One base domain, above its override (which is below threshold) - mockSA.nameCounts.Counts["smallissuer.co.uk"] = 1 - err = ra.checkCertificatesPerNameLimit(ctx, []string{"www.smallissuer.co.uk"}, rlp, 99) - test.AssertError(t, err, "incorrectly failed to rate limit smallissuer") - test.AssertErrorIs(t, err, berrors.RateLimit) -} - -// TestCheckExactCertificateLimit tests that the duplicate certificate limit -// applied to FQDN sets is respected. 
-func TestCheckExactCertificateLimit(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - // Create a rate limit with a small threshold - const dupeCertLimit = 3 - rlp := ratelimit.RateLimitPolicy{ - Threshold: dupeCertLimit, - Window: cmd.ConfigDuration{Duration: 23 * time.Hour}, - } - - // Create a mock SA that has a count of already issued certificates for some - // test names - ra.SA = &mockSAWithFQDNSet{ - nameCounts: &sapb.CountByNames{ - Counts: map[string]int64{ - "under.example.com": dupeCertLimit - 1, - "equal.example.com": dupeCertLimit, - "over.example.com": dupeCertLimit + 1, - }, - }, - t: t, - } - - testCases := []struct { - Name string - Domain string - ExpectedErr error - }{ - { - Name: "FQDN set issuances less than limit", - Domain: "under.example.com", - ExpectedErr: nil, - }, - { - Name: "FQDN set issuances equal to limit", - Domain: "equal.example.com", - ExpectedErr: fmt.Errorf("too many certificates (3) already issued for this exact set of domains in the last 23 hours: equal.example.com: see https://letsencrypt.org/docs/rate-limits/"), - }, - { - Name: "FQDN set issuances above limit", - Domain: "over.example.com", - ExpectedErr: fmt.Errorf("too many certificates (3) already issued for this exact set of domains in the last 23 hours: over.example.com: see https://letsencrypt.org/docs/rate-limits/"), - }, - } - - // For each test case we check that the certificatesPerFQDNSetLimit is applied - // as we expect - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - result := ra.checkCertificatesPerFQDNSetLimit(ctx, []string{tc.Domain}, rlp, 0) - if tc.ExpectedErr == nil { - test.AssertNotError(t, result, fmt.Sprintf("Expected no error for %q", tc.Domain)) - } else { - test.AssertError(t, result, fmt.Sprintf("Expected error for %q", tc.Domain)) - test.AssertEquals(t, result.Error(), tc.ExpectedErr.Error()) - } - }) - } -} - -func TestRegistrationUpdate(t *testing.T) { - oldURL := "http://old.invalid" - newURL := "http://new.invalid" - base := &corepb.Registration{ - Id: 1, - Contact: []string{oldURL}, - Agreement: "", - } - update := &corepb.Registration{ - Contact: []string{newURL}, - ContactsPresent: true, - Agreement: "totally!", - } - - res, changed := mergeUpdate(base, update) - test.AssertEquals(t, changed, true) - test.AssertEquals(t, res.Contact[0], update.Contact[0]) - test.AssertEquals(t, res.Agreement, update.Agreement) - - // Make sure that a `MergeUpdate` call with an empty string doesn't produce an - // error and results in a change to the base reg. - emptyUpdate := &corepb.Registration{ - Contact: []string{""}, - ContactsPresent: true, - Agreement: "totally!", - } - _, changed = mergeUpdate(res, emptyUpdate) - test.AssertEquals(t, changed, true) -} - -func TestRegistrationContactUpdate(t *testing.T) { - contactURL := "mailto://example@example.com" - - // Test that a registration contact can be removed by updating with an empty - // Contact slice. 
- base := &corepb.Registration{ - Id: 1, - Contact: []string{contactURL}, - Agreement: "totally!", - } - update := &corepb.Registration{ - Id: 1, - Contact: []string{}, - ContactsPresent: true, - Agreement: "totally!", - } - res, changed := mergeUpdate(base, update) - test.AssertEquals(t, changed, true) - test.Assert(t, len(res.Contact) == 0, "Contact was not deleted in update") - - // Test that a registration contact isn't changed when an update is performed - // with no Contact field - base = &corepb.Registration{ - Id: 1, - Contact: []string{contactURL}, - Agreement: "totally!", - } - update = &corepb.Registration{ - Id: 1, - Agreement: "totally!", - } - res, changed = mergeUpdate(base, update) - test.AssertEquals(t, changed, false) - test.Assert(t, len(res.Contact) == 1, "len(Contact) was updated unexpectedly") - test.Assert(t, (res.Contact)[0] == contactURL, "Contact was changed unexpectedly") -} - -func TestRegistrationKeyUpdate(t *testing.T) { - oldKey, err := rsa.GenerateKey(rand.Reader, 512) - test.AssertNotError(t, err, "rsa.GenerateKey() for oldKey failed") - oldKeyJSON, err := jose.JSONWebKey{Key: oldKey}.MarshalJSON() - test.AssertNotError(t, err, "MarshalJSON for oldKey failed") - - base := &corepb.Registration{Key: oldKeyJSON} - update := &corepb.Registration{} - _, changed := mergeUpdate(base, update) - test.Assert(t, !changed, "mergeUpdate changed the key with empty update") - - newKey, err := rsa.GenerateKey(rand.Reader, 1024) - test.AssertNotError(t, err, "rsa.GenerateKey() for newKey failed") - newKeyJSON, err := jose.JSONWebKey{Key: newKey}.MarshalJSON() - test.AssertNotError(t, err, "MarshalJSON for newKey failed") - - update = &corepb.Registration{Key: newKeyJSON} - res, changed := mergeUpdate(base, update) - test.Assert(t, changed, "mergeUpdate didn't change the key with non-empty update") - test.AssertByteEquals(t, res.Key, update.Key) -} - -// A mockSAWithFQDNSet is a mock StorageAuthority that supports -// CountCertificatesByName as well as FQDNSetExists. This allows testing -// checkCertificatesPerNameRateLimit's FQDN exemption logic. -type mockSAWithFQDNSet struct { - mocks.StorageAuthority - fqdnSet map[string]bool - nameCounts *sapb.CountByNames - t *testing.T -} - -// Construct the FQDN Set key the same way as the SA (by using -// `core.UniqueLowerNames`, joining the names with a `,` and hashing them) -// but return a string so it can be used as a key in m.fqdnSet. -func (m mockSAWithFQDNSet) hashNames(names []string) string { - names = core.UniqueLowerNames(names) - hash := sha256.Sum256([]byte(strings.Join(names, ","))) - return string(hash[:]) -} - -// Add a set of domain names to the FQDN set -func (m mockSAWithFQDNSet) addFQDNSet(names []string) { - hash := m.hashNames(names) - m.fqdnSet[hash] = true -} - -// Search for a set of domain names in the FQDN set map -func (m mockSAWithFQDNSet) FQDNSetExists(_ context.Context, req *sapb.FQDNSetExistsRequest, _ ...grpc.CallOption) (*sapb.Exists, error) { - hash := m.hashNames(req.Domains) - if _, exists := m.fqdnSet[hash]; exists { - return &sapb.Exists{Exists: true}, nil - } - return &sapb.Exists{Exists: false}, nil -} - -// Return a map of domain -> certificate count. 
-func (m mockSAWithFQDNSet) CountCertificatesByNames(ctx context.Context, req *sapb.CountCertificatesByNamesRequest, _ ...grpc.CallOption) (*sapb.CountByNames, error) { - counts := make(map[string]int64) - for _, name := range req.Names { - if count, ok := m.nameCounts.Counts[name]; ok { - counts[name] = count - } - } - return &sapb.CountByNames{Counts: counts}, nil -} - -func (m mockSAWithFQDNSet) CountFQDNSets(_ context.Context, req *sapb.CountFQDNSetsRequest, _ ...grpc.CallOption) (*sapb.Count, error) { - var total int64 - for _, name := range req.Domains { - if count, ok := m.nameCounts.Counts[name]; ok { - total += count - } - } - return &sapb.Count{Count: total}, nil -} - -// Tests for boulder issue 1925[0] - that the `checkCertificatesPerNameLimit` -// properly honours the FQDNSet exemption. E.g. that if a set of domains has -// reached the certificates per name rate limit policy threshold but the exact -// same set of FQDN's was previously issued, then it should not be considered -// over the certificates per name limit. -// -// [0] https://github.com/letsencrypt/boulder/issues/1925 -func TestCheckFQDNSetRateLimitOverride(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - // Simple policy that only allows 1 certificate per name. - certsPerNamePolicy := ratelimit.RateLimitPolicy{ - Threshold: 1, - Window: cmd.ConfigDuration{Duration: 24 * time.Hour}, - } - - // Create a mock SA that has both name counts and an FQDN set - mockSA := &mockSAWithFQDNSet{ - nameCounts: &sapb.CountByNames{ - Counts: map[string]int64{"example.com": 100, "zombo.com": 100}, - }, - fqdnSet: map[string]bool{}, - t: t, - } - ra.SA = mockSA - - // First check that without a pre-existing FQDN set that the provided set of - // names is rate limited due to being over the certificates per name limit for - // "example.com" and "zombo.com" - err := ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "example.com", "www.zombo.com"}, certsPerNamePolicy, 99) - test.AssertError(t, err, "certificate per name rate limit not applied correctly") - - // Now add a FQDN set entry for these domains - mockSA.addFQDNSet([]string{"www.example.com", "example.com", "www.zombo.com"}) - - // A subsequent check against the certificates per name limit should now be OK - // - there exists a FQDN set and so the exemption to this particular limit - // comes into effect. - err = ra.checkCertificatesPerNameLimit(ctx, []string{"www.example.com", "example.com", "www.zombo.com"}, certsPerNamePolicy, 99) - test.AssertNotError(t, err, "FQDN set certificate per name exemption not applied correctly") -} - -// TestExactPublicSuffixCertLimit tests the behaviour of issue #2681 with and -// without the feature flag for the fix enabled. -// See https://github.com/letsencrypt/boulder/issues/2681 -func TestExactPublicSuffixCertLimit(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) - defer cleanUp() - - // Simple policy that only allows 2 certificates per name. - certsPerNamePolicy := ratelimit.RateLimitPolicy{ - Threshold: 2, - Window: cmd.ConfigDuration{Duration: 23 * time.Hour}, - } - - // We use "dedyn.io" and "dynv6.net" domains for the test on the implicit - // assumption that both domains are present on the public suffix list. - // Quickly verify that this is true before continuing with the rest of the test. 
- _, err := publicsuffix.Domain("dedyn.io") - test.AssertError(t, err, "dedyn.io was not on the public suffix list, invaliding the test") - _, err = publicsuffix.Domain("dynv6.net") - test.AssertError(t, err, "dynv6.net was not on the public suffix list, invaliding the test") - - // Back the mock SA with counts as if so far we have issued the following - // certificates for the following domains: - // - test.dedyn.io (once) - // - test2.dedyn.io (once) - // - dynv6.net (twice) - mockSA := &mockSAWithNameCounts{ - nameCounts: &sapb.CountByNames{ - Counts: map[string]int64{ - "test.dedyn.io": 1, - "test2.dedyn.io": 1, - "test3.dedyn.io": 0, - "dedyn.io": 0, - "dynv6.net": 2, - }, - }, - clk: fc, - t: t, - } - ra.SA = mockSA - - // Trying to issue for "test3.dedyn.io" and "dedyn.io" should succeed because - // test3.dedyn.io has no certificates and "dedyn.io" is an exact public suffix - // match with no certificates issued for it. - err = ra.checkCertificatesPerNameLimit(ctx, []string{"test3.dedyn.io", "dedyn.io"}, certsPerNamePolicy, 99) - test.AssertNotError(t, err, "certificate per name rate limit not applied correctly") - - // Trying to issue for "test3.dedyn.io" and "dynv6.net" should fail because - // "dynv6.net" is an exact public suffic match with 2 certificates issued for - // it. - err = ra.checkCertificatesPerNameLimit(ctx, []string{"test3.dedyn.io", "dynv6.net"}, certsPerNamePolicy, 99) - test.AssertError(t, err, "certificate per name rate limit not applied correctly") -} - -func TestDeactivateAuthorization(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - exp := ra.clk.Now().Add(365 * 24 * time.Hour) - authzID := createFinalizedAuthorization(t, sa, "not-example.com", exp, "valid", ra.clk.Now()) - dbAuthzPB := getAuthorization(t, fmt.Sprint(authzID), sa) - _, err := ra.DeactivateAuthorization(ctx, dbAuthzPB) - test.AssertNotError(t, err, "Could not deactivate authorization") - deact, err := sa.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}) - test.AssertNotError(t, err, "Could not get deactivated authorization with ID "+dbAuthzPB.Id) - test.AssertEquals(t, deact.Status, string(core.StatusDeactivated)) + // Set the default ratelimits to only allow one failed validation per 24 + // hours before pausing. + txnBuilder, err := ratelimits.NewTransactionBuilder(ratelimits.LimitConfigs{ + ratelimits.FailedAuthorizationsForPausingPerDomainPerAccount.String(): &ratelimits.LimitConfig{ + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Hour * 24}}, + }, nil, metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "making transaction composer") + ra.txnBuilder = txnBuilder + + // The first deactivation of a pending authz should work and nothing should + // get paused. + _, err = ra.DeactivateAuthorization(ctx, &corepb.Authorization{ + Id: "1", + RegistrationID: registration.Id, + Identifier: identifier.NewDNS("example.com").ToProto(), + Status: string(core.StatusPending), + }) + test.AssertNotError(t, err, "mock deactivation should work") + test.AssertBoxedNil(t, msa.recv, "shouldn't be a pause request yet") + + // Deactivating a valid authz shouldn't increment any limits or pause anything. 
+ _, err = ra.DeactivateAuthorization(ctx, &corepb.Authorization{ + Id: "2", + RegistrationID: registration.Id, + Identifier: identifier.NewDNS("example.com").ToProto(), + Status: string(core.StatusValid), + }) + test.AssertNotError(t, err, "mock deactivation should work") + test.AssertBoxedNil(t, msa.recv, "deactivating valid authz should never pause") + + // Deactivating a second pending authz should surpass the limit and result + // in a pause request. + _, err = ra.DeactivateAuthorization(ctx, &corepb.Authorization{ + Id: "3", + RegistrationID: registration.Id, + Identifier: identifier.NewDNS("example.com").ToProto(), + Status: string(core.StatusPending), + }) + test.AssertNotError(t, err, "mock deactivation should work") + test.AssertNotNil(t, msa.recv, "should have recorded a pause request") + test.AssertEquals(t, msa.recv.RegistrationID, registration.Id) + test.AssertEquals(t, msa.recv.Identifiers[0].Value, "example.com") } func TestDeactivateRegistration(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() // Deactivate failure because incomplete registration provided - _, err := ra.DeactivateRegistration(context.Background(), &corepb.Registration{}) + _, err := ra.DeactivateRegistration(context.Background(), &rapb.DeactivateRegistrationRequest{}) test.AssertDeepEquals(t, err, fmt.Errorf("incomplete gRPC request message")) - // Deactivate failure because registration status already deactivated - _, err = ra.DeactivateRegistration(context.Background(), - &corepb.Registration{Id: 1, Status: string(core.StatusDeactivated)}) - test.AssertError(t, err, "DeactivateRegistration failed with a non-valid registration") - // Deactivate success with valid registration - _, err = ra.DeactivateRegistration(context.Background(), - &corepb.Registration{Id: 1, Status: string(core.StatusValid)}) + got, err := ra.DeactivateRegistration(context.Background(), &rapb.DeactivateRegistrationRequest{RegistrationID: registration.Id}) test.AssertNotError(t, err, "DeactivateRegistration failed") + test.AssertEquals(t, got.Status, string(core.StatusDeactivated)) // Check db to make sure account is deactivated - dbReg, err := ra.SA.GetRegistration(context.Background(), &sapb.RegistrationID{Id: 1}) + dbReg, err := ra.SA.GetRegistration(context.Background(), &sapb.RegistrationID{Id: registration.Id}) test.AssertNotError(t, err, "GetRegistration failed") test.AssertEquals(t, dbReg.Status, string(core.StatusDeactivated)) } -// noopCAA implements caaChecker, always returning nil +// noopCAA implements vapb.CAAClient, always returning nil type noopCAA struct{} func (cr noopCAA) IsCAAValid( @@ -1560,8 +1039,16 @@ func (cr noopCAA) IsCAAValid( return &vapb.IsCAAValidResponse{}, nil } -// caaRecorder implements caaChecker, always returning nil, but recording the -// names it was called for. +func (cr noopCAA) DoCAA( + ctx context.Context, + in *vapb.IsCAAValidRequest, + opts ...grpc.CallOption, +) (*vapb.IsCAAValidResponse, error) { + return &vapb.IsCAAValidResponse{}, nil +} + +// caaRecorder implements vapb.CAAClient, always returning nil, but recording +// the names it was called for. 
type caaRecorder struct { sync.Mutex names map[string]bool @@ -1574,33 +1061,38 @@ func (cr *caaRecorder) IsCAAValid( ) (*vapb.IsCAAValidResponse, error) { cr.Lock() defer cr.Unlock() - cr.names[in.Domain] = true + cr.names[in.Identifier.Value] = true + return &vapb.IsCAAValidResponse{}, nil +} + +func (cr *caaRecorder) DoCAA( + ctx context.Context, + in *vapb.IsCAAValidRequest, + opts ...grpc.CallOption, +) (*vapb.IsCAAValidResponse, error) { + cr.Lock() + defer cr.Unlock() + cr.names[in.Identifier.Value] = true return &vapb.IsCAAValidResponse{}, nil } // Test that the right set of domain names have their CAA rechecked, based on // their `Validated` (attemptedAt in the database) timestamp. func TestRecheckCAADates(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) + _, _, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() recorder := &caaRecorder{names: make(map[string]bool)} - ra.caa = recorder - ra.authorizationLifetime = 15 * time.Hour + ra.VA = va.RemoteClients{CAAClient: recorder} + ra.profiles.def().validAuthzLifetime = 15 * time.Hour recentValidated := fc.Now().Add(-1 * time.Hour) recentExpires := fc.Now().Add(15 * time.Hour) olderValidated := fc.Now().Add(-8 * time.Hour) olderExpires := fc.Now().Add(5 * time.Hour) - makeIdentifier := func(name string) identifier.ACMEIdentifier { - return identifier.ACMEIdentifier{ - Type: identifier.DNS, - Value: name, - } - } - authzs := map[string]*core.Authorization{ - "recent.com": { - Identifier: makeIdentifier("recent.com"), + authzs := map[identifier.ACMEIdentifier]*core.Authorization{ + identifier.NewDNS("recent.com"): { + Identifier: identifier.NewDNS("recent.com"), Expires: &recentExpires, Challenges: []core.Challenge{ { @@ -1611,8 +1103,8 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "older.com": { - Identifier: makeIdentifier("older.com"), + identifier.NewDNS("older.com"): { + Identifier: identifier.NewDNS("older.com"), Expires: &olderExpires, Challenges: []core.Challenge{ { @@ -1623,8 +1115,8 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "older2.com": { - Identifier: makeIdentifier("older2.com"), + identifier.NewDNS("older2.com"): { + Identifier: identifier.NewDNS("older2.com"), Expires: &olderExpires, Challenges: []core.Challenge{ { @@ -1635,8 +1127,8 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "wildcard.com": { - Identifier: makeIdentifier("wildcard.com"), + identifier.NewDNS("wildcard.com"): { + Identifier: identifier.NewDNS("wildcard.com"), Expires: &olderExpires, Challenges: []core.Challenge{ { @@ -1647,8 +1139,8 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "*.wildcard.com": { - Identifier: makeIdentifier("*.wildcard.com"), + identifier.NewDNS("*.wildcard.com"): { + Identifier: identifier.NewDNS("*.wildcard.com"), Expires: &olderExpires, Challenges: []core.Challenge{ { @@ -1659,9 +1151,11 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "twochallenges.com": { + } + twoChallenges := map[identifier.ACMEIdentifier]*core.Authorization{ + identifier.NewDNS("twochallenges.com"): { ID: "twochal", - Identifier: makeIdentifier("twochallenges.com"), + Identifier: identifier.NewDNS("twochallenges.com"), Expires: &recentExpires, Challenges: []core.Challenge{ { @@ -1678,15 +1172,19 @@ func TestRecheckCAADates(t *testing.T) { }, }, }, - "nochallenges.com": { + } + noChallenges := map[identifier.ACMEIdentifier]*core.Authorization{ + identifier.NewDNS("nochallenges.com"): { ID: "nochal", - Identifier: makeIdentifier("nochallenges.com"), + Identifier: 
identifier.NewDNS("nochallenges.com"), Expires: &recentExpires, Challenges: []core.Challenge{}, }, - "novalidationtime.com": { + } + noValidationTime := map[identifier.ACMEIdentifier]*core.Authorization{ + identifier.NewDNS("novalidationtime.com"): { ID: "noval", - Identifier: makeIdentifier("novalidationtime.com"), + Identifier: identifier.NewDNS("novalidationtime.com"), Expires: &recentExpires, Challenges: []core.Challenge{ { @@ -1701,29 +1199,24 @@ func TestRecheckCAADates(t *testing.T) { // NOTE: The names provided here correspond to authorizations in the // `mockSAWithRecentAndOlder` - names := []string{"recent.com", "older.com", "older2.com", "wildcard.com", "*.wildcard.com"} - err := ra.checkAuthorizationsCAA(context.Background(), names, authzs, 999, fc.Now()) + err := ra.checkAuthorizationsCAA(context.Background(), registration.Id, authzs, fc.Now()) // We expect that there is no error rechecking authorizations for these names if err != nil { t.Errorf("expected nil err, got %s", err) } // Should error if a authorization has `!= 1` challenge - err = ra.checkAuthorizationsCAA(context.Background(), []string{"twochallenges.com"}, authzs, 999, fc.Now()) + err = ra.checkAuthorizationsCAA(context.Background(), registration.Id, twoChallenges, fc.Now()) test.AssertEquals(t, err.Error(), "authorization has incorrect number of challenges. 1 expected, 2 found for: id twochal") // Should error if a authorization has `!= 1` challenge - err = ra.checkAuthorizationsCAA(context.Background(), []string{"nochallenges.com"}, authzs, 999, fc.Now()) + err = ra.checkAuthorizationsCAA(context.Background(), registration.Id, noChallenges, fc.Now()) test.AssertEquals(t, err.Error(), "authorization has incorrect number of challenges. 1 expected, 0 found for: id nochal") // Should error if authorization's challenge has no validated timestamp - err = ra.checkAuthorizationsCAA(context.Background(), []string{"novalidationtime.com"}, authzs, 999, fc.Now()) + err = ra.checkAuthorizationsCAA(context.Background(), registration.Id, noValidationTime, fc.Now()) test.AssertEquals(t, err.Error(), "authorization's challenge has no validated timestamp for: id noval") - // Test to make sure the authorization lifetime codepath was not used - // to determine if CAA needed recheck. 
- test.AssertMetricWithLabelsEquals(t, ra.recheckCAAUsedAuthzLifetime, prometheus.Labels{}, 0) - // We expect that "recent.com" is not checked because its mock authorization // isn't expired if _, present := recorder.names["recent.com"]; present { @@ -1760,55 +1253,83 @@ func (cf *caaFailer) IsCAAValid( opts ...grpc.CallOption, ) (*vapb.IsCAAValidResponse, error) { cvrpb := &vapb.IsCAAValidResponse{} - switch in.Domain { + switch in.Identifier.Value { + case "a.com": + cvrpb.Problem = &corepb.ProblemDetails{ + Detail: "CAA invalid for a.com", + } + case "b.com": + case "c.com": + cvrpb.Problem = &corepb.ProblemDetails{ + Detail: "CAA invalid for c.com", + } + case "d.com": + return nil, fmt.Errorf("Error checking CAA for d.com") + default: + return nil, fmt.Errorf("Unexpected test case") + } + return cvrpb, nil +} + +func (cf *caaFailer) DoCAA( + ctx context.Context, + in *vapb.IsCAAValidRequest, + opts ...grpc.CallOption, +) (*vapb.IsCAAValidResponse, error) { + cvrpb := &vapb.IsCAAValidResponse{} + switch in.Identifier.Value { case "a.com": cvrpb.Problem = &corepb.ProblemDetails{ Detail: "CAA invalid for a.com", } + case "b.com": case "c.com": cvrpb.Problem = &corepb.ProblemDetails{ Detail: "CAA invalid for c.com", } case "d.com": return nil, fmt.Errorf("Error checking CAA for d.com") + default: + return nil, fmt.Errorf("Unexpected test case") } return cvrpb, nil } func TestRecheckCAAEmpty(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() err := ra.recheckCAA(context.Background(), nil) test.AssertNotError(t, err, "expected nil") } -func makeHTTP01Authorization(domain string) *core.Authorization { +func makeHTTP01Authorization(ident identifier.ACMEIdentifier) *core.Authorization { return &core.Authorization{ - Identifier: identifier.ACMEIdentifier{Type: identifier.DNS, Value: domain}, + Identifier: ident, Challenges: []core.Challenge{{Status: core.StatusValid, Type: core.ChallengeTypeHTTP01}}, } } func TestRecheckCAASuccess(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() + ra.VA = va.RemoteClients{CAAClient: &noopCAA{}} authzs := []*core.Authorization{ - makeHTTP01Authorization("a.com"), - makeHTTP01Authorization("b.com"), - makeHTTP01Authorization("c.com"), + makeHTTP01Authorization(identifier.NewDNS("a.com")), + makeHTTP01Authorization(identifier.NewDNS("b.com")), + makeHTTP01Authorization(identifier.NewDNS("c.com")), } err := ra.recheckCAA(context.Background(), authzs) test.AssertNotError(t, err, "expected nil") } func TestRecheckCAAFail(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - ra.caa = &caaFailer{} + ra.VA = va.RemoteClients{CAAClient: &caaFailer{}} authzs := []*core.Authorization{ - makeHTTP01Authorization("a.com"), - makeHTTP01Authorization("b.com"), - makeHTTP01Authorization("c.com"), + makeHTTP01Authorization(identifier.NewDNS("a.com")), + makeHTTP01Authorization(identifier.NewDNS("b.com")), + makeHTTP01Authorization(identifier.NewDNS("c.com")), } err := ra.recheckCAA(context.Background(), authzs) @@ -1841,7 +1362,7 @@ func TestRecheckCAAFail(t *testing.T) { // Recheck CAA with just one bad authz authzs = []*core.Authorization{ - makeHTTP01Authorization("a.com"), + makeHTTP01Authorization(identifier.NewDNS("a.com")), } err = ra.recheckCAA(context.Background(), authzs) // It should error @@ -1853,343 +1374,674 @@ func TestRecheckCAAFail(t 
*testing.T) { } func TestRecheckCAAInternalServerError(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - ra.caa = &caaFailer{} + ra.VA = va.RemoteClients{CAAClient: &caaFailer{}} authzs := []*core.Authorization{ - makeHTTP01Authorization("a.com"), - makeHTTP01Authorization("b.com"), - makeHTTP01Authorization("d.com"), + makeHTTP01Authorization(identifier.NewDNS("a.com")), + makeHTTP01Authorization(identifier.NewDNS("b.com")), + makeHTTP01Authorization(identifier.NewDNS("d.com")), } err := ra.recheckCAA(context.Background(), authzs) test.AssertError(t, err, "expected err, got nil") test.AssertErrorIs(t, err, berrors.InternalServer) } +func TestRecheckSkipIPAddress(t *testing.T) { + _, _, ra, _, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() + ra.VA = va.RemoteClients{CAAClient: &caaFailer{}} + ident := identifier.NewIP(netip.MustParseAddr("127.0.0.1")) + olderValidated := fc.Now().Add(-8 * time.Hour) + olderExpires := fc.Now().Add(5 * time.Hour) + authzs := map[identifier.ACMEIdentifier]*core.Authorization{ + ident: { + Identifier: ident, + Expires: &olderExpires, + Challenges: []core.Challenge{ + { + Status: core.StatusValid, + Type: core.ChallengeTypeHTTP01, + Token: "exampleToken", + Validated: &olderValidated, + }, + }, + }, + } + err := ra.checkAuthorizationsCAA(context.Background(), registration.Id, authzs, fc.Now()) + test.AssertNotError(t, err, "rechecking CAA for IP address, should have skipped") +} + +func TestRecheckInvalidIdentifierType(t *testing.T) { + _, _, ra, _, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() + ident := identifier.ACMEIdentifier{ + Type: "fnord", + Value: "well this certainly shouldn't have happened", + } + olderValidated := fc.Now().Add(-8 * time.Hour) + olderExpires := fc.Now().Add(5 * time.Hour) + authzs := map[identifier.ACMEIdentifier]*core.Authorization{ + ident: { + Identifier: ident, + Expires: &olderExpires, + Challenges: []core.Challenge{ + { + Status: core.StatusValid, + Type: core.ChallengeTypeHTTP01, + Token: "exampleToken", + Validated: &olderValidated, + }, + }, + }, + } + err := ra.checkAuthorizationsCAA(context.Background(), registration.Id, authzs, fc.Now()) + test.AssertError(t, err, "expected err, got nil") + test.AssertErrorIs(t, err, berrors.Malformed) + test.AssertContains(t, err.Error(), "invalid identifier type") +} + func TestNewOrder(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) + _, _, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = time.Hour + now := fc.Now() orderA, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"b.com", "a.com", "a.com", "C.COM"}, - }) - test.AssertNotError(t, err, "ra.NewOrder failed") - test.AssertEquals(t, orderA.RegistrationID, int64(1)) - test.AssertEquals(t, orderA.Expires, fc.Now().Add(time.Hour).UnixNano()) - test.AssertEquals(t, len(orderA.Names), 3) - // We expect the order names to have been sorted, deduped, and lowercased - test.AssertDeepEquals(t, orderA.Names, []string{"a.com", "b.com", "c.com"}) - test.AssertEquals(t, orderA.Id, int64(1)) - test.AssertEquals(t, numAuthorizations(orderA), 3) - - // Reuse all existing authorizations - orderB, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"b.com", "a.com", "C.COM"}, + RegistrationID: registration.Id, + CertificateProfileName: "test", + 
Identifiers: []*corepb.Identifier{ + identifier.NewDNS("b.com").ToProto(), + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("C.COM").ToProto(), + }, }) test.AssertNotError(t, err, "ra.NewOrder failed") - test.AssertEquals(t, orderB.RegistrationID, int64(1)) - test.AssertEquals(t, orderB.Expires, fc.Now().Add(time.Hour).UnixNano()) - // We expect orderB's ID to match orderA's because of pending order reuse - test.AssertEquals(t, orderB.Id, orderA.Id) - test.AssertEquals(t, len(orderB.Names), 3) - test.AssertDeepEquals(t, orderB.Names, []string{"a.com", "b.com", "c.com"}) - test.AssertEquals(t, numAuthorizations(orderB), 3) - test.AssertDeepEquals(t, orderB.V2Authorizations, orderA.V2Authorizations) - - // Reuse all of the existing authorizations from the previous order and - // add a new one - orderA.Names = append(orderA.Names, "d.com") - orderC, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: orderA.Names, + test.AssertEquals(t, orderA.RegistrationID, registration.Id) + test.AssertEquals(t, orderA.Expires.AsTime(), now.Add(ra.profiles.def().orderLifetime)) + test.AssertEquals(t, len(orderA.Identifiers), 3) + test.AssertEquals(t, orderA.CertificateProfileName, "test") + // We expect the order's identifier values to have been sorted, + // deduplicated, and lowercased. + test.AssertDeepEquals(t, orderA.Identifiers, []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + identifier.NewDNS("c.com").ToProto(), }) - test.AssertNotError(t, err, "ra.NewOrder failed") - test.AssertEquals(t, orderC.RegistrationID, int64(1)) - test.AssertEquals(t, orderC.Expires, fc.Now().Add(time.Hour).UnixNano()) - test.AssertEquals(t, len(orderC.Names), 4) - test.AssertDeepEquals(t, orderC.Names, []string{"a.com", "b.com", "c.com", "d.com"}) - // We expect orderC's ID to not match orderA/orderB's because it is for - // a different set of names - test.AssertNotEquals(t, orderC.Id, orderA.Id) - test.AssertEquals(t, numAuthorizations(orderC), 4) - // Abuse the order of the queries used to extract the reused authorizations - existing := orderC.V2Authorizations[:3] - test.AssertDeepEquals(t, existing, orderA.V2Authorizations) + + test.Assert(t, orderA.Id != 0, "order ID should not be zero") + test.AssertEquals(t, numAuthorizations(orderA), 3) _, err = ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"a"}, + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS("a").ToProto()}, }) test.AssertError(t, err, "NewOrder with invalid names did not error") test.AssertEquals(t, err.Error(), "Cannot issue for \"a\": Domain name needs at least one dot") } -// TestNewOrderReuse tests that subsequent requests by an ACME account to create +// TestNewOrder_OrderReuse tests that subsequent requests by an ACME account to create // an identical order results in only one order being created & subsequently // reused. 
-func TestNewOrderReuse(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) +func TestNewOrder_OrderReuse(t *testing.T) { + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ctx := context.Background() - names := []string{"zombo.com", "welcome.to.zombo.com"} - - // Configure the RA to use a short order lifetime - ra.orderLifetime = time.Hour - // Create a var with two times the order lifetime to reference later - doubleLifetime := ra.orderLifetime * 2 + // Create an initial order with regA and names + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("zombo.com"), + identifier.NewDNS("welcome.to.zombo.com"), + } - // Create an initial request with regA and names orderReq := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: names, + RegistrationID: registration.Id, + Identifiers: idents.ToProtoSlice(), + CertificateProfileName: "test", } + firstOrder, err := ra.NewOrder(context.Background(), orderReq) + test.AssertNotError(t, err, "Adding an initial order for regA failed") // Create a second registration to reference acctKeyB, err := AccountKeyB.MarshalJSON() test.AssertNotError(t, err, "failed to marshal account key") - input := &corepb.Registration{ - Key: acctKeyB, - InitialIP: parseAndMarshalIP(t, "42.42.42.42"), - } - secondReg, err := ra.NewRegistration(ctx, input) + input := &corepb.Registration{Key: acctKeyB} + secondReg, err := ra.NewRegistration(context.Background(), input) test.AssertNotError(t, err, "Error creating a second test registration") - // First, add an order with `names` for regA - firstOrder, err := ra.NewOrder(context.Background(), orderReq) - // It shouldn't fail - test.AssertNotError(t, err, "Adding an initial order for regA failed") - // It should have an ID - test.AssertNotNil(t, firstOrder.Id, "Initial order had a nil ID") + + // Insert a second (albeit identical) profile to reference + ra.profiles.byName["different"] = ra.profiles.def() testCases := []struct { - Name string - OrderReq *rapb.NewOrderRequest - ExpectReuse bool - AdvanceClock *time.Duration + Name string + RegistrationID int64 + Identifiers identifier.ACMEIdentifiers + Profile string + ExpectReuse bool }{ { - Name: "Duplicate order, same regID", - OrderReq: orderReq, + Name: "Duplicate order, same regID", + RegistrationID: registration.Id, + Identifiers: idents, + Profile: "test", // We expect reuse since the order matches firstOrder ExpectReuse: true, }, { - Name: "Subset of order names, same regID", - OrderReq: &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{names[1]}, - }, + Name: "Subset of order names, same regID", + RegistrationID: registration.Id, + Identifiers: idents[:1], + Profile: "test", // We do not expect reuse because the order names don't match firstOrder ExpectReuse: false, }, { - Name: "Duplicate order, different regID", - OrderReq: &rapb.NewOrderRequest{ - RegistrationID: secondReg.Id, - Names: names, - }, - // We do not expect reuse because the order regID differs from firstOrder + Name: "Superset of order names, same regID", + RegistrationID: registration.Id, + Identifiers: append(idents, identifier.NewDNS("blog.zombo.com")), + Profile: "test", + // We do not expect reuse because the order names don't match firstOrder ExpectReuse: false, }, { - Name: "Duplicate order, same regID, first expired", - OrderReq: orderReq, - AdvanceClock: &doubleLifetime, - // We do not expect reuse because firstOrder has expired - ExpectReuse: true, + Name: "Missing profile, same regID", + RegistrationID: 
registration.Id,
+ Identifiers: append(idents, identifier.NewDNS("blog.zombo.com")),
+ // We do not expect reuse because the profile is missing
+ ExpectReuse: false,
},
+ {
+ Name: "Different profile, same regID",
+ RegistrationID: registration.Id,
+ Identifiers: append(idents, identifier.NewDNS("blog.zombo.com")),
+ Profile: "different",
+ // We do not expect reuse because a different profile is specified
+ ExpectReuse: false,
+ },
+ {
+ Name: "Duplicate order, different regID",
+ RegistrationID: secondReg.Id,
+ Identifiers: idents,
+ Profile: "test",
+ // We do not expect reuse because the order regID differs from firstOrder
+ ExpectReuse: false,
+ },
+ // TODO(#7324): Integrate certificate profile variance into this test.
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
- // If the testcase specifies, advance the clock before adding the order
- if tc.AdvanceClock != nil {
- fc.Now().Add(*tc.AdvanceClock)
- }
// Add the order for the test request
- order, err := ra.NewOrder(ctx, tc.OrderReq)
- // It shouldn't fail
+ order, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{
+ RegistrationID: tc.RegistrationID,
+ Identifiers: tc.Identifiers.ToProtoSlice(),
+ CertificateProfileName: tc.Profile,
+ })
test.AssertNotError(t, err, "NewOrder returned an unexpected error")
- // The order should not have a nil ID
test.AssertNotNil(t, order.Id, "NewOrder returned an order with a nil Id")
if tc.ExpectReuse {
// If we expected order reuse for this testcase assert that the order
// has the same ID as the firstOrder
- test.AssertEquals(t, firstOrder.Id, order.Id)
+ test.AssertEquals(t, order.Id, firstOrder.Id)
} else {
// Otherwise assert that the order doesn't have the same ID as the
// firstOrder
- test.AssertNotEquals(t, firstOrder.Id, order.Id)
+ test.AssertNotEquals(t, order.Id, firstOrder.Id)
}
})
}
}
-func TestNewOrderReuseInvalidAuthz(t *testing.T) {
- _, _, ra, _, cleanUp := initAuthorities(t)
+// TestNewOrder_OrderReuse_Expired tests that expired orders are not reused.
+// This is not simply a test case in TestNewOrder_OrderReuse because it has
+// side effects.
+func TestNewOrder_OrderReuse_Expired(t *testing.T) {
+ _, _, ra, _, fc, registration, cleanUp := initAuthorities(t)
defer cleanUp()
- ctx := context.Background()
- names := []string{"zombo.com"}
-
- // Create an initial request with regA and names
- orderReq := &rapb.NewOrderRequest{
- RegistrationID: Registration.Id,
- Names: names,
- }
-
- // First, add an order with `names` for regA
- order, err := ra.NewOrder(ctx, orderReq)
- // It shouldn't fail
- test.AssertNotError(t, err, "Adding an initial order for regA failed")
- // It should have an ID
- test.AssertNotNil(t, order.Id, "Initial order had a nil ID")
- // It should have one authorization
- test.AssertEquals(t, numAuthorizations(order), 1)
+ // Set the order lifetime to something short and known.
+ ra.profiles.def().orderLifetime = time.Hour
- _, err = ra.SA.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{
- Id: order.V2Authorizations[0],
- Status: string(core.StatusInvalid),
- Expires: order.Expires,
- Attempted: string(core.ChallengeTypeDNS01),
- AttemptedAt: ra.clk.Now().UnixNano(),
+ // Create an initial order.
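+ // (For context: order reuse hinges on an SA lookup keyed by account ID and
+ // identifier set, and only a live order qualifies. A rough sketch of that
+ // gate, hypothetical code rather than the SA/RA's actual implementation:
+ //
+ //	func reusable(o *corepb.Order, now time.Time) bool {
+ //		status := core.AcmeStatus(o.Status)
+ //		return o.Expires.AsTime().After(now) &&
+ //			(status == core.StatusPending || status == core.StatusReady)
+ //	}
+ //
+ // This test exercises the expiry half of that condition.)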
+ extant, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{
+ RegistrationID: registration.Id,
+ Identifiers: []*corepb.Identifier{
+ identifier.NewDNS("a.com").ToProto(),
+ identifier.NewDNS("b.com").ToProto(),
+ },
})
- test.AssertNotError(t, err, "FinalizeAuthorization2 failed")
-
- // The order associated with the authz should now be invalid
- updatedOrder, err := ra.SA.GetOrder(ctx, &sapb.OrderRequest{Id: order.Id})
- test.AssertNotError(t, err, "Error getting order to check status")
- test.AssertEquals(t, updatedOrder.Status, "invalid")
-
- // Create a second order for the same names/regID
- secondOrder, err := ra.NewOrder(ctx, orderReq)
- // It shouldn't fail
- test.AssertNotError(t, err, "Adding an initial order for regA failed")
- // It should have a different ID than the first now-invalid order
- test.AssertNotEquals(t, secondOrder.Id, order.Id)
- // It should be status pending
- test.AssertEquals(t, secondOrder.Status, "pending")
- test.AssertEquals(t, numAuthorizations(secondOrder), 1)
- // It should have a different authorization than the first order's now-invalid authorization
- test.AssertNotEquals(t, secondOrder.V2Authorizations[0], order.V2Authorizations[0])
+ test.AssertNotError(t, err, "creating test order")
+
+ // Expire the original order by jumping forward in time to after its expiry.
+ fc.Set(extant.Expires.AsTime().Add(2 * time.Hour))
+
+ // Now a new order for the same names should not reuse the first one.
+ new, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{
+ RegistrationID: registration.Id,
+ Identifiers: []*corepb.Identifier{
+ identifier.NewDNS("a.com").ToProto(),
+ identifier.NewDNS("b.com").ToProto(),
+ },
+ })
+ test.AssertNotError(t, err, "creating test order")
+ test.AssertNotEquals(t, new.Id, extant.Id)
}
-// Test that the failed authorizations limit is checked before authz reuse.
-func TestNewOrderCheckFailedAuthorizationsFirst(t *testing.T) {
- _, _, ra, _, cleanUp := initAuthorities(t)
+// TestNewOrder_OrderReuse_Invalid tests that invalid orders are not reused.
+// This is not simply a test case in TestNewOrder_OrderReuse because it has
+// side effects.
+func TestNewOrder_OrderReuse_Invalid(t *testing.T) {
+ _, sa, ra, _, _, registration, cleanUp := initAuthorities(t)
defer cleanUp()
- _ = features.Set(map[string]bool{"CheckFailedAuthorizationsFirst": true})
- defer features.Reset()
-
- // Create an order (and thus a pending authz) for example.com
- ctx := context.Background()
- order, err := ra.NewOrder(ctx, &rapb.NewOrderRequest{
- RegistrationID: Registration.Id,
- Names: []string{"example.com"},
+ // Create an initial order.
+ extant, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{
+ RegistrationID: registration.Id,
+ Identifiers: []*corepb.Identifier{
+ identifier.NewDNS("a.com").ToProto(),
+ identifier.NewDNS("b.com").ToProto(),
+ },
})
- test.AssertNotError(t, err, "adding an initial order for regA")
- test.AssertNotNil(t, order.Id, "initial order had a nil ID")
- test.AssertEquals(t, numAuthorizations(order), 1)
+ test.AssertNotError(t, err, "creating test order")
- // Now treat example.com as if it had a recent failure.
- ra.SA = &mockInvalidPlusValidAuthzAuthority{mockInvalidAuthorizationsAuthority{domainWithFailures: "example.com"}}
- // Set a very restrictive police for invalid authorizations - one failure
- // and you're done for a day.
- ra.rlPolicies = &dummyRateLimitConfig{ - InvalidAuthorizationsPerAccountPolicy: ratelimit.RateLimitPolicy{ - Threshold: 1, - Window: cmd.ConfigDuration{Duration: 24 * time.Hour}, + // Transition the original order to status invalid by invalidating one of its + // authorizations. + _, err = sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{ + Id: extant.V2Authorizations[0], + }) + test.AssertNotError(t, err, "deactivating test authorization") + + // Now a new order for the same names should not reuse the first one. + new, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), }, - } + }) + test.AssertNotError(t, err, "creating test order") + test.AssertNotEquals(t, new.Id, extant.Id) +} + +func TestNewOrder_AuthzReuse(t *testing.T) { + _, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() - // Creating an order for example.com should error with the "too many failed - // authorizations recently" error. - _, err = ra.NewOrder(ctx, &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"example.com"}, + // Create three initial authzs by creating an initial order, then updating + // the individual authz statuses. + const ( + pending = "a-pending.com" + valid = "b-valid.com" + invalid = "c-invalid.com" + ) + extant, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{ + identifier.NewDNS(pending).ToProto(), + identifier.NewDNS(valid).ToProto(), + identifier.NewDNS(invalid).ToProto(), + }, + }) + test.AssertNotError(t, err, "creating test order") + extantAuthzs := map[string]int64{ + // Take advantage of the fact that authz IDs are returned in the same order + // as the lexicographically-sorted identifiers. + pending: extant.V2Authorizations[0], + valid: extant.V2Authorizations[1], + invalid: extant.V2Authorizations[2], + } + _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: extantAuthzs[valid], + Status: string(core.StatusValid), + Attempted: "hello", + Expires: timestamppb.New(fc.Now().Add(48 * time.Hour)), + }) + test.AssertNotError(t, err, "marking test authz as valid") + _, err = sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{ + Id: extantAuthzs[invalid], }) + test.AssertNotError(t, err, "marking test authz as invalid") - test.AssertError(t, err, "expected error for domain with too many failures") - test.AssertEquals(t, err.Error(), "too many failed authorizations recently: see https://letsencrypt.org/docs/rate-limits/") -} + // Create a second registration to reference later. 
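+ // (For context: authorizations are scoped to one account, which is why a
+ // second registration is created below to demonstrate non-reuse across
+ // accounts. When building a new order the RA only adopts an existing authz
+ // that is valid, unexpired, owned by the requesting account, and tied to
+ // the requested profile. A rough sketch of that filter, hypothetical code
+ // rather than the RA's actual implementation:
+ //
+ //	func adoptable(status core.AcmeStatus, expires time.Time, now time.Time, gotProfile, wantProfile string) bool {
+ //		return status == core.StatusValid && expires.After(now) && gotProfile == wantProfile
+ //	}
+ //
+ // The table below exercises each clause of that predicate in turn.)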
+ acctKeyB, err := AccountKeyB.MarshalJSON()
+ test.AssertNotError(t, err, "failed to marshal account key")
+ input := &corepb.Registration{Key: acctKeyB}
+ secondReg, err := ra.NewRegistration(context.Background(), input)
+ test.AssertNotError(t, err, "Error creating a second test registration")
+
+ testCases := []struct {
+ Name string
+ RegistrationID int64
+ Identifier identifier.ACMEIdentifier
+ Profile string
+ ExpectReuse bool
+ }{
+ {
+ Name: "Don't reuse pending authz",
+ RegistrationID: registration.Id,
+ Identifier: identifier.NewDNS(pending),
+ ExpectReuse: false,
+ },
+ {
+ Name: "Reuse valid authz",
+ RegistrationID: registration.Id,
+ Identifier: identifier.NewDNS(valid),
+ ExpectReuse: true,
+ },
+ {
+ Name: "Don't reuse invalid authz",
+ RegistrationID: registration.Id,
+ Identifier: identifier.NewDNS(invalid),
+ ExpectReuse: false,
+ },
+ {
+ Name: "Don't reuse valid authz with wrong profile",
+ RegistrationID: registration.Id,
+ Identifier: identifier.NewDNS(valid),
+ Profile: "test",
+ ExpectReuse: false,
+ },
+ {
+ Name: "Don't reuse valid authz from other acct",
+ RegistrationID: secondReg.Id,
+ Identifier: identifier.NewDNS(valid),
+ ExpectReuse: false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ new, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{
+ RegistrationID: tc.RegistrationID,
+ Identifiers: []*corepb.Identifier{tc.Identifier.ToProto()},
+ CertificateProfileName: tc.Profile,
+ })
+ test.AssertNotError(t, err, "creating test order")
+ test.AssertNotEquals(t, new.Id, extant.Id)
-// mockSAUnsafeAuthzReuse has a GetAuthorizations implementation that returns
-// an HTTP-01 validated wildcard authz.
-type mockSAUnsafeAuthzReuse struct {
- mocks.StorageAuthority
+ if tc.ExpectReuse {
+ test.AssertEquals(t, new.V2Authorizations[0], extantAuthzs[tc.Identifier.Value])
+ } else {
+ test.AssertNotEquals(t, new.V2Authorizations[0], extantAuthzs[tc.Identifier.Value])
+ }
+ })
+ }
}
-// GetAuthorizations2 returns a _bizarre_ authorization for "*.zombo.com" that
-// was validated by HTTP-01. This should never happen in real life since the
-// name is a wildcard. We use this mock to test that we reject this bizarre
-// situation correctly.
-func (msa *mockSAUnsafeAuthzReuse) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) {
- expires := time.Now()
- authzs := map[string]*core.Authorization{
- "*.zombo.com": {
- // A static fake ID we can check for in a unit test
- ID: "1",
- Identifier: identifier.DNSIdentifier("*.zombo.com"),
- RegistrationID: req.RegistrationID,
- // Authz is valid
- Status: "valid",
- Expires: &expires,
- Challenges: []core.Challenge{
- // HTTP-01 challenge is valid
- {
- Type: core.ChallengeTypeHTTP01, // The dreaded HTTP-01! X__X
- Status: core.StatusValid,
- },
- // DNS-01 challenge is pending
- {
- Type: core.ChallengeTypeDNS01,
- Status: core.StatusPending,
- },
+func TestNewOrder_ValidationProfiles(t *testing.T) {
+ _, _, ra, _, _, registration, cleanUp := initAuthorities(t)
+ defer cleanUp()
+
+ ra.profiles = &validationProfiles{
+ defaultName: "one",
+ byName: map[string]*validationProfile{
+ "one": {
+ pendingAuthzLifetime: 1 * 24 * time.Hour,
+ validAuthzLifetime: 1 * 24 * time.Hour,
+ orderLifetime: 1 * 24 * time.Hour,
+ maxNames: 10,
+ identifierTypes: []identifier.IdentifierType{identifier.TypeDNS},
},
- },
- "zombo.com": {
- // A static fake ID we can check for in a unit test
- ID: "2",
- Identifier: identifier.DNSIdentifier("zombo.com"),
- RegistrationID: req.RegistrationID,
- // Authz is valid
- Status: "valid",
- Expires: &expires,
- Challenges: []core.Challenge{
- // HTTP-01 challenge is valid
- {
- Type: core.ChallengeTypeHTTP01,
- Status: core.StatusValid,
- },
- // DNS-01 challenge is pending
- {
- Type: core.ChallengeTypeDNS01,
- Status: core.StatusPending,
- },
+ "two": {
+ pendingAuthzLifetime: 2 * 24 * time.Hour,
+ validAuthzLifetime: 2 * 24 * time.Hour,
+ orderLifetime: 2 * 24 * time.Hour,
+ maxNames: 10,
+ identifierTypes: []identifier.IdentifierType{identifier.TypeDNS},
},
},
}
- return sa.AuthzMapToPB(authzs)
+ for _, tc := range []struct {
+ name string
+ profile string
+ wantExpires time.Time
+ }{
+ {
+ // A request with no profile should get an order and authzs with one-day lifetimes.
+ name: "no profile specified",
+ profile: "",
+ wantExpires: ra.clk.Now().Add(1 * 24 * time.Hour),
+ },
+ {
+ // A request for profile one should get an order and authzs with one-day lifetimes.
+ name: "profile one",
+ profile: "one",
+ wantExpires: ra.clk.Now().Add(1 * 24 * time.Hour),
+ },
+ {
+ // A request for profile two should get an order and authzs with two-day lifetimes.
+ name: "profile two", + profile: "two", + wantExpires: ra.clk.Now().Add(2 * 24 * time.Hour), + }, + } { + t.Run(tc.name, func(t *testing.T) { + order, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS(randomDomain()).ToProto()}, + CertificateProfileName: tc.profile, + }) + if err != nil { + t.Fatalf("creating order: %s", err) + } + gotExpires := order.Expires.AsTime() + if gotExpires != tc.wantExpires { + t.Errorf("NewOrder(profile: %q).Expires = %s, expected %s", tc.profile, gotExpires, tc.wantExpires) + } + + authz, err := ra.GetAuthorization(context.Background(), &rapb.GetAuthorizationRequest{ + Id: order.V2Authorizations[0], + }) + if err != nil { + t.Fatalf("fetching test authz: %s", err) + } + gotExpires = authz.Expires.AsTime() + if gotExpires != tc.wantExpires { + t.Errorf("GetAuthorization(profile: %q).Expires = %s, expected %s", tc.profile, gotExpires, tc.wantExpires) + } + }) + } +} + +func TestNewOrder_ProfileSelectionAllowList(t *testing.T) { + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + testCases := []struct { + name string + profile validationProfile + expectErr bool + expectErrContains string + }{ + { + name: "Allow all account IDs", + profile: validationProfile{allowList: nil}, + expectErr: false, + }, + { + name: "Deny all but account Id 1337", + profile: validationProfile{allowList: allowlist.NewList([]int64{1337})}, + expectErr: true, + expectErrContains: "not permitted to use certificate profile", + }, + { + name: "Deny all", + profile: validationProfile{allowList: allowlist.NewList([]int64{})}, + expectErr: true, + expectErrContains: "not permitted to use certificate profile", + }, + { + name: "Allow registration ID", + profile: validationProfile{allowList: allowlist.NewList([]int64{registration.Id})}, + expectErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.profile.maxNames = 1 + tc.profile.identifierTypes = []identifier.IdentifierType{identifier.TypeDNS} + ra.profiles.byName = map[string]*validationProfile{ + "test": &tc.profile, + } + + orderReq := &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS(randomDomain()).ToProto()}, + CertificateProfileName: "test", + } + _, err := ra.NewOrder(context.Background(), orderReq) + + if tc.expectErrContains != "" { + test.AssertErrorIs(t, err, berrors.Unauthorized) + test.AssertContains(t, err.Error(), tc.expectErrContains) + } else { + test.AssertNotError(t, err, "NewOrder failed") + } + }) + } } -func (msa *mockSAUnsafeAuthzReuse) NewAuthorizations2(_ context.Context, _ *sapb.AddPendingAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorization2IDs, error) { - return &sapb.Authorization2IDs{ - Ids: []int64{5}, - }, nil +func TestNewOrder_ProfileIdentifierTypes(t *testing.T) { + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + testCases := []struct { + name string + identTypes []identifier.IdentifierType + idents []*corepb.Identifier + expectErr string + }{ + { + name: "Permit DNS, provide DNS names", + identTypes: []identifier.IdentifierType{identifier.TypeDNS}, + idents: []*corepb.Identifier{identifier.NewDNS(randomDomain()).ToProto(), identifier.NewDNS(randomDomain()).ToProto()}, + }, + { + name: "Permit IP, provide IPs", + identTypes: []identifier.IdentifierType{identifier.TypeIP}, + idents: 
[]*corepb.Identifier{identifier.NewIP(randomIPv6()).ToProto(), identifier.NewIP(randomIPv6()).ToProto()},
+ },
+ {
+ name: "Permit DNS & IP, provide DNS & IP",
+ identTypes: []identifier.IdentifierType{identifier.TypeDNS, identifier.TypeIP},
+ idents: []*corepb.Identifier{identifier.NewIP(randomIPv6()).ToProto(), identifier.NewDNS(randomDomain()).ToProto()},
+ },
+ {
+ name: "Permit DNS, provide IP",
+ identTypes: []identifier.IdentifierType{identifier.TypeDNS},
+ idents: []*corepb.Identifier{identifier.NewIP(randomIPv6()).ToProto()},
+ expectErr: "Profile \"test\" does not permit ip type identifiers",
+ },
+ {
+ name: "Permit DNS, provide DNS & IP",
+ identTypes: []identifier.IdentifierType{identifier.TypeDNS},
+ idents: []*corepb.Identifier{identifier.NewDNS(randomDomain()).ToProto(), identifier.NewIP(randomIPv6()).ToProto()},
+ expectErr: "Profile \"test\" does not permit ip type identifiers",
+ },
+ {
+ name: "Permit IP, provide DNS",
+ identTypes: []identifier.IdentifierType{identifier.TypeIP},
+ idents: []*corepb.Identifier{identifier.NewDNS(randomDomain()).ToProto()},
+ expectErr: "Profile \"test\" does not permit dns type identifiers",
+ },
+ {
+ name: "Permit IP, provide DNS & IP",
+ identTypes: []identifier.IdentifierType{identifier.TypeIP},
+ idents: []*corepb.Identifier{identifier.NewIP(randomIPv6()).ToProto(), identifier.NewDNS(randomDomain()).ToProto()},
+ expectErr: "Profile \"test\" does not permit dns type identifiers",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ var profile validationProfile
+ profile.maxNames = 2
+ profile.identifierTypes = tc.identTypes
+ ra.profiles.byName = map[string]*validationProfile{
+ "test": &profile,
+ }
+
+ orderReq := &rapb.NewOrderRequest{
+ RegistrationID: registration.Id,
+ Identifiers: tc.idents,
+ CertificateProfileName: "test",
+ }
+ _, err := ra.NewOrder(context.Background(), orderReq)
+
+ if tc.expectErr != "" {
+ test.AssertErrorIs(t, err, berrors.RejectedIdentifier)
+ test.AssertContains(t, err.Error(), tc.expectErr)
+ } else {
+ test.AssertNotError(t, err, "NewOrder failed")
+ }
+ })
+ }
}
-func (msa *mockSAUnsafeAuthzReuse) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
- r := req.NewOrder
- for range req.NewAuthzs {
- r.V2Authorizations = append(r.V2Authorizations, mrand.Int63())
+// mockSAWithAuthzs has a GetValidAuthorizations2 method that returns the protobuf
+// version of its authzs struct member. It also has a fake GetOrderForNames
+// which always fails, and a fake NewOrderAndAuthzs which always succeeds, to
+// facilitate the full execution of RA.NewOrder.
+type mockSAWithAuthzs struct {
+ sapb.StorageAuthorityClient
+ authzs []*core.Authorization
+}
+
+// GetOrderForNames is a mock which always returns NotFound so that NewOrder
+// proceeds to attempt authz reuse instead of wholesale order reuse.
+func (msa *mockSAWithAuthzs) GetOrderForNames(ctx context.Context, req *sapb.GetOrderForNamesRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
+ return nil, berrors.NotFoundError("no such order")
+}
+
+// GetValidAuthorizations2 returns the protobuf form of whatever authzs this
+// mock was configured with. Several tests configure a _bizarre_ authorization
+// for "*.zombo.com" that was validated by HTTP-01. That should never happen in
+// real life since the name is a wildcard; those tests use this mock to check
+// that we reject the bizarre situation correctly.
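+//
+// For context: because mockSAWithAuthzs embeds the sapb.StorageAuthorityClient
+// interface instead of implementing it in full, any method a test calls
+// without overriding is dispatched on the nil embedded interface and panics,
+// so unmocked code paths fail loudly. A minimal sketch of the same embedding
+// pattern, with hypothetical names rather than types from this codebase:
+//
+//	type Getter interface{ Get(id int64) (string, error) }
+//	type mockGetter struct{ Getter } // calls to unoverridden methods panic
+//	func (m *mockGetter) Get(id int64) (string, error) { return "stub", nil }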
+func (msa *mockSAWithAuthzs) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { + resp := &sapb.Authorizations{} + for _, v := range msa.authzs { + authzPB, err := bgrpc.AuthzToPB(*v) + if err != nil { + return nil, err + } + resp.Authzs = append(resp.Authzs, authzPB) + } + return resp, nil +} + +func (msa *mockSAWithAuthzs) GetAuthorization2(ctx context.Context, req *sapb.AuthorizationID2, _ ...grpc.CallOption) (*corepb.Authorization, error) { + for _, authz := range msa.authzs { + if authz.ID == fmt.Sprintf("%d", req.Id) { + return bgrpc.AuthzToPB(*authz) + } } - return msa.NewOrder(ctx, r) + return nil, berrors.NotFoundError("no such authz") +} + +// NewOrderAndAuthzs is a mock which just reflects the incoming request back, +// pretending to have created new db rows for the requested newAuthzs. +func (msa *mockSAWithAuthzs) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest, _ ...grpc.CallOption) (*corepb.Order, error) { + authzIDs := req.NewOrder.V2Authorizations + for range req.NewAuthzs { + authzIDs = append(authzIDs, mrand.Int64()) + } + return &corepb.Order{ + // Fields from the input new order request. + RegistrationID: req.NewOrder.RegistrationID, + Expires: req.NewOrder.Expires, + Identifiers: req.NewOrder.Identifiers, + V2Authorizations: authzIDs, + CertificateProfileName: req.NewOrder.CertificateProfileName, + // Mock new fields generated by the database transaction. + Id: mrand.Int64(), + Created: timestamppb.Now(), + // A new order is never processing because it can't have been finalized yet. + BeganProcessing: false, + Status: string(core.StatusPending), + }, nil } // TestNewOrderAuthzReuseSafety checks that the RA's safety check for reusing an @@ -2199,69 +2051,189 @@ func (msa *mockSAUnsafeAuthzReuse) NewOrderAndAuthzs(ctx context.Context, req *s // for background - this safety check was previously broken! // https://github.com/letsencrypt/boulder/issues/3420 func TestNewOrderAuthzReuseSafety(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() ctx := context.Background() - names := []string{"*.zombo.com"} + idents := identifier.ACMEIdentifiers{identifier.NewDNS("*.zombo.com")} // Use a mock SA that always returns a valid HTTP-01 authz for the name // "zombo.com" - ra.SA = &mockSAUnsafeAuthzReuse{} + expires := time.Now() + ra.SA = &mockSAWithAuthzs{ + authzs: []*core.Authorization{ + { + // A static fake ID we can check for in a unit test + ID: "1", + Identifier: identifier.NewDNS("*.zombo.com"), + RegistrationID: registration.Id, + // Authz is valid + Status: "valid", + Expires: &expires, + Challenges: []core.Challenge{ + // HTTP-01 challenge is valid + { + Type: core.ChallengeTypeHTTP01, // The dreaded HTTP-01! 
X__X
+ Status: core.StatusValid,
+ Token: core.NewToken(),
+ },
+ // DNS-01 challenge is pending
+ {
+ Type: core.ChallengeTypeDNS01,
+ Status: core.StatusPending,
+ Token: core.NewToken(),
+ },
+ },
+ },
+ {
+ // A static fake ID we can check for in a unit test
+ ID: "2",
+ Identifier: identifier.NewDNS("zombo.com"),
+ RegistrationID: registration.Id,
+ // Authz is valid
+ Status: "valid",
+ Expires: &expires,
+ Challenges: []core.Challenge{
+ // HTTP-01 challenge is valid
+ {
+ Type: core.ChallengeTypeHTTP01,
+ Status: core.StatusValid,
+ Token: core.NewToken(),
+ },
+ // DNS-01 challenge is pending
+ {
+ Type: core.ChallengeTypeDNS01,
+ Status: core.StatusPending,
+ Token: core.NewToken(),
+ },
+ },
+ },
+ },
+ }
// Create an initial request with regA and names
orderReq := &rapb.NewOrderRequest{
- RegistrationID: Registration.Id,
- Names: names,
+ RegistrationID: registration.Id,
+ Identifiers: idents.ToProtoSlice(),
}
// Create an order for that request
- order, err := ra.NewOrder(ctx, orderReq)
- // It shouldn't fail
- test.AssertNotError(t, err, "Adding an initial order for regA failed")
- test.AssertEquals(t, numAuthorizations(order), 1)
- // It should *not* be the bad authorization!
- test.AssertNotEquals(t, order.V2Authorizations[0], int64(1))
+ _, err := ra.NewOrder(ctx, orderReq)
+ // It should fail because HTTP-01 is not a valid challenge type for wildcards
+ test.AssertError(t, err, "Added an initial order for regA with invalid challenge(s)")
+ test.AssertContains(t, err.Error(), "SA.GetAuthorizations returned a DNS wildcard authz (1) with invalid challenge(s)")
}
-func TestNewOrderAuthzReuseDisabled(t *testing.T) {
- _, _, ra, _, cleanUp := initAuthorities(t)
+// TestNewOrderAuthzReuseDNSAccount01 checks that the RA correctly allows reuse
+// of a wildcard authorization with a DNS-Account-01 challenge.
+func TestNewOrderAuthzReuseDNSAccount01(t *testing.T) {
+ _, _, ra, _, _, registration, cleanUp := initAuthorities(t)
defer cleanUp()
+ features.Set(features.Config{DNSAccount01Enabled: true})
+ defer features.Reset()
+
ctx := context.Background()
- names := []string{"zombo.com"}
+ idents := identifier.ACMEIdentifiers{identifier.NewDNS("*.zombo.com")}
+
+ // Use a mock SA that returns a pending authz with both DNS-01 and
+ // DNS-Account-01 challenges for wildcard. This tests that pending wildcard
+ // authorizations with 2 challenges can be reused.
+ expires := time.Now().Add(24 * time.Hour)
+ ra.SA = &mockSAWithAuthzs{
+ authzs: []*core.Authorization{
+ {
+ ID: "1",
+ Identifier: identifier.NewDNS("*.zombo.com"),
+ RegistrationID: registration.Id,
+ Status: "pending",
+ Expires: &expires,
+ Challenges: []core.Challenge{
+ {
+ Type: core.ChallengeTypeDNS01,
+ Status: core.StatusPending,
+ Token: core.NewToken(),
+ },
+ {
+ Type: core.ChallengeTypeDNSAccount01,
+ Status: core.StatusPending,
+ Token: core.NewToken(),
+ },
+ },
+ },
+ },
+ }
- // Use a mock SA that always returns a valid HTTP-01 authz for the name
- // "zombo.com"
- ra.SA = &mockSAUnsafeAuthzReuse{}
+ orderReq := &rapb.NewOrderRequest{
+ RegistrationID: registration.Id,
+ Identifiers: idents.ToProtoSlice(),
+ }
+
+ // Create an order for the wildcard domain. NewOrder should recognize that
+ // the existing pending authorization with its DNS-Account-01 challenge can
+ // be reused for this wildcard domain request.
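+ // (For context: wildcard identifiers may only be proven via DNS-based
+ // challenges, so a reusable wildcard authz must carry nothing but DNS-01,
+ // plus DNS-Account-01 when that feature flag is on. A rough sketch of the
+ // gate, hypothetical code rather than the RA's actual implementation:
+ //
+ //	func wildcardChallengeOK(t core.AcmeChallenge, dnsAccount01Enabled bool) bool {
+ //		return t == core.ChallengeTypeDNS01 ||
+ //			(dnsAccount01Enabled && t == core.ChallengeTypeDNSAccount01)
+ //	})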
+ order, err := ra.NewOrder(ctx, orderReq) + test.AssertNotError(t, err, "NewOrder failed to reuse wildcard authz with DNS-Account-01") + // The order should contain exactly one authorization (the reused one) + test.AssertEquals(t, len(order.V2Authorizations), 1) + // The authorization ID should match the mock authz we provided (ID "1") + test.AssertEquals(t, order.V2Authorizations[0], int64(1)) +} - // Disable authz reuse - ra.reuseValidAuthz = false +// TestNewOrderAuthzReuseDNSAccount01Disabled checks that the RA rejects +// wildcard authorization reuse with DNS-Account-01 when the feature is disabled. +func TestNewOrderAuthzReuseDNSAccount01Disabled(t *testing.T) { + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + // Feature flag is NOT set - DNSAccount01Enabled defaults to false + + ctx := context.Background() + idents := identifier.ACMEIdentifiers{identifier.NewDNS("*.zombo.com")} + + // Use a mock SA that returns a DNS-Account-01 authz for wildcard + expires := time.Now().Add(24 * time.Hour) + ra.SA = &mockSAWithAuthzs{ + authzs: []*core.Authorization{ + { + ID: "1", + Identifier: identifier.NewDNS("*.zombo.com"), + RegistrationID: registration.Id, + Status: "valid", + Expires: &expires, + Challenges: []core.Challenge{ + { + Type: core.ChallengeTypeDNSAccount01, + Status: core.StatusValid, + Token: core.NewToken(), + }, + }, + }, + }, + } - // Create an initial request with regA and names orderReq := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: names, + RegistrationID: registration.Id, + Identifiers: idents.ToProtoSlice(), } - // Create an order for that request - order, err := ra.NewOrder(ctx, orderReq) - // It shouldn't fail - test.AssertNotError(t, err, "Adding an initial order for regA failed") - test.AssertEquals(t, numAuthorizations(order), 1) - // It should *not* be the bad authorization that indicates reuse! 
- test.AssertNotEquals(t, order.V2Authorizations[0], int64(2)) + // NewOrder should reject the DNS-Account-01 authz when feature is disabled + _, err := ra.NewOrder(ctx, orderReq) + test.AssertError(t, err, "NewOrder should reject DNS-Account-01 when feature disabled") + test.AssertContains(t, err.Error(), "SA.GetAuthorizations returned a DNS wildcard authz (1) with invalid challenge(s)") } func TestNewOrderWildcard(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = time.Hour - orderNames := []string{"example.com", "*.welcome.zombo.com"} + orderIdents := identifier.ACMEIdentifiers{ + identifier.NewDNS("example.com"), + identifier.NewDNS("*.welcome.zombo.com"), + } wildcardOrderRequest := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: orderNames, + RegistrationID: registration.Id, + Identifiers: orderIdents.ToProtoSlice(), } order, err := ra.NewOrder(context.Background(), wildcardOrderRequest) @@ -2269,18 +2241,18 @@ func TestNewOrderWildcard(t *testing.T) { // We expect the order to be pending test.AssertEquals(t, order.Status, string(core.StatusPending)) - // We expect the order to have two names - test.AssertEquals(t, len(order.Names), 2) - // We expect the order to have the names we requested + // We expect the order to have two identifiers + test.AssertEquals(t, len(order.Identifiers), 2) + + // We expect the order to have the identifiers we requested test.AssertDeepEquals(t, - core.UniqueLowerNames(order.Names), - core.UniqueLowerNames(orderNames)) + identifier.Normalize(identifier.FromProtoSlice(order.Identifiers)), + identifier.Normalize(orderIdents)) test.AssertEquals(t, numAuthorizations(order), 2) // Check each of the authz IDs in the order for _, authzID := range order.V2Authorizations { // We should be able to retrieve the authz from the db without error - authzID := authzID authzPB, err := ra.SA.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}) test.AssertNotError(t, err, "sa.GetAuthorization2 failed") authz, err := bgrpc.PBToAuthz(authzPB) @@ -2299,7 +2271,7 @@ func TestNewOrderWildcard(t *testing.T) { test.AssertEquals(t, authz.Challenges[0].Type, core.ChallengeTypeDNS01) case "example.com": // If the authz is for example.com, we expect it has normal challenges - test.AssertEquals(t, len(authz.Challenges), 2) + test.AssertEquals(t, len(authz.Challenges), 3) default: t.Fatalf("Received an authorization for a name not requested: %q", name) } @@ -2308,27 +2280,29 @@ func TestNewOrderWildcard(t *testing.T) { // An order for a base domain and a wildcard for the same base domain should // return just 2 authz's, one for the wildcard with a DNS-01 // challenge and one for the base domain with the normal challenges. 
- orderNames = []string{"zombo.com", "*.zombo.com"} + orderIdents = identifier.ACMEIdentifiers{ + identifier.NewDNS("zombo.com"), + identifier.NewDNS("*.zombo.com"), + } wildcardOrderRequest = &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: orderNames, + RegistrationID: registration.Id, + Identifiers: orderIdents.ToProtoSlice(), } order, err = ra.NewOrder(context.Background(), wildcardOrderRequest) test.AssertNotError(t, err, "NewOrder failed for a wildcard order request") // We expect the order to be pending test.AssertEquals(t, order.Status, string(core.StatusPending)) - // We expect the order to have two names - test.AssertEquals(t, len(order.Names), 2) - // We expect the order to have the names we requested + // We expect the order to have two identifiers + test.AssertEquals(t, len(order.Identifiers), 2) + // We expect the order to have the identifiers we requested test.AssertDeepEquals(t, - core.UniqueLowerNames(order.Names), - core.UniqueLowerNames(orderNames)) + identifier.Normalize(identifier.FromProtoSlice(order.Identifiers)), + identifier.Normalize(orderIdents)) test.AssertEquals(t, numAuthorizations(order), 2) for _, authzID := range order.V2Authorizations { // We should be able to retrieve the authz from the db without error - authzID := authzID authzPB, err := ra.SA.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}) test.AssertNotError(t, err, "sa.GetAuthorization2 failed") authz, err := bgrpc.PBToAuthz(authzPB) @@ -2339,7 +2313,7 @@ func TestNewOrderWildcard(t *testing.T) { case "zombo.com": // We expect that the base domain identifier auth has the normal number of // challenges - test.AssertEquals(t, len(authz.Challenges), 2) + test.AssertEquals(t, len(authz.Challenges), 3) case "*.zombo.com": // We expect that the wildcard identifier auth has only a pending // DNS-01 type challenge @@ -2354,8 +2328,8 @@ func TestNewOrderWildcard(t *testing.T) { // Make an order for a single domain, no wildcards. This will create a new // pending authz for the domain normalOrderReq := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"everything.is.possible.zombo.com"}, + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS("everything.is.possible.zombo.com").ToProto()}, } normalOrder, err := ra.NewOrder(context.Background(), normalOrderReq) test.AssertNotError(t, err, "NewOrder failed for a normal non-wildcard order") @@ -2373,15 +2347,15 @@ func TestNewOrderWildcard(t *testing.T) { // We expect the authz is for the identifier the correct domain test.AssertEquals(t, authz.Identifier.Value, "everything.is.possible.zombo.com") // We expect the authz has the normal # of challenges - test.AssertEquals(t, len(authz.Challenges), 2) + test.AssertEquals(t, len(authz.Challenges), 3) // Now submit an order request for a wildcard of the domain we just created an // order for. We should **NOT** reuse the authorization from the previous // order since we now require a DNS-01 challenge for the `*.` prefixed name. 
- orderNames = []string{"*.everything.is.possible.zombo.com"} + orderIdents = identifier.ACMEIdentifiers{identifier.NewDNS("*.everything.is.possible.zombo.com")} wildcardOrderRequest = &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: orderNames, + RegistrationID: registration.Id, + Identifiers: orderIdents.ToProtoSlice(), } order, err = ra.NewOrder(context.Background(), wildcardOrderRequest) test.AssertNotError(t, err, "NewOrder failed for a wildcard order request") @@ -2418,50 +2392,15 @@ func TestNewOrderWildcard(t *testing.T) { test.AssertEquals(t, dupeOrder.V2Authorizations[0], order.V2Authorizations[0]) } -// mockSANearExpiredAuthz is a mock SA that always returns an authz near expiry -// to test orders expiry calculations -type mockSANearExpiredAuthz struct { - mocks.StorageAuthority - expiry time.Time -} - -// GetAuthorizations2 is a mock that always returns a valid authorization for -// "zombo.com" very near to expiry -func (msa *mockSANearExpiredAuthz) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { - authzs := map[string]*core.Authorization{ - "zombo.com": { - // A static fake ID we can check for in a unit test - ID: "1", - Identifier: identifier.DNSIdentifier("zombo.com"), - RegistrationID: req.RegistrationID, - Expires: &msa.expiry, - Status: "valid", - Challenges: []core.Challenge{ - { - Type: core.ChallengeTypeHTTP01, - Status: core.StatusValid, - }, - }, - }, - } - return sa.AuthzMapToPB(authzs) -} - -func (msa *mockSANearExpiredAuthz) NewAuthorizations2(_ context.Context, _ *sapb.AddPendingAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorization2IDs, error) { - return &sapb.Authorization2IDs{ - Ids: []int64{5}, - }, nil -} - func TestNewOrderExpiry(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) + _, _, ra, _, clk, registration, cleanUp := initAuthorities(t) defer cleanUp() ctx := context.Background() - names := []string{"zombo.com"} + idents := identifier.ACMEIdentifiers{identifier.NewDNS("zombo.com")} // Set the order lifetime to 48 hours. - ra.orderLifetime = 48 * time.Hour + ra.profiles.def().orderLifetime = 48 * time.Hour // Use an expiry that is sooner than the configured order expiry but greater // than 24 hours away. @@ -2469,12 +2408,30 @@ func TestNewOrderExpiry(t *testing.T) { // Use a mock SA that always returns a soon-to-be-expired valid authz for // "zombo.com". - ra.SA = &mockSANearExpiredAuthz{expiry: fakeAuthzExpires} + ra.SA = &mockSAWithAuthzs{ + authzs: []*core.Authorization{ + { + // A static fake ID we can check for in a unit test + ID: "1", + Identifier: identifier.NewDNS("zombo.com"), + RegistrationID: registration.Id, + Expires: &fakeAuthzExpires, + Status: "valid", + Challenges: []core.Challenge{ + { + Type: core.ChallengeTypeHTTP01, + Status: core.StatusValid, + Token: core.NewToken(), + }, + }, + }, + }, + } // Create an initial request with regA and names orderReq := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: names, + RegistrationID: registration.Id, + Identifiers: idents.ToProtoSlice(), } // Create an order for that request @@ -2486,11 +2443,11 @@ func TestNewOrderExpiry(t *testing.T) { test.AssertEquals(t, order.V2Authorizations[0], int64(1)) // The order's expiry should be the fake authz's expiry since it is sooner // than the order's own expiry. 
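+ // (For context: the RA clamps a new order's expiry to whichever comes
+ // first: the profile's configured order lifetime, or the earliest expiry
+ // among any authorizations it reuses. Roughly, as a sketch rather than the
+ // RA's actual code:
+ //
+ //	expires := now.Add(profile.orderLifetime)
+ //	for _, authz := range reusedAuthzs {
+ //		if authz.Expires.Before(expires) {
+ //			expires = *authz.Expires
+ //		}
+ //	})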
- test.AssertEquals(t, order.Expires, fakeAuthzExpires.UnixNano()) + test.AssertEquals(t, order.Expires.AsTime(), fakeAuthzExpires) // Set the order lifetime to be lower than the fakeAuthzLifetime - ra.orderLifetime = 12 * time.Hour - expectedOrderExpiry := clk.Now().Add(ra.orderLifetime).UnixNano() + ra.profiles.def().orderLifetime = 12 * time.Hour + expectedOrderExpiry := clk.Now().Add(12 * time.Hour) // Create the order again order, err = ra.NewOrder(ctx, orderReq) // It shouldn't fail @@ -2500,19 +2457,19 @@ func TestNewOrderExpiry(t *testing.T) { test.AssertEquals(t, order.V2Authorizations[0], int64(1)) // The order's expiry should be the order's own expiry since it is sooner than // the fake authz's expiry. - test.AssertEquals(t, order.Expires, expectedOrderExpiry) + test.AssertEquals(t, order.Expires.AsTime(), expectedOrderExpiry) } func TestFinalizeOrder(t *testing.T) { - _, sa, ra, fc, cleanUp := initAuthorities(t) + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = time.Hour // Create one finalized authorization for not-example.com and one finalized // authorization for www.not-example.org - exp := ra.clk.Now().Add(365 * 24 * time.Hour) - authzIDA := createFinalizedAuthorization(t, sa, "not-example.com", exp, "valid", ra.clk.Now()) - authzIDB := createFinalizedAuthorization(t, sa, "www.not-example.com", exp, "valid", ra.clk.Now()) + now := ra.clk.Now() + exp := now.Add(365 * 24 * time.Hour) + authzIDA := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("not-example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) + authzIDB := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("www.not-example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) testKey, err := rsa.GenerateKey(rand.Reader, 2048) test.AssertNotError(t, err, "error generating test key") @@ -2550,7 +2507,7 @@ func TestFinalizeOrder(t *testing.T) { Subject: pkix.Name{CommonName: "not-example.com"}, DNSNames: []string{"not-example.com", "www.not-example.com"}, PublicKey: testKey.Public(), - NotBefore: fc.Now(), + NotBefore: now, BasicConstraintsValid: true, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, } @@ -2565,30 +2522,29 @@ func TestFinalizeOrder(t *testing.T) { // finalize the order will put it into processing state and the other tests // will fail because you can't finalize an order that is already being // processed. 
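+ // (For context: FinalizeOrder front-loads its cheap validations before any
+ // issuance work, roughly in the order the table below exercises them: a
+ // non-zero order ID and account ID, a non-empty identifier list, "ready"
+ // status, a parseable CSR whose names exactly match the order's
+ // identifiers, and finally an unexpired valid authorization for every
+ // identifier.)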
- emptyOrder, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"000.example.com"}, - }) - test.AssertNotError(t, err, "Could not add test order for fake order ID") - // Add a new order for the fake reg ID fakeRegOrder, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"001.example.com"}, + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS("001.example.com").ToProto()}, }) test.AssertNotError(t, err, "Could not add test order for fake reg ID order ID") missingAuthzOrder, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: []string{"002.example.com"}, + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS("002.example.com").ToProto()}, }) test.AssertNotError(t, err, "Could not add test order for missing authz order ID") - validatedOrder, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: []string{"not-example.com", "www.not-example.com"}, - V2Authorizations: []int64{authzIDA, authzIDB}, + validatedOrder, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("not-example.com").ToProto(), + identifier.NewDNS("www.not-example.com").ToProto(), + }, + V2Authorizations: []int64{authzIDA, authzIDB}, + }, }) test.AssertNotError(t, err, "Could not add test order with finalized authz IDs, ready status") @@ -2598,23 +2554,45 @@ func TestFinalizeOrder(t *testing.T) { ExpectedErrMsg string ExpectIssuance bool }{ + { + Name: "No id in order", + OrderReq: &rapb.FinalizeOrderRequest{ + Order: &corepb.Order{}, + Csr: oneDomainCSR, + }, + ExpectedErrMsg: "invalid order ID: 0", + }, + { + Name: "No account id in order", + OrderReq: &rapb.FinalizeOrderRequest{ + Order: &corepb.Order{ + Id: 1, + }, + Csr: oneDomainCSR, + }, + ExpectedErrMsg: "invalid account ID: 0", + }, { Name: "No names in order", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ - Status: string(core.StatusReady), - Names: []string{}, + Id: 1, + RegistrationID: 1, + Status: string(core.StatusReady), + Identifiers: []*corepb.Identifier{}, }, Csr: oneDomainCSR, }, - ExpectedErrMsg: "Order has no associated names", + ExpectedErrMsg: "Order has no associated identifiers", }, { Name: "Wrong order state (valid)", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ - Status: string(core.StatusValid), - Names: []string{"a.com"}, + Id: 1, + RegistrationID: 1, + Status: string(core.StatusValid), + Identifiers: []*corepb.Identifier{identifier.NewDNS("a.com").ToProto()}, }, Csr: oneDomainCSR, }, @@ -2624,8 +2602,10 @@ func TestFinalizeOrder(t *testing.T) { Name: "Wrong order state (pending)", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ - Status: string(core.StatusPending), - Names: []string{"a.com"}, + Id: 1, + RegistrationID: 1, + Status: string(core.StatusPending), + Identifiers: []*corepb.Identifier{identifier.NewDNS("a.com").ToProto()}, }, Csr: oneDomainCSR, }, @@ -2636,58 +2616,66 @@ func TestFinalizeOrder(t *testing.T) { Name: "Invalid CSR", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ - Status: string(core.StatusReady), - Names: []string{"a.com"}, + Id: 1, + RegistrationID: 
1, + Status: string(core.StatusReady), + Identifiers: []*corepb.Identifier{identifier.NewDNS("a.com").ToProto()}, }, Csr: []byte{0xC0, 0xFF, 0xEE}, }, - ExpectedErrMsg: "asn1: syntax error: truncated tag or length", + ExpectedErrMsg: "unable to parse CSR: asn1: syntax error: truncated tag or length", }, { Name: "CSR and Order with diff number of names", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ + Id: 1, + RegistrationID: 1, Status: string(core.StatusReady), - Names: []string{"a.com", "b.com"}, - RegistrationID: fakeRegID, + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + }, }, Csr: oneDomainCSR, }, - ExpectedErrMsg: "Order includes different number of names than CSR specifies", + ExpectedErrMsg: "CSR does not specify same identifiers as Order", }, { Name: "CSR and Order with diff number of names (other way)", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ + Id: 1, + RegistrationID: 1, Status: string(core.StatusReady), - Names: []string{"a.com"}, - RegistrationID: fakeRegID, + Identifiers: []*corepb.Identifier{identifier.NewDNS("a.com").ToProto()}, }, Csr: twoDomainCSR, }, - ExpectedErrMsg: "Order includes different number of names than CSR specifies", + ExpectedErrMsg: "CSR does not specify same identifiers as Order", }, { Name: "CSR missing an order name", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ + Id: 1, + RegistrationID: 1, Status: string(core.StatusReady), - Names: []string{"foobar.com"}, - RegistrationID: fakeRegID, + Identifiers: []*corepb.Identifier{identifier.NewDNS("foobar.com").ToProto()}, }, Csr: oneDomainCSR, }, - ExpectedErrMsg: "CSR is missing Order domain \"foobar.com\"", + ExpectedErrMsg: "CSR does not specify same identifiers as Order", }, { Name: "CSR with policy forbidden name", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ + Id: 1, + RegistrationID: 1, Status: string(core.StatusReady), - Names: []string{"example.org"}, - RegistrationID: Registration.Id, - Id: emptyOrder.Id, - Expires: exp.UnixNano(), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.org").ToProto()}, + Expires: timestamppb.New(exp), CertificateSerial: "", BeganProcessing: false, }, @@ -2700,13 +2688,13 @@ func TestFinalizeOrder(t *testing.T) { OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ Status: string(core.StatusReady), - Names: []string{"a.com"}, + Identifiers: []*corepb.Identifier{identifier.NewDNS("a.com").ToProto()}, Id: fakeRegOrder.Id, RegistrationID: fakeRegID, - Expires: exp.UnixNano(), + Expires: timestamppb.New(exp), CertificateSerial: "", BeganProcessing: false, - Created: ra.clk.Now().UnixNano(), + Created: timestamppb.New(now), }, Csr: oneDomainCSR, }, @@ -2716,18 +2704,21 @@ func TestFinalizeOrder(t *testing.T) { Name: "Order with missing authorizations", OrderReq: &rapb.FinalizeOrderRequest{ Order: &corepb.Order{ - Status: string(core.StatusReady), - Names: []string{"a.com", "b.com"}, + Status: string(core.StatusReady), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + }, Id: missingAuthzOrder.Id, - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), CertificateSerial: "", BeganProcessing: false, - Created: ra.clk.Now().UnixNano(), + Created: timestamppb.New(now), }, Csr: twoDomainCSR, }, - ExpectedErrMsg: "authorizations for these names not found or expired: a.com, b.com", + 
ExpectedErrMsg: "authorizations for these identifiers not found: a.com, b.com", }, { Name: "Order with correct authorizations, ready status", @@ -2757,30 +2748,36 @@ func TestFinalizeOrder(t *testing.T) { test.AssertNotError(t, err, "Error getting order to check serial") test.AssertNotEquals(t, updatedOrder.CertificateSerial, "") test.AssertEquals(t, updatedOrder.Status, "valid") + test.AssertEquals(t, updatedOrder.Expires.AsTime(), exp) } }) } } func TestFinalizeOrderWithMixedSANAndCN(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() - ra.orderLifetime = time.Hour // Pick an expiry in the future - exp := ra.clk.Now().Add(365 * 24 * time.Hour) + now := ra.clk.Now() + exp := now.Add(365 * 24 * time.Hour) - // Create one finalized authorization for Registration.Id for not-example.com and - // one finalized authorization for Registration.Id for www.not-example.org - authzIDA := createFinalizedAuthorization(t, sa, "not-example.com", exp, "valid", ra.clk.Now()) - authzIDB := createFinalizedAuthorization(t, sa, "www.not-example.com", exp, "valid", ra.clk.Now()) + // Create one finalized authorization for the registration for not-example.com and + // one finalized authorization for www.not-example.org + authzIDA := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("not-example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) + authzIDB := createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("www.not-example.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) // Create a new order to finalize with names in SAN and CN - mixedOrder, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: []string{"not-example.com", "www.not-example.com"}, - V2Authorizations: []int64{authzIDA, authzIDB}, + mixedOrder, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("not-example.com").ToProto(), + identifier.NewDNS("www.not-example.com").ToProto(), + }, + V2Authorizations: []int64{authzIDA, authzIDB}, + }, }) test.AssertNotError(t, err, "Could not add test order with finalized authz IDs") testKey, err := rsa.GenerateKey(rand.Reader, 2048) @@ -2822,11 +2819,12 @@ func TestFinalizeOrderWithMixedSANAndCN(t *testing.T) { } func TestFinalizeOrderWildcard(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) defer cleanUp() // Pick an expiry in the future - exp := ra.clk.Now().Add(365 * 24 * time.Hour) + now := ra.clk.Now() + exp := now.Add(365 * 24 * time.Hour) testKey, err := rsa.GenerateKey(rand.Reader, 2048) test.AssertNotError(t, err, "Error creating test RSA key") @@ -2863,16 +2861,17 @@ func TestFinalizeOrderWildcard(t *testing.T) { ra.CA = ca // Create a new order for a wildcard domain - orderNames := []string{"*.zombo.com"} + orderIdents := identifier.ACMEIdentifiers{identifier.NewDNS("*.zombo.com")} + test.AssertNotError(t, err, "Converting identifiers to DNS names") wildcardOrderRequest := &rapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Names: orderNames, + RegistrationID: registration.Id, + Identifiers: orderIdents.ToProtoSlice(), } order, err := ra.NewOrder(context.Background(), wildcardOrderRequest) test.AssertNotError(t, err, 
"NewOrder failed for wildcard domain order") - // Create one standard finalized authorization for Registration.Id for zombo.com - _ = createFinalizedAuthorization(t, sa, "zombo.com", exp, "valid", ra.clk.Now()) + // Create one standard finalized authorization for the registration for zombo.com + _ = createFinalizedAuthorization(t, sa, registration.Id, identifier.NewDNS("zombo.com"), exp, core.ChallengeTypeHTTP01, ra.clk.Now()) // Finalizing the order should *not* work since the existing validated authz // is not a special DNS-01-Wildcard challenge authz, so the order will be @@ -2896,12 +2895,13 @@ func TestFinalizeOrderWildcard(t *testing.T) { test.AssertNotError(t, err, "sa.GetAuthorization2 failed") // Finalize the authorization with the challenge validated + expires := now.Add(time.Hour * 24 * 7) _, err = sa.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{ Id: validOrder.V2Authorizations[0], Status: string(core.StatusValid), - Expires: ra.clk.Now().Add(time.Hour * 24 * 7).UnixNano(), + Expires: timestamppb.New(expires), Attempted: string(core.ChallengeTypeDNS01), - AttemptedAt: ra.clk.Now().UnixNano(), + AttemptedAt: timestamppb.New(now), }) test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") @@ -2921,83 +2921,166 @@ func TestFinalizeOrderWildcard(t *testing.T) { "wildcard order") } -func TestIssueCertificateAuditLog(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) +func TestFinalizeOrderDisabledChallenge(t *testing.T) { + _, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - // Set up order and authz expiries - ra.orderLifetime = 24 * time.Hour - exp := ra.clk.Now().Add(24 * time.Hour) - - authzForChalType := func(domain, chalType string) int64 { - template := core.Authorization{ - Identifier: identifier.ACMEIdentifier{ - Type: "dns", - Value: domain, - }, - RegistrationID: Registration.Id, - Status: "pending", - Expires: &exp, - } - // Create challenges - token := core.NewToken() - httpChal := core.HTTPChallenge01(token) - dnsChal := core.DNSChallenge01(token) - // Set the selected challenge to valid - switch chalType { - case "http-01": - httpChal.Status = core.StatusValid - case "dns-01": - dnsChal.Status = core.StatusValid - default: - t.Fatalf("Invalid challenge type used with authzForChalType: %q", chalType) - } - // Set the template's challenges - template.Challenges = []core.Challenge{httpChal, dnsChal} - - // Create the pending authz - authzPB, err := bgrpc.AuthzToPB(template) - test.AssertNotError(t, err, "bgrpc.AuthzToPB failed") - ids, err := sa.NewAuthorizations2(ctx, &sapb.AddPendingAuthorizationsRequest{ - Authz: []*corepb.Authorization{authzPB}, - }) - test.AssertNotError(t, err, "sa.NewAuthorzations2 failed") - // Finalize the authz - _, err = sa.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{ - Id: ids.Ids[0], - Status: "valid", - Expires: exp.UnixNano(), - Attempted: chalType, - AttemptedAt: ra.clk.Now().UnixNano(), - }) - test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") - return ids.Ids[0] - } + domain := randomDomain() + ident := identifier.NewDNS(domain) - // Make some valid authorizations for some names using different challenge types - names := []string{"not-example.com", "www.not-example.com", "still.not-example.com", "definitely.not-example.com"} - chalTypes := []string{"http-01", "dns-01", "http-01", "dns-01"} - var authzIDs []int64 - for i, name := range names { - authzIDs = append(authzIDs, authzForChalType(name, chalTypes[i])) - } + // Create a finalized 
authorization for that domain + authzID := createFinalizedAuthorization( + t, sa, registration.Id, ident, fc.Now().Add(24*time.Hour), core.ChallengeTypeHTTP01, fc.Now().Add(-1*time.Hour)) - // Create a pending order for all of the names - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: names, - V2Authorizations: authzIDs, + // Create an order that reuses that authorization + order, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{ident.ToProto()}, }) - test.AssertNotError(t, err, "Could not add test order with finalized authz IDs") + test.AssertNotError(t, err, "creating test order") + test.AssertEquals(t, order.V2Authorizations[0], authzID) - // Generate a CSR covering the order names with a random RSA key - testKey, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "error generating test key") + // Create a CSR for this order + testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "generating test key") csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ - PublicKey: testKey.PublicKey, - SignatureAlgorithm: x509.SHA256WithRSA, - Subject: pkix.Name{CommonName: "not-example.com"}, + PublicKey: testKey.PublicKey, + DNSNames: []string{domain}, }, testKey) + test.AssertNotError(t, err, "creating test CSR") + + // Replace the Policy Authority with one which has the authorization's challenge type (http-01) disabled + pa, err := policy.New( + map[identifier.IdentifierType]bool{ + identifier.TypeDNS: true, + identifier.TypeIP: true, + }, + map[core.AcmeChallenge]bool{ + core.ChallengeTypeDNS01: true, + core.ChallengeTypeTLSALPN01: true, + }, + ra.log) + test.AssertNotError(t, err, "creating test PA") + err = pa.LoadIdentPolicyFile("../test/ident-policy.yaml") + test.AssertNotError(t, err, "loading test identifier policy") + ra.PA = pa + + // Now finalizing this order should fail + _, err = ra.FinalizeOrder(context.Background(), &rapb.FinalizeOrderRequest{ + Order: order, + Csr: csr, + }) + test.AssertError(t, err, "finalization should fail") + + // Unfortunately we can't test for the PA's "which is now disabled" error + // message directly, because the RA discards it and collects all invalid names + // into a single more generic error message. But it does at least distinguish + // between missing, expired, and invalid, so we can test for "invalid".
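+ //
+ // As a rough sketch (hypothetical code, not the RA's actual implementation),
+ // that collection step behaves like:
+ //
+ //	var invalid []string
+ //	for _, authz := range authzs {
+ //		if authz.Status != core.StatusValid {
+ //			invalid = append(invalid, authz.Identifier.Value)
+ //		}
+ //	}
+ //	return berrors.UnauthorizedError(
+ //		"authorizations for these identifiers not valid: %s",
+ //		strings.Join(invalid, ", "))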
+ test.AssertContains(t, err.Error(), "authorizations for these identifiers not valid") +} + +func TestFinalizeWithMustStaple(t *testing.T) { + _, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + ocspMustStapleExt := pkix.Extension{ + // RFC 7633: id-pe-tlsfeature OBJECT IDENTIFIER ::= { id-pe 24 } + Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}, + // ASN.1 encoding of: + // SEQUENCE + // INTEGER 5 + // where "5" is the status_request feature (RFC 6066) + Value: []byte{0x30, 0x03, 0x02, 0x01, 0x05}, + } + + domain := randomDomain() + + authzID := createFinalizedAuthorization( + t, sa, registration.Id, identifier.NewDNS(domain), fc.Now().Add(24*time.Hour), core.ChallengeTypeHTTP01, fc.Now().Add(-1*time.Hour)) + + order, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ + RegistrationID: registration.Id, + Identifiers: []*corepb.Identifier{identifier.NewDNS(domain).ToProto()}, + }) + test.AssertNotError(t, err, "creating test order") + test.AssertEquals(t, order.V2Authorizations[0], authzID) + + testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "generating test key") + + csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ + PublicKey: testKey.Public(), + DNSNames: []string{domain}, + ExtraExtensions: []pkix.Extension{ocspMustStapleExt}, + }, testKey) + test.AssertNotError(t, err, "creating must-staple CSR") + + serial, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + test.AssertNotError(t, err, "generating random serial number") + template := &x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{CommonName: domain}, + DNSNames: []string{domain}, + NotBefore: fc.Now(), + NotAfter: fc.Now().Add(365 * 24 * time.Hour), + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + ExtraExtensions: []pkix.Extension{ocspMustStapleExt}, + } + cert, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey) + test.AssertNotError(t, err, "creating certificate") + ra.CA = &mocks.MockCA{ + PEM: pem.EncodeToMemory(&pem.Block{ + Bytes: cert, + Type: "CERTIFICATE", + }), + } + + _, err = ra.FinalizeOrder(context.Background(), &rapb.FinalizeOrderRequest{ + Order: order, + Csr: csr, + }) + test.AssertError(t, err, "finalization should fail") + test.AssertContains(t, err.Error(), "no longer available") +} + +func TestIssueCertificateAuditLog(t *testing.T) { + _, sa, ra, _, _, registration, cleanUp := initAuthorities(t) + defer cleanUp() + + // Make some valid authorizations for some names using different challenge types + names := []string{"not-example.com", "www.not-example.com", "still.not-example.com", "definitely.not-example.com"} + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("not-example.com"), + identifier.NewDNS("www.not-example.com"), + identifier.NewDNS("still.not-example.com"), + identifier.NewDNS("definitely.not-example.com"), + } + exp := ra.clk.Now().Add(ra.profiles.def().orderLifetime) + challs := []core.AcmeChallenge{core.ChallengeTypeHTTP01, core.ChallengeTypeDNS01, core.ChallengeTypeHTTP01, core.ChallengeTypeDNS01} + var authzIDs []int64 + for i, ident := range idents { + authzIDs = append(authzIDs, createFinalizedAuthorization(t, sa, registration.Id, ident, exp, challs[i], ra.clk.Now())) + } + + // Create a pending order for all of the names + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: 
&sapb.NewOrderRequest{ + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), + Identifiers: idents.ToProtoSlice(), + V2Authorizations: authzIDs, + }, + }) + test.AssertNotError(t, err, "Could not add test order with finalized authz IDs") + + // Generate a CSR covering the order names with a random RSA key + testKey, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "error generating test key") + csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ + PublicKey: testKey.PublicKey, + SignatureAlgorithm: x509.SHA256WithRSA, + Subject: pkix.Name{CommonName: "not-example.com"}, DNSNames: names, }, testKey) test.AssertNotError(t, err, "Could not create test order CSR") @@ -3059,7 +3142,7 @@ func TestIssueCertificateAuditLog(t *testing.T) { // The event should have no error test.AssertEquals(t, event.Error, "") // The event requester should be the expected reg ID - test.AssertEquals(t, event.Requester, Registration.Id) + test.AssertEquals(t, event.Requester, registration.Id) // The event order ID should be the expected order ID test.AssertEquals(t, event.OrderID, order.Id) // The event serial number should be the expected serial number @@ -3068,64 +3151,150 @@ func TestIssueCertificateAuditLog(t *testing.T) { test.AssertDeepEquals(t, event.VerifiedFields, []string{"subject.commonName", "subjectAltName"}) // The event CommonName should match the expected common name test.AssertEquals(t, event.CommonName, "not-example.com") - // The event names should match the order names - test.AssertDeepEquals(t, core.UniqueLowerNames(event.Names), core.UniqueLowerNames(order.Names)) // The event's NotBefore and NotAfter should match the cert's test.AssertEquals(t, event.NotBefore, parsedCert.NotBefore) test.AssertEquals(t, event.NotAfter, parsedCert.NotAfter) - // There should be one event Authorization entry for each name - test.AssertEquals(t, len(event.Authorizations), len(names)) + // There should be one event identifier/authz entry for each name. + test.AssertEquals(t, len(event.Identifiers), len(names)) - // Check the authz entry for each name + // The event identifiers should match the order identifiers + eventIdents := make([]identifier.ACMEIdentifier, 0) + for _, eventIdent := range event.Identifiers { + eventIdents = append(eventIdents, eventIdent.Ident) + } + test.AssertDeepEquals(t, identifier.Normalize(eventIdents), identifier.Normalize(identifier.FromProtoSlice(order.Identifiers))) + + // Check the identifier/authz entry for each name for i, name := range names { - authzEntry := event.Authorizations[name] - // The authz entry should have the correct authz ID - test.AssertEquals(t, authzEntry.ID, fmt.Sprintf("%d", authzIDs[i])) - // The authz entry should have the correct challenge type - test.AssertEquals(t, string(authzEntry.ChallengeType), chalTypes[i]) + for _, entry := range event.Identifiers { + if entry.Ident.Value == name { + // The authz entry should have the correct authz ID + test.AssertEquals(t, entry.Authz, fmt.Sprintf("%d", authzIDs[i])) + // The authz entry should have the correct challenge type + test.AssertEquals(t, entry.Challenge, challs[i]) + } + } } } -// TestUpdateMissingAuthorization tests the race condition where a challenge is -// updated to valid concurrently with another attempt to have the challenge -// updated. Previously this would return a `berrors.InternalServer` error when -// the row was found missing from `pendingAuthorizations` by the 2nd update -// since the 1st had already deleted it. 
We accept this may happen and now test -// for a `berrors.NotFound` error return. -// -// See https://github.com/letsencrypt/boulder/issues/3201 -func TestUpdateMissingAuthorization(t *testing.T) { - _, sa, ra, fc, cleanUp := initAuthorities(t) +func TestIssueCertificateCAACheckLog(t *testing.T) { + _, sa, ra, _, fc, registration, cleanUp := initAuthorities(t) defer cleanUp() - ctx := context.Background() + ra.VA = va.RemoteClients{CAAClient: &noopCAA{}} + + exp := fc.Now().Add(24 * time.Hour) + recent := fc.Now().Add(-1 * time.Hour) + older := fc.Now().Add(-8 * time.Hour) + + // Make some valid authzs for four names. Half of them were validated + // recently and half were validated in excess of our CAA recheck time. + names := []string{ + "not-example.com", + "www.not-example.com", + "still.not-example.com", + "definitely.not-example.com", + } + idents := identifier.NewDNSSlice(names) + var authzIDs []int64 + for i, ident := range idents { + attemptedAt := older + if i%2 == 0 { + attemptedAt = recent + } + authzIDs = append(authzIDs, createFinalizedAuthorization(t, sa, registration.Id, ident, exp, core.ChallengeTypeHTTP01, attemptedAt)) + } + + // Create a pending order for all of the names. + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: registration.Id, + Expires: timestamppb.New(exp), + Identifiers: idents.ToProtoSlice(), + V2Authorizations: authzIDs, + }, + }) + test.AssertNotError(t, err, "Could not add test order with finalized authz IDs") + + // Generate a CSR covering the order names with a random RSA key. + testKey, err := rsa.GenerateKey(rand.Reader, 2048) + test.AssertNotError(t, err, "error generating test key") + csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ + PublicKey: testKey.PublicKey, + SignatureAlgorithm: x509.SHA256WithRSA, + Subject: pkix.Name{CommonName: "not-example.com"}, + DNSNames: names, + }, testKey) + test.AssertNotError(t, err, "Could not create test order CSR") + + // Create a mock certificate for the fake CA to return. + template := &x509.Certificate{ + SerialNumber: big.NewInt(12), + Subject: pkix.Name{ + CommonName: "not-example.com", + }, + DNSNames: names, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(0, 0, 1), + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + } + cert, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey) + test.AssertNotError(t, err, "Failed to create mock cert for test CA") + + // Set up the RA's CA with a mock that returns the cert from above. + ra.CA = &mocks.MockCA{ + PEM: pem.EncodeToMemory(&pem.Block{ + Bytes: cert, + }), + } + + // Cast the RA's mock log so we can ensure it's cleared and can access the + // matched log lines. + mockLog := ra.log.(*blog.Mock) + mockLog.Clear() + + // Finalize the order with the CSR. + order.Status = string(core.StatusReady) + _, err = ra.FinalizeOrder(context.Background(), &rapb.FinalizeOrderRequest{ + Order: order, + Csr: csr, + }) + test.AssertNotError(t, err, "Error finalizing test order") - authzPB := createPendingAuthorization(t, sa, Identifier, fc.Now().Add(12*time.Hour)) - authz, err := bgrpc.PBToAuthz(authzPB) - test.AssertNotError(t, err, "failed to deserialize authz") + // Get the logged lines from the mock logger. + loglines := mockLog.GetAllMatching("FinalizationCaaCheck JSON=") + // There should be exactly 1 matching log line.
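+ // For reference, the matched line has roughly this shape (field values
+ // illustrative; the prefix and field names match the parsing and
+ // assertions below):
+ //
+ //	INFO: FinalizationCaaCheck JSON={"Requester":1,"Reused":2,"Rechecked":2}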
+ test.AssertEquals(t, len(loglines), 1) - // Twiddle the authz to pretend its been validated by the VA - authz.Status = "valid" - authz.Challenges[0].Status = "valid" - err = ra.recordValidation(ctx, authz.ID, authz.Expires, &authz.Challenges[0]) - test.AssertNotError(t, err, "ra.recordValidation failed") + // Strip off the prefix before 'JSON='. + jsonContent := strings.TrimPrefix(loglines[0], "INFO: FinalizationCaaCheck JSON=") - err = ra.recordValidation(ctx, authz.ID, authz.Expires, &authz.Challenges[0]) - test.AssertError(t, err, "ra.recordValidation didn't fail") - test.AssertErrorIs(t, err, berrors.NotFound) + // Unmarshal the JSON into an event object. + var event finalizationCAACheckEvent + err = json.Unmarshal([]byte(jsonContent), &event) + // The JSON should unmarshal without error. + test.AssertNotError(t, err, "Error unmarshalling logged JSON CAA check event.") + // The event requester should be the expected registration ID. + test.AssertEquals(t, event.Requester, registration.Id) + // The event should have the expected number of Authzs where CAA was reused. + test.AssertEquals(t, event.Reused, 2) + // The event should have the expected number of Authzs where CAA was + // rechecked. + test.AssertEquals(t, event.Rechecked, 2) } func TestPerformValidationBadChallengeType(t *testing.T) { - _, _, ra, fc, cleanUp := initAuthorities(t) + _, _, ra, _, fc, _, cleanUp := initAuthorities(t) defer cleanUp() - pa, err := policy.New(map[core.AcmeChallenge]bool{}) + pa, err := policy.New(map[identifier.IdentifierType]bool{}, map[core.AcmeChallenge]bool{}, blog.NewMock()) test.AssertNotError(t, err, "Couldn't create PA") ra.PA = pa exp := fc.Now().Add(10 * time.Hour) authz := core.Authorization{ ID: "1337", - Identifier: identifier.DNSIdentifier("not-example.com"), + Identifier: identifier.NewDNS("not-example.com"), RegistrationID: 1, Status: "valid", Challenges: []core.Challenge{ @@ -3156,289 +3325,165 @@ func (mp *timeoutPub) SubmitToSingleCTWithResult(_ context.Context, _ *pubpb.Req } func TestCTPolicyMeasurements(t *testing.T) { - _, ssa, ra, _, cleanup := initAuthorities(t) + _, _, ra, _, _, _, cleanup := initAuthorities(t) defer cleanup() - ctp := ctpolicy.New(&timeoutPub{}, []ctconfig.CTGroup{{}}, nil, log, metrics.NoopRegisterer) - ra.ctpolicy = ctp - - // Create valid authorizations for not-example.com and www.not-example.com - exp := ra.clk.Now().Add(365 * 24 * time.Hour) - authzIDA := createFinalizedAuthorization(t, ssa, "not-example.com", exp, "valid", ra.clk.Now()) - authzIDB := createFinalizedAuthorization(t, ssa, "www.not-example.com", exp, "valid", ra.clk.Now()) - - order, err := ra.SA.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: []string{"not-example.com", "www.not-example.com"}, - V2Authorizations: []int64{authzIDA, authzIDB}, - }) - test.AssertNotError(t, err, "error generating test order") - - testKey, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "error generating test key") - - csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ - PublicKey: testKey.Public(), - SignatureAlgorithm: x509.SHA256WithRSA, - DNSNames: []string{"not-example.com", "www.not-example.com"}, - }, testKey) - test.AssertNotError(t, err, "error generating test CSR") + ra.ctpolicy = ctpolicy.New(&timeoutPub{}, loglist.List{ + {Name: "LogA1", Operator: "OperA", Url: "UrlA1", Key: []byte("KeyA1")}, + {Name: "LogB1", Operator: "OperB", Url: "UrlB1", Key: []byte("KeyB1")},
+ }, nil, nil, 0, log, metrics.NoopRegisterer) - _, err = ra.FinalizeOrder(context.Background(), &rapb.FinalizeOrderRequest{ - Order: order, - Csr: csr, + _, cert := test.ThrowAwayCert(t, clock.NewFake()) + _, err := ra.GetSCTs(context.Background(), &rapb.SCTRequest{ + PrecertDER: cert.Raw, }) - test.AssertError(t, err, "FinalizeOrder should have failed when SCTs timed out") - test.AssertContains(t, err.Error(), "getting SCTs") + test.AssertError(t, err, "GetSCTs should have failed when SCTs timed out") + test.AssertContains(t, err.Error(), "failed to get 2 SCTs") test.AssertMetricWithLabelsEquals(t, ra.ctpolicyResults, prometheus.Labels{"result": "failure"}, 1) } func TestWildcardOverlap(t *testing.T) { - err := wildcardOverlap([]string{ - "*.example.com", - "*.example.net", + err := wildcardOverlap(identifier.ACMEIdentifiers{ + identifier.NewDNS("*.example.com"), + identifier.NewDNS("*.example.net"), }) if err != nil { t.Errorf("Got error %q, expected none", err) } - err = wildcardOverlap([]string{ - "*.example.com", - "*.example.net", - "www.example.com", + err = wildcardOverlap(identifier.ACMEIdentifiers{ + identifier.NewDNS("*.example.com"), + identifier.NewDNS("*.example.net"), + identifier.NewDNS("www.example.com"), }) if err == nil { t.Errorf("Got no error, expected one") } test.AssertErrorIs(t, err, berrors.Malformed) - err = wildcardOverlap([]string{ - "*.foo.example.com", - "*.example.net", - "www.example.com", + err = wildcardOverlap(identifier.ACMEIdentifiers{ + identifier.NewDNS("*.foo.example.com"), + identifier.NewDNS("*.example.net"), + identifier.NewDNS("www.example.com"), }) if err != nil { t.Errorf("Got error %q, expected none", err) } } -// mockCAFailPrecert is a mock CA that always returns an error from `IssuePrecertificate` -type mockCAFailPrecert struct { - mocks.MockCA - err error +type MockCARecordingProfile struct { + inner *mocks.MockCA + profileName string } -func (ca *mockCAFailPrecert) IssuePrecertificate( - context.Context, - *capb.IssueCertificateRequest, - ...grpc.CallOption) (*capb.IssuePrecertificateResponse, error) { - return nil, ca.err +func (ca *MockCARecordingProfile) IssueCertificate(ctx context.Context, req *capb.IssueCertificateRequest, _ ...grpc.CallOption) (*capb.IssueCertificateResponse, error) { + ca.profileName = req.CertProfileName + return ca.inner.IssueCertificate(ctx, req) } -// mockCAFailCertForPrecert is a mock CA that always returns an error from -// `IssueCertificateForPrecertificate` -type mockCAFailCertForPrecert struct { - mocks.MockCA - err error +type mockSAWithFinalize struct { + sapb.StorageAuthorityClient } -// IssuePrecertificate needs to be mocked for mockCAFailCertForPrecert's `IssueCertificateForPrecertificate` to get called. 
-func (ca *mockCAFailCertForPrecert) IssuePrecertificate( - context.Context, - *capb.IssueCertificateRequest, - ...grpc.CallOption) (*capb.IssuePrecertificateResponse, error) { - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, err - } - tmpl := &ctx509.Certificate{ - SerialNumber: big.NewInt(1), - ExtraExtensions: []ctpkix.Extension{ - { - Id: ctx509.OIDExtensionCTPoison, - Critical: true, - Value: ctasn1.NullBytes, - }, - }, - } - precert, err := ctx509.CreateCertificate(rand.Reader, tmpl, tmpl, k.Public(), k) - if err != nil { - return nil, err - } - return &capb.IssuePrecertificateResponse{ - DER: precert, - }, nil +func (sa *mockSAWithFinalize) FinalizeOrder(ctx context.Context, req *sapb.FinalizeOrderRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { + return &emptypb.Empty{}, nil } -func (ca *mockCAFailCertForPrecert) IssueCertificateForPrecertificate( - context.Context, - *capb.IssueCertificateForPrecertificateRequest, - ...grpc.CallOption) (*corepb.Certificate, error) { - return &corepb.Certificate{}, ca.err +func (sa *mockSAWithFinalize) FQDNSetTimestampsForWindow(ctx context.Context, in *sapb.CountFQDNSetsRequest, opts ...grpc.CallOption) (*sapb.Timestamps, error) { + return &sapb.Timestamps{ + Timestamps: []*timestamppb.Timestamp{ + timestamppb.Now(), + }, + }, nil } -// TestIssueCertificateInnerErrs tests that errors from the CA caught during -// `ra.issueCertificateInner` are propagated correctly, with the part of the -// issuance process that failed prefixed on the error message. -func TestIssueCertificateInnerErrs(t *testing.T) { - _, sa, ra, _, cleanUp := initAuthorities(t) - defer cleanUp() - - ra.orderLifetime = 24 * time.Hour - exp := ra.clk.Now().Add(24 * time.Hour) - - authzForIdent := func(domain string) int64 { - template := core.Authorization{ - Identifier: identifier.ACMEIdentifier{ - Type: "dns", - Value: domain, - }, - RegistrationID: Registration.Id, - Status: "pending", - Expires: &exp, - } - // Create one valid HTTP challenge - httpChal := core.HTTPChallenge01(core.NewToken()) - httpChal.Status = core.StatusValid - // Set the template's challenges - template.Challenges = []core.Challenge{httpChal} - // Create the pending authz - authzPB, err := bgrpc.AuthzToPB(template) - test.AssertNotError(t, err, "bgrpc.AuthzToPB failed") - ids, err := sa.NewAuthorizations2(ctx, &sapb.AddPendingAuthorizationsRequest{ - Authz: []*corepb.Authorization{authzPB}, - }) - test.AssertNotError(t, err, "sa.NewAuthorzations2 failed") - // Finalize the authz - attempted := string(httpChal.Type) - _, err = sa.FinalizeAuthorization2(ctx, &sapb.FinalizeAuthorizationRequest{ - Id: ids.Ids[0], - Status: "valid", - Expires: exp.UnixNano(), - Attempted: attempted, - AttemptedAt: ra.clk.Now().UnixNano(), - }) - test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") - return ids.Ids[0] - } - - // Make some valid authorizations for some names - names := []string{"not-example.com", "www.not-example.com", "still.not-example.com", "definitely.not-example.com"} - var authzIDs []int64 - for _, name := range names { - authzIDs = append(authzIDs, authzForIdent(name)) - } - - // Create a pending order for all of the names - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: Registration.Id, - Expires: exp.UnixNano(), - Names: names, - V2Authorizations: authzIDs, - }) - test.AssertNotError(t, err, "Could not add test order with finalized authz IDs") - - // Generate a CSR covering the order names with a random 
RSA key - testKey, err := rsa.GenerateKey(rand.Reader, 2048) - test.AssertNotError(t, err, "error generating test key") - csr, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ - PublicKey: testKey.PublicKey, - SignatureAlgorithm: x509.SHA256WithRSA, - Subject: pkix.Name{CommonName: "not-example.com"}, - DNSNames: names, - }, testKey) - test.AssertNotError(t, err, "Could not create test order CSR") - - csrOb, err := x509.ParseCertificateRequest(csr) - test.AssertNotError(t, err, "Error pasring generated CSR") - - req := core.CertificateRequest{ - Bytes: csr, - CSR: csrOb, - } - logEvent := &certificateRequestEvent{} - - testCases := []struct { - Name string - Mock capb.CertificateAuthorityClient - ExpectedErr error - ExpectedProb *berrors.BoulderError +func TestIssueCertificateOuter(t *testing.T) { + _, _, ra, _, fc, registration, cleanup := initAuthorities(t) + defer cleanup() + ra.SA = &mockSAWithFinalize{} + + // Create a CSR to submit and a certificate for the fake CA to return. + testKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + test.AssertNotError(t, err, "generating test key") + csrDER, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{DNSNames: []string{"example.com"}}, testKey) + test.AssertNotError(t, err, "creating test csr") + csr, err := x509.ParseCertificateRequest(csrDER) + test.AssertNotError(t, err, "parsing test csr") + certDER, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{ + SerialNumber: big.NewInt(1), + DNSNames: []string{"example.com"}, + NotBefore: fc.Now(), + BasicConstraintsValid: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + }, &x509.Certificate{}, testKey.Public(), testKey) + test.AssertNotError(t, err, "creating test cert") + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + + for _, tc := range []struct { + name string + profile string + wantProfile string }{ { - Name: "vanilla error during IssuePrecertificate", - Mock: &mockCAFailPrecert{ - err: fmt.Errorf("bad bad not good"), - }, - ExpectedErr: fmt.Errorf("issuing precertificate: bad bad not good"), - }, - { - Name: "malformed problem during IssuePrecertificate", - Mock: &mockCAFailPrecert{ - err: berrors.MalformedError("detected 1x whack attack"), - }, - ExpectedProb: &berrors.BoulderError{ - Detail: "issuing precertificate: detected 1x whack attack", - Type: berrors.Malformed, - }, + name: "select default profile when none specified", + wantProfile: "test", // matches ra.defaultProfileName }, { - Name: "vanilla error during IssueCertificateForPrecertificate", - Mock: &mockCAFailCertForPrecert{ - err: fmt.Errorf("aaaaaaaaaaaaaaaaaaaa!!"), - }, - ExpectedErr: fmt.Errorf("issuing certificate for precertificate: aaaaaaaaaaaaaaaaaaaa!!"), + name: "default profile specified", + profile: "test", + wantProfile: "test", }, { - Name: "malformed problem during IssueCertificateForPrecertificate", - Mock: &mockCAFailCertForPrecert{ - err: berrors.MalformedError("provided DER is DERanged"), - }, - ExpectedProb: &berrors.BoulderError{ - Detail: "issuing certificate for precertificate: provided DER is DERanged", - Type: berrors.Malformed, - }, + name: "other profile specified", + profile: "other", + wantProfile: "other", }, - } + } { + t.Run(tc.name, func(t *testing.T) { + // Use a mock CA that will record the profile name included in the + // RA's request messages. Populate it with the cert generated above.
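+ // (MockCARecordingProfile, defined above, wraps the inner MockCA and
+ // captures req.CertProfileName before delegating IssueCertificate to it.)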
+ mockCA := MockCARecordingProfile{inner: &mocks.MockCA{PEM: certPEM}} + ra.CA = &mockCA + + order := &corepb.Order{ + RegistrationID: registration.Id, + Expires: timestamppb.New(fc.Now().Add(24 * time.Hour)), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + CertificateProfileName: tc.profile, + } - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - // Mock the CA - ra.CA = tc.Mock - // Attempt issuance - _, err = ra.issueCertificateInner(ctx, req, accountID(Registration.Id), orderID(order.Id), issuance.IssuerNameID(0), logEvent) - // We expect all of the testcases to fail because all use mocked CAs that deliberately error - test.AssertError(t, err, "issueCertificateInner with failing mock CA did not fail") - // If there is an expected `error` then match the error message - if tc.ExpectedErr != nil { - test.AssertEquals(t, err.Error(), tc.ExpectedErr.Error()) - } else if tc.ExpectedProb != nil { - // If there is an expected `berrors.BoulderError` then we expect the - // `issueCertificateInner` error to be a `berrors.BoulderError` - var berr *berrors.BoulderError - test.AssertErrorWraps(t, err, &berr) - // Match the expected berror Type and Detail to the observed - test.AssertErrorIs(t, berr, tc.ExpectedProb.Type) - test.AssertEquals(t, berr.Detail, tc.ExpectedProb.Detail) + order, err = ra.issueCertificateOuter(context.Background(), order, csr, nil, certificateRequestEvent{}) + + // The resulting order should have new fields populated + if order.Status != string(core.StatusValid) { + t.Errorf("order.Status = %+v, want %+v", order.Status, core.StatusValid) + } + if order.CertificateSerial != core.SerialToString(big.NewInt(1)) { + t.Errorf("CertificateSerial = %+v, want %+v", order.CertificateSerial, 1) + } + + // The recorded profile name should match what we expect. + if mockCA.profileName != tc.wantProfile { + t.Errorf("recorded profileName = %+v, want %+v", mockCA.profileName, tc.wantProfile) + } }) } } func TestNewOrderMaxNames(t *testing.T) { - _, _, ra, _, cleanUp := initAuthorities(t) + _, _, ra, _, _, _, cleanUp := initAuthorities(t) defer cleanUp() - ra.maxNames = 2 + ra.profiles.def().maxNames = 2 _, err := ra.NewOrder(context.Background(), &rapb.NewOrderRequest{ RegistrationID: 1, - Names: []string{ - "a", - "b", - "c", + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a").ToProto(), + identifier.NewDNS("b").ToProto(), + identifier.NewDNS("c").ToProto(), }, }) test.AssertError(t, err, "NewOrder didn't fail with too many names in request") - test.AssertEquals(t, err.Error(), "Order cannot contain more than 2 DNS names") + test.AssertEquals(t, err.Error(), "Order cannot contain more than 2 identifiers") test.AssertErrorIs(t, err, berrors.Malformed) } @@ -3496,171 +3541,157 @@ rA== -----END CERTIFICATE----- `) +// mockSARevocation is a fake which includes all of the SA methods called in the +// course of a revocation. Its behavior can be customized by providing sets of +// issued (known) certs, already-revoked certs, and already-blocked keys. It +// also updates the sets of revoked certs and blocked keys when certain methods +// are called, to allow for more complex test logic.
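+//
+// A typical interaction looks roughly like this (illustrative only, not
+// lifted from any single test below):
+//
+//	msar := newMockSARevocation(cert)
+//	_, err := msar.RevokeCertificate(ctx, &sapb.RevokeCertificateRequest{
+//		Serial: core.SerialToString(cert.SerialNumber),
+//		Reason: int64(revocation.KeyCompromise),
+//	})
+//	// msar.revoked now records the new status; msar.reset() clears the
+//	// revoked and blocked sets between test cases.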
type mockSARevocation struct { - mocks.StorageAuthority + sapb.StorageAuthorityClient - known *corepb.CertificateStatus + known map[string]*x509.Certificate + revoked map[string]*corepb.CertificateStatus blocked []*sapb.AddBlockedKeyRequest - revoked map[string]int64 } -func newMockSARevocation(known *x509.Certificate, clk clock.Clock) *mockSARevocation { +func newMockSARevocation(known *x509.Certificate) *mockSARevocation { return &mockSARevocation{ - StorageAuthority: *mocks.NewStorageAuthority(clk), - known: &corepb.CertificateStatus{ - Serial: core.SerialToString(known.SerialNumber), - IssuerID: int64(issuance.GetIssuerNameID(known)), - }, + known: map[string]*x509.Certificate{core.SerialToString(known.SerialNumber): known}, + revoked: make(map[string]*corepb.CertificateStatus), blocked: make([]*sapb.AddBlockedKeyRequest, 0), - revoked: make(map[string]int64), } } +func (msar *mockSARevocation) reset() { + msar.revoked = make(map[string]*corepb.CertificateStatus) + msar.blocked = make([]*sapb.AddBlockedKeyRequest, 0) +} + func (msar *mockSARevocation) AddBlockedKey(_ context.Context, req *sapb.AddBlockedKeyRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { msar.blocked = append(msar.blocked, req) return &emptypb.Empty{}, nil } +func (msar *mockSARevocation) GetSerialMetadata(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*sapb.SerialMetadata, error) { + if cert, present := msar.known[req.Serial]; present { + return &sapb.SerialMetadata{ + Serial: req.Serial, + RegistrationID: 1, + Created: timestamppb.New(cert.NotBefore), + Expires: timestamppb.New(cert.NotAfter), + }, nil + } + return nil, berrors.UnknownSerialError() +} + +func (msar *mockSARevocation) GetLintPrecertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + if cert, present := msar.known[req.Serial]; present { + return &corepb.Certificate{Der: cert.Raw}, nil + } + return nil, berrors.UnknownSerialError() +} + func (msar *mockSARevocation) GetCertificateStatus(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.CertificateStatus, error) { - if msar.known != nil && req.Serial == msar.known.Serial { - return msar.known, nil + if status, present := msar.revoked[req.Serial]; present { + return status, nil + } + if cert, present := msar.known[req.Serial]; present { + return &corepb.CertificateStatus{ + Serial: core.SerialToString(cert.SerialNumber), + IssuerID: int64(issuance.IssuerNameID(cert)), + }, nil + } + return nil, berrors.UnknownSerialError() +} + +func (msar *mockSARevocation) GetCertificate(_ context.Context, req *sapb.Serial, _ ...grpc.CallOption) (*corepb.Certificate, error) { + var serialBytes [16]byte + _, _ = rand.Read(serialBytes[:]) + serial := big.NewInt(0).SetBytes(serialBytes[:]) + + key, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + if err != nil { + return nil, err + } + + template := &x509.Certificate{ + SerialNumber: serial, + DNSNames: []string{"revokememaybe.example.com"}, + NotBefore: time.Now(), + NotAfter: time.Now().Add(6 * 24 * time.Hour), + IssuingCertificateURL: []string{"http://localhost:4001/acme/issuer-cert/1234"}, + CRLDistributionPoints: []string{"http://example.com/123.crl"}, } - return nil, fmt.Errorf("unknown certificate status") + + testCertDER, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, err + } + + return &corepb.Certificate{ + Der: testCertDER, + }, nil } func (msar *mockSARevocation) RevokeCertificate(_ context.Context, 
req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { if _, present := msar.revoked[req.Serial]; present { return nil, berrors.AlreadyRevokedError("already revoked") } - msar.revoked[req.Serial] = req.Reason - msar.known.Status = string(core.OCSPStatusRevoked) + cert, present := msar.known[req.Serial] + if !present { + return nil, berrors.UnknownSerialError() + } + msar.revoked[req.Serial] = &corepb.CertificateStatus{ + Serial: req.Serial, + IssuerID: int64(issuance.IssuerNameID(cert)), + Status: string(core.OCSPStatusRevoked), + RevokedReason: req.Reason, + } return &emptypb.Empty{}, nil } func (msar *mockSARevocation) UpdateRevokedCertificate(_ context.Context, req *sapb.RevokeCertificateRequest, _ ...grpc.CallOption) (*emptypb.Empty, error) { - reason, present := msar.revoked[req.Serial] + status, present := msar.revoked[req.Serial] if !present { return nil, errors.New("not already revoked") } - if present && reason == ocsp.KeyCompromise { + if revocation.Reason(req.Reason) != revocation.KeyCompromise { + return nil, errors.New("cannot re-revoke except for keyCompromise") + } + if present && revocation.Reason(status.RevokedReason) == revocation.KeyCompromise { return nil, berrors.AlreadyRevokedError("already revoked for keyCompromise") } - msar.revoked[req.Serial] = req.Reason - return &emptypb.Empty{}, nil -} - -type mockCAOCSP struct { - mocks.MockCA -} - -func (mcao *mockCAOCSP) GenerateOCSP(context.Context, *capb.GenerateOCSPRequest, ...grpc.CallOption) (*capb.OCSPResponse, error) { - return &capb.OCSPResponse{Response: []byte{1, 2, 3}}, nil -} - -type mockPurger struct{} - -func (mp *mockPurger) Purge(context.Context, *akamaipb.PurgeRequest, ...grpc.CallOption) (*emptypb.Empty, error) { + msar.revoked[req.Serial].RevokedReason = req.Reason return &emptypb.Empty{}, nil } -func TestRevokerCertificateWithReg(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) +func TestRevokeCertByApplicant_Subscriber(t *testing.T) { + _, _, ra, _, clk, _, cleanUp := initAuthorities(t) defer cleanUp() - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "ecdsa.GenerateKey failed") - digest, err := core.KeyDigest(k.Public()) - test.AssertNotError(t, err, "core.KeyDigest failed") - - template := x509.Certificate{SerialNumber: big.NewInt(257)} - der, err := x509.CreateCertificate(rand.Reader, &template, &template, k.Public(), k) - test.AssertNotError(t, err, "x509.CreateCertificate failed") - cert, err := x509.ParseCertificate(der) - test.AssertNotError(t, err, "x509.ParseCertificate failed") + // Use the same self-signed cert as both issuer and issuee for revocation. + _, cert := test.ThrowAwayCert(t, clk) + cert.IsCA = true ic, err := issuance.NewCertificate(cert) test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ + ra.issuersByNameID = map[issuance.NameID]*issuance.Certificate{ ic.NameID(): ic, } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - // Revoking for an unspecified reason should work but not block the key. 
- mockSA := newMockSARevocation(cert, clk) - ra.SA = mockSA - _, err = ra.RevokeCertificateWithReg(context.Background(), &rapb.RevokeCertificateWithRegRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 0, - }) - test.AssertNotError(t, err, "RevokeCertificateWithReg failed") - test.AssertEquals(t, len(mockSA.blocked), 0) - test.AssertMetricWithLabelsEquals( - t, ra.revocationReasonCounter, prometheus.Labels{"reason": "unspecified"}, 1) + ra.SA = newMockSARevocation(cert) - // Revoking for key comprommise should work and block the key. - mockSA = newMockSARevocation(cert, clk) - ra.SA = mockSA - _, err = ra.RevokeCertificateWithReg(context.Background(), &rapb.RevokeCertificateWithRegRequest{ + // Revoking without a regID should fail. + _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.KeyCompromise, - RegID: 0, - }) - test.AssertNotError(t, err, "RevokeCertificateWithReg failed") - test.AssertEquals(t, len(mockSA.blocked), 1) - test.Assert(t, bytes.Equal(digest[:], mockSA.blocked[0].KeyHash), "key hash mismatch") - test.AssertEquals(t, mockSA.blocked[0].Source, "API") - test.AssertEquals(t, len(mockSA.blocked[0].Comment), 0) - test.AssertMetricWithLabelsEquals( - t, ra.revocationReasonCounter, prometheus.Labels{"reason": "keyCompromise"}, 1) -} - -func TestRevokeCertByApplicant_Subscriber(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) - defer cleanUp() - - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): false}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - _, cert := test.ThrowAwayCert(t, 1) - ic, err := issuance.NewCertificate(cert) - test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ - ic.NameID(): ic, - } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - ra.SA = newMockSARevocation(cert, clk) - - // Revoking without a regID should fail. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), RegID: 0, }) test.AssertError(t, err, "should have failed with no RegID") test.AssertContains(t, err.Error(), "incomplete") - // Revoking for keyCompromise should fail. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.KeyCompromise, - RegID: 1, - }) - test.AssertError(t, err, "should have failed with bad reasonCode") - test.AssertContains(t, err.Error(), "disallowed revocation reason") - // Revoking for a disallowed reason should fail. _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.CertificateHold, + Code: int64(revocation.CertificateHold), RegID: 1, }) test.AssertError(t, err, "should have failed with bad reasonCode") @@ -3669,7 +3700,7 @@ func TestRevokeCertByApplicant_Subscriber(t *testing.T) { // Revoking with the correct regID should succeed. _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), RegID: 1, }) test.AssertNotError(t, err, "should have succeeded") @@ -3677,303 +3708,100 @@ func TestRevokeCertByApplicant_Subscriber(t *testing.T) { // Revoking an already-revoked serial should fail. 
_, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), RegID: 1, }) test.AssertError(t, err, "should have failed with bad reasonCode") test.AssertContains(t, err.Error(), "already revoked") } -func TestRevokeCertByApplicant_Subscriber_Moz(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) - defer cleanUp() - - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): true}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - _, cert := test.ThrowAwayCert(t, 1) - ic, err := issuance.NewCertificate(cert) - test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ - ic.NameID(): ic, - } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - ra.SA = newMockSARevocation(cert, clk) - - // Revoking without a regID should fail. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 0, - }) - test.AssertError(t, err, "should have failed with no RegID") - test.AssertContains(t, err.Error(), "incomplete") - - // Revoking for a disallowed reason should fail. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.CertificateHold, - RegID: 1, - }) - test.AssertError(t, err, "should have failed with bad reasonCode") - test.AssertContains(t, err.Error(), "disallowed revocation reason") - - // Revoking with the correct regID should succeed. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 1, - }) - test.AssertNotError(t, err, "should have succeeded") - - // Revoking an already-revoked serial should fail. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 1, - }) - test.AssertError(t, err, "should have failed with bad reasonCode") - test.AssertContains(t, err.Error(), "already revoked") +// mockSARevocationWithAuthzs embeds a mockSARevocation and so inherits all its +// methods, but also adds GetValidAuthorizations2 so that it can pretend to +// either be authorized or not for all of the names in the to-be-revoked cert. +type mockSARevocationWithAuthzs struct { + *mockSARevocation + authorized bool } -func TestRevokeCertByApplicant_Controller(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) - defer cleanUp() - - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): false}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} +func (msa *mockSARevocationWithAuthzs) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest, _ ...grpc.CallOption) (*sapb.Authorizations, error) { + authzs := &sapb.Authorizations{} - _, cert := test.ThrowAwayCert(t, 1) - ic, err := issuance.NewCertificate(cert) - test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ - ic.NameID(): ic, - } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, + if !msa.authorized { + return authzs, nil } - mockSA := newMockSARevocation(cert, clk) - ra.SA = mockSA - // Revoking with the wrong regID should fail. 
- _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 2, - }) - test.AssertError(t, err, "should have failed with wrong RegID") - test.AssertContains(t, err.Error(), "requester does not control all names") + for _, ident := range req.Identifiers { + authzs.Authzs = append(authzs.Authzs, &corepb.Authorization{Identifier: ident}) + } - // Revoking with a different RegID that has valid authorizations should succeed. - _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 5, - }) - test.AssertNotError(t, err, "should have succeeded") - test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)], int64(ocsp.Unspecified)) + return authzs, nil } -func TestRevokeCertByApplicant_Controller_Moz(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) +func TestRevokeCertByApplicant_Controller(t *testing.T) { + _, _, ra, _, clk, _, cleanUp := initAuthorities(t) defer cleanUp() - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): true}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - _, cert := test.ThrowAwayCert(t, 1) + // Use the same self-signed cert as both issuer and issuee for revocation. + _, cert := test.ThrowAwayCert(t, clk) + cert.IsCA = true ic, err := issuance.NewCertificate(cert) test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ + ra.issuersByNameID = map[issuance.NameID]*issuance.Certificate{ ic.NameID(): ic, } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - mockSA := newMockSARevocation(cert, clk) - ra.SA = mockSA + mockSA := newMockSARevocation(cert) - // Revoking with the wrong regID should fail. + // Revoking when the account doesn't have valid authzs for the name should fail. + // We use RegID 2 here and below because the mockSARevocation believes regID 1 + // is the original issuer. + ra.SA = &mockSARevocationWithAuthzs{mockSA, false} _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), RegID: 2, }) test.AssertError(t, err, "should have failed with wrong RegID") - test.AssertContains(t, err.Error(), "requester does not control all names") + test.AssertContains(t, err.Error(), "requester does not control all identifiers") - // Revoking with a different RegID that has valid authorizations should succeed, + // Revoking when the account does have valid authzs for the name should succeed, // but override the revocation reason to cessationOfOperation. 
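+ // (The override reflects the policy under test: a requester who proves
+ // control of the certificate's identifiers, but not possession of its
+ // private key, may request revocation but may not assert reasons such as
+ // keyCompromise, so the reason is normalized to cessationOfOperation.)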
+ ra.SA = &mockSARevocationWithAuthzs{mockSA, true} _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, - RegID: 5, + Code: int64(revocation.Unspecified), + RegID: 2, }) test.AssertNotError(t, err, "should have succeeded") - test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)], int64(ocsp.CessationOfOperation)) + test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)].RevokedReason, int64(revocation.CessationOfOperation)) } func TestRevokeCertByKey(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) + _, _, ra, _, clk, _, cleanUp := initAuthorities(t) defer cleanUp() - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): false}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "ecdsa.GenerateKey failed") - digest, err := core.KeyDigest(k.Public()) + // Use the same self-signed cert as both issuer and issuee for revocation. + _, cert := test.ThrowAwayCert(t, clk) + digest, err := core.KeyDigest(cert.PublicKey) test.AssertNotError(t, err, "core.KeyDigest failed") - - template := x509.Certificate{SerialNumber: big.NewInt(257)} - der, err := x509.CreateCertificate(rand.Reader, &template, &template, k.Public(), k) - test.AssertNotError(t, err, "x509.CreateCertificate failed") - cert, err := x509.ParseCertificate(der) - test.AssertNotError(t, err, "x509.ParseCertificate failed") + cert.IsCA = true ic, err := issuance.NewCertificate(cert) test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ + ra.issuersByNameID = map[issuance.NameID]*issuance.Certificate{ ic.NameID(): ic, } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - mockSA := newMockSARevocation(cert, clk) - ra.SA = mockSA - - // Revoking for a forbidden reason should fail. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.CACompromise, - }) - test.AssertError(t, err, "should have failed") - - // Revoking for any reason should work and preserve the requested reason. - // It should not block the key. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - }) - test.AssertNotError(t, err, "should have succeeded") - test.AssertEquals(t, len(mockSA.blocked), 0) - test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)], int64(ocsp.Unspecified)) - - // Re-revoking for any reason should fail, because it isn't enabled. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.KeyCompromise, - }) - test.AssertError(t, err, "should have failed") - - // Enable re-revocation. - _ = features.Set(map[string]bool{ - features.MozRevocationReasons.String(): false, - features.AllowReRevocation.String(): true, - }) - - // Re-revoking for the same reason should fail. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - }) - test.AssertError(t, err, "should have failed") - - // Re-revoking for keyCompromise should succeed, update the reason, and block - // the key. 
- _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.KeyCompromise, - }) - test.AssertNotError(t, err, "should have succeeded") - test.AssertEquals(t, len(mockSA.blocked), 1) - test.Assert(t, bytes.Equal(digest[:], mockSA.blocked[0].KeyHash), "key hash mismatch") - test.AssertEquals(t, mockSA.blocked[0].Source, "API") - test.AssertEquals(t, len(mockSA.blocked[0].Comment), 0) - test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)], int64(ocsp.KeyCompromise)) - - // Re-revoking should fail because it is already revoked for keyCompromise. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, - }) - test.AssertError(t, err, "should have failed") - - // Re-revoking even for keyCompromise should fail for the same reason. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - Code: ocsp.KeyCompromise, - }) - test.AssertError(t, err, "should have failed") -} - -func TestRevokeCertByKey_Moz(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) - defer cleanUp() - - _ = features.Set(map[string]bool{features.MozRevocationReasons.String(): true}) - defer features.Reset() - - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "ecdsa.GenerateKey failed") - digest, err := core.KeyDigest(k.Public()) - test.AssertNotError(t, err, "core.KeyDigest failed") - - template := x509.Certificate{SerialNumber: big.NewInt(257)} - der, err := x509.CreateCertificate(rand.Reader, &template, &template, k.Public(), k) - test.AssertNotError(t, err, "x509.CreateCertificate failed") - cert, err := x509.ParseCertificate(der) - test.AssertNotError(t, err, "x509.ParseCertificate failed") - ic, err := issuance.NewCertificate(cert) - test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ - ic.NameID(): ic, - } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - mockSA := newMockSARevocation(cert, clk) + mockSA := newMockSARevocation(cert) ra.SA = mockSA // Revoking should work, but override the requested reason and block the key. _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, }) test.AssertNotError(t, err, "should have succeeded") test.AssertEquals(t, len(mockSA.blocked), 1) test.Assert(t, bytes.Equal(digest[:], mockSA.blocked[0].KeyHash), "key hash mismatch") test.AssertEquals(t, mockSA.blocked[0].Source, "API") test.AssertEquals(t, len(mockSA.blocked[0].Comment), 0) - test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)], int64(ocsp.KeyCompromise)) - - // Re-revoking should fail, because re-revocation is not allowed. - _, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ - Cert: cert.Raw, - }) - test.AssertError(t, err, "should have failed") - - // Enable re-revocation. - _ = features.Set(map[string]bool{ - features.MozRevocationReasons.String(): true, - features.AllowReRevocation.String(): true, - }) + test.AssertEquals(t, mockSA.revoked[core.SerialToString(cert.SerialNumber)].RevokedReason, int64(revocation.KeyCompromise)) // Re-revoking should fail, because it is already revoked for keyCompromise. 
_, err = ra.RevokeCertByKey(context.Background(), &rapb.RevokeCertByKeyRequest{ @@ -3983,10 +3811,10 @@ func TestRevokeCertByKey_Moz(t *testing.T) { // Reset and have the Subscriber revoke for a different reason. // Then re-revoking using the key should work. - mockSA.revoked = make(map[string]int64) + mockSA.revoked = make(map[string]*corepb.CertificateStatus) _, err = ra.RevokeCertByApplicant(context.Background(), &rapb.RevokeCertByApplicantRequest{ Cert: cert.Raw, - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), RegID: 1, }) test.AssertNotError(t, err, "should have succeeded") @@ -3997,96 +3825,101 @@ func TestRevokeCertByKey_Moz(t *testing.T) { } func TestAdministrativelyRevokeCertificate(t *testing.T) { - _, _, ra, clk, cleanUp := initAuthorities(t) + _, _, ra, _, clk, _, cleanUp := initAuthorities(t) defer cleanUp() - ra.CA = &mockCAOCSP{} - ra.purger = &mockPurger{} - - k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - test.AssertNotError(t, err, "ecdsa.GenerateKey failed") - digest, err := core.KeyDigest(k.Public()) + // Use the same self-signed cert as both issuer and issuee for revocation. + serial, cert := test.ThrowAwayCert(t, clk) + digest, err := core.KeyDigest(cert.PublicKey) test.AssertNotError(t, err, "core.KeyDigest failed") - - template := x509.Certificate{SerialNumber: big.NewInt(257)} - der, err := x509.CreateCertificate(rand.Reader, &template, &template, k.Public(), k) - test.AssertNotError(t, err, "x509.CreateCertificate failed") - cert, err := x509.ParseCertificate(der) - test.AssertNotError(t, err, "x509.ParseCertificate failed") + cert.IsCA = true ic, err := issuance.NewCertificate(cert) test.AssertNotError(t, err, "failed to create issuer cert") - ra.issuersByNameID = map[issuance.IssuerNameID]*issuance.Certificate{ + ra.issuersByNameID = map[issuance.NameID]*issuance.Certificate{ ic.NameID(): ic, } - ra.issuersByID = map[issuance.IssuerID]*issuance.Certificate{ - ic.ID(): ic, - } - mockSA := newMockSARevocation(cert, clk) + mockSA := newMockSARevocation(cert) ra.SA = mockSA // Revoking with an empty request should fail immediately. _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{}) test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed for nil request object") - // Revoking with neither a cert nor a serial should fail immediately. + // Revoking with no serial should fail immediately. + mockSA.reset() _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Code: ocsp.Unspecified, + Code: int64(revocation.Unspecified), AdminName: "root", }) test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed with no cert or serial") - // Revoking with a nil cert and no serial should fail immediately. - _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: []byte{}, - Code: ocsp.KeyCompromise, - AdminName: "", - }) - test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed for nil `Cert`") - // Revoking without an admin name should fail immediately. 
+ mockSA.reset() _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: cert.Raw, - Code: ocsp.KeyCompromise, + Serial: serial, + Code: int64(revocation.Unspecified), AdminName: "", }) test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed with empty string for `AdminName`") // Revoking for a forbidden reason should fail immediately. + mockSA.reset() _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: cert.Raw, - Code: ocsp.CertificateHold, + Serial: serial, + Code: int64(revocation.CertificateHold), AdminName: "root", }) test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed with forbidden revocation reason") // Revoking a cert for an unspecified reason should work but not block the key. + mockSA.reset() _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Cert: cert.Raw, - Code: ocsp.Unspecified, + Serial: serial, + Code: int64(revocation.Unspecified), AdminName: "root", }) test.AssertNotError(t, err, "AdministrativelyRevokeCertificate failed") test.AssertEquals(t, len(mockSA.blocked), 0) - test.AssertMetricWithLabelsEquals( - t, ra.revocationReasonCounter, prometheus.Labels{"reason": "unspecified"}, 1) // Revoking a serial for an unspecified reason should work but not block the key. - mockSA.revoked = make(map[string]int64) + mockSA.reset() _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ - Serial: core.SerialToString(cert.SerialNumber), - Code: ocsp.Unspecified, + Serial: serial, + Code: int64(revocation.Unspecified), + AdminName: "root", + }) + test.AssertNotError(t, err, "AdministrativelyRevokeCertificate failed") + test.AssertEquals(t, len(mockSA.blocked), 0) + + // Duplicate administrative revocation of a serial for any reason other than + // keyCompromise should fail. + // Note that we *don't* call reset() here, so it recognizes the duplicate. + _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ + Serial: serial, + Code: int64(revocation.Unspecified), AdminName: "root", }) + test.AssertError(t, err, "Should be revoked") + test.AssertContains(t, err.Error(), "already revoked") + test.AssertEquals(t, len(mockSA.blocked), 0) + + // Revoking a cert for key compromise with skipBlockKey set should work but + // not block the key. + mockSA.reset() + _, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{ + Serial: serial, + Code: int64(revocation.KeyCompromise), + AdminName: "root", + SkipBlockKey: true, + }) test.AssertNotError(t, err, "AdministrativelyRevokeCertificate failed") test.AssertEquals(t, len(mockSA.blocked), 0) - test.AssertMetricWithLabelsEquals( - t, ra.revocationReasonCounter, prometheus.Labels{"reason": "unspecified"}, 2) // Revoking a cert for key compromise should work and block the key. 
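+	// The assertions that follow check the full blocked-key row recorded by the
+	// mock SA: key hash, source, comment, and added timestamp.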
-	mockSA.revoked = make(map[string]int64)
+	mockSA.reset()
 	_, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{
-		Cert:      cert.Raw,
-		Code:      ocsp.KeyCompromise,
+		Serial:    serial,
+		Code:      int64(revocation.KeyCompromise),
 		AdminName: "root",
 	})
 	test.AssertNotError(t, err, "AdministrativelyRevokeCertificate failed")
@@ -4094,15 +3927,315 @@ func TestAdministrativelyRevokeCertificate(t *testing.T) {
 	test.Assert(t, bytes.Equal(digest[:], mockSA.blocked[0].KeyHash), "key hash mismatch")
 	test.AssertEquals(t, mockSA.blocked[0].Source, "admin-revoker")
 	test.AssertEquals(t, mockSA.blocked[0].Comment, "revoked by root")
-	test.AssertMetricWithLabelsEquals(
-		t, ra.revocationReasonCounter, prometheus.Labels{"reason": "keyCompromise"}, 1)
+	test.AssertEquals(t, mockSA.blocked[0].Added.AsTime(), clk.Now())
 
-	// Revoking a serial for key compromise should fail because we don't have the pubkey to block.
-	mockSA.revoked = make(map[string]int64)
+	// Revoking a malformed cert for key compromise should fail because we don't
+	// have the pubkey to block.
+	mockSA.reset()
 	_, err = ra.AdministrativelyRevokeCertificate(context.Background(), &rapb.AdministrativelyRevokeCertificateRequest{
 		Serial:    core.SerialToString(cert.SerialNumber),
-		Code:      ocsp.KeyCompromise,
+		Code:      int64(revocation.KeyCompromise),
 		AdminName: "root",
+		Malformed: true,
 	})
 	test.AssertError(t, err, "AdministrativelyRevokeCertificate should have failed with just serial for keyCompromise")
 }
+
+// mockNewOrderMustBeReplacementAuthority is an authority that returns an error
+// from NewOrderAndAuthzs if the "ReplacesSerial" field of the request is empty.
+type mockNewOrderMustBeReplacementAuthority struct {
+	mockSAWithAuthzs
+}
+
+func (sa *mockNewOrderMustBeReplacementAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest, _ ...grpc.CallOption) (*corepb.Order, error) {
+	if req.NewOrder.ReplacesSerial == "" {
+		return nil, status.Error(codes.InvalidArgument, "NewOrder is not a replacement")
+	}
+	return &corepb.Order{
+		Id:             1,
+		RegistrationID: req.NewOrder.RegistrationID,
+		Expires:        req.NewOrder.Expires,
+		Status:         string(core.StatusPending),
+		Created:        timestamppb.New(time.Now()),
+		Identifiers:    req.NewOrder.Identifiers,
+	}, nil
+}
+
+func TestNewOrderReplacesSerialCarriesThroughToSA(t *testing.T) {
+	_, _, ra, _, _, registration, cleanUp := initAuthorities(t)
+	defer cleanUp()
+
+	exampleOrder := &rapb.NewOrderRequest{
+		RegistrationID: registration.Id,
+		Identifiers:    []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
+		ReplacesSerial: "1234",
+	}
+
+	// Mock SA that returns an error from NewOrderAndAuthzs if the
+	// "ReplacesSerial" field of the request is empty.
+	ra.SA = &mockNewOrderMustBeReplacementAuthority{mockSAWithAuthzs{}}
+
+	_, err := ra.NewOrder(ctx, exampleOrder)
+	test.AssertNotError(t, err, "order with ReplacesSerial should have succeeded")
+}
+
+// mockSAUnpauseAccount is a fake which includes all of the SA methods called
+// in the course of an account unpause. Its behavior can be customized by
+// providing the number of unpaused account identifiers to allow testing of
+// various scenarios.
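+// Its UnpauseAccount stub records the requested account ID and returns
+// identsToUnpause as the count, so tests can assert both directions of the
+// call.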
+type mockSAUnpauseAccount struct { + sapb.StorageAuthorityClient + identsToUnpause int64 + receivedRegID int64 +} + +func (sa *mockSAUnpauseAccount) UnpauseAccount(_ context.Context, req *sapb.RegistrationID, _ ...grpc.CallOption) (*sapb.Count, error) { + sa.receivedRegID = req.Id + return &sapb.Count{Count: sa.identsToUnpause}, nil +} + +// TestUnpauseAccount tests that the RA's UnpauseAccount method correctly passes +// the requested RegID to the SA, and correctly passes the SA's count back to +// the caller. +func TestUnpauseAccount(t *testing.T) { + _, _, ra, _, _, _, cleanUp := initAuthorities(t) + defer cleanUp() + + mockSA := mockSAUnpauseAccount{identsToUnpause: 0} + ra.SA = &mockSA + + res, err := ra.UnpauseAccount(context.Background(), &rapb.UnpauseAccountRequest{ + RegistrationID: 1, + }) + test.AssertNotError(t, err, "Should have been able to unpause account") + test.AssertEquals(t, res.Count, int64(0)) + test.AssertEquals(t, mockSA.receivedRegID, int64(1)) + + mockSA.identsToUnpause = 50001 + res, err = ra.UnpauseAccount(context.Background(), &rapb.UnpauseAccountRequest{ + RegistrationID: 1, + }) + test.AssertNotError(t, err, "Should have been able to unpause account") + test.AssertEquals(t, res.Count, int64(50001)) +} + +func TestGetAuthorization(t *testing.T) { + _, _, ra, _, _, _, cleanup := initAuthorities(t) + defer cleanup() + + ra.SA = &mockSAWithAuthzs{ + authzs: []*core.Authorization{ + { + ID: "1", + Identifier: identifier.NewDNS("example.com"), + Status: "valid", + Challenges: []core.Challenge{ + { + Type: core.ChallengeTypeHTTP01, + Status: core.StatusValid, + }, + }, + }, + }, + } + + // With HTTP01 enabled, GetAuthorization should pass the mock challenge through. + pa, err := policy.New( + map[identifier.IdentifierType]bool{ + identifier.TypeDNS: true, + identifier.TypeIP: true, + }, + map[core.AcmeChallenge]bool{ + core.ChallengeTypeHTTP01: true, + core.ChallengeTypeDNS01: true, + }, + blog.NewMock()) + test.AssertNotError(t, err, "Couldn't create PA") + ra.PA = pa + authz, err := ra.GetAuthorization(context.Background(), &rapb.GetAuthorizationRequest{Id: 1}) + test.AssertNotError(t, err, "should not fail") + test.AssertEquals(t, len(authz.Challenges), 1) + test.AssertEquals(t, authz.Challenges[0].Type, string(core.ChallengeTypeHTTP01)) + + // With HTTP01 disabled, GetAuthorization should filter out the mock challenge. + pa, err = policy.New( + map[identifier.IdentifierType]bool{ + identifier.TypeDNS: true, + identifier.TypeIP: true, + }, + map[core.AcmeChallenge]bool{ + core.ChallengeTypeDNS01: true, + }, + blog.NewMock()) + test.AssertNotError(t, err, "Couldn't create PA") + ra.PA = pa + authz, err = ra.GetAuthorization(context.Background(), &rapb.GetAuthorizationRequest{Id: 1}) + test.AssertNotError(t, err, "should not fail") + test.AssertEquals(t, len(authz.Challenges), 0) +} + +type NoUpdateSA struct { + sapb.StorageAuthorityClient +} + +func (sa *NoUpdateSA) UpdateRegistrationKey(_ context.Context, _ *sapb.UpdateRegistrationKeyRequest, _ ...grpc.CallOption) (*corepb.Registration, error) { + return nil, fmt.Errorf("UpdateRegistrationKey() is mocked to always error") +} + +// mockSARecordingRegistration tests UpdateRegistrationKey. +type mockSARecordingRegistration struct { + sapb.StorageAuthorityClient + providedRegistrationID int64 + providedJwk []byte +} + +// UpdateRegistrationKey records the registration ID and updated key provided. 
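+// It echoes both back in a minimal Registration, which is all that the
+// assertions in TestUpdateRegistrationKey require.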
+func (sa *mockSARecordingRegistration) UpdateRegistrationKey(ctx context.Context, req *sapb.UpdateRegistrationKeyRequest, _ ...grpc.CallOption) (*corepb.Registration, error) { + sa.providedRegistrationID = req.RegistrationID + sa.providedJwk = req.Jwk + + return &corepb.Registration{ + Id: req.RegistrationID, + Key: req.Jwk, + }, nil +} + +// TestUpdateRegistrationKey tests that the RA's UpdateRegistrationKey method +// correctly requires a registration ID and key, passes them to the SA, and +// passes the updated Registration back to the caller. +func TestUpdateRegistrationKey(t *testing.T) { + _, _, ra, _, _, _, cleanUp := initAuthorities(t) + defer cleanUp() + + expectRegID := int64(1) + expectJwk := AccountKeyJSONA + mockSA := mockSARecordingRegistration{} + ra.SA = &mockSA + + _, err := ra.UpdateRegistrationKey(context.Background(), &rapb.UpdateRegistrationKeyRequest{}) + test.AssertError(t, err, "should not have been able to update registration key without a registration ID or key") + test.AssertContains(t, err.Error(), "incomplete gRPC request message") + + _, err = ra.UpdateRegistrationKey(context.Background(), &rapb.UpdateRegistrationKeyRequest{RegistrationID: expectRegID}) + test.AssertError(t, err, "should not have been able to update registration key without a key") + test.AssertContains(t, err.Error(), "incomplete gRPC request message") + + _, err = ra.UpdateRegistrationKey(context.Background(), &rapb.UpdateRegistrationKeyRequest{Jwk: expectJwk}) + test.AssertError(t, err, "should not have been able to update registration key without a registration ID") + test.AssertContains(t, err.Error(), "incomplete gRPC request message") + + res, err := ra.UpdateRegistrationKey(context.Background(), &rapb.UpdateRegistrationKeyRequest{ + RegistrationID: expectRegID, + Jwk: expectJwk, + }) + test.AssertNotError(t, err, "should have been able to update registration key") + test.AssertEquals(t, res.Id, expectRegID) + test.AssertEquals(t, mockSA.providedRegistrationID, expectRegID) + test.AssertDeepEquals(t, res.Key, expectJwk) + test.AssertDeepEquals(t, mockSA.providedJwk, expectJwk) + + // Switch to a mock SA that will always error if UpdateRegistrationKey() is + // called. 
+ ra.SA = &NoUpdateSA{} + _, err = ra.UpdateRegistrationKey(context.Background(), &rapb.UpdateRegistrationKeyRequest{ + RegistrationID: expectRegID, + Jwk: expectJwk, + }) + test.AssertError(t, err, "should have received an error from the SA") + test.AssertContains(t, err.Error(), "failed to update registration key") + test.AssertContains(t, err.Error(), "mocked to always error") +} + +func TestCRLShard(t *testing.T) { + var cdp []string + n, err := crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err == nil { + t.Errorf("crlShard(%+v) = %d, %s, want 0, some error", cdp, n, err) + } + + cdp = []string{ + "https://example.com/123.crl", + "https://example.net/123.crl", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err == nil { + t.Errorf("crlShard(%+v) = %d, %s, want 0, some error", cdp, n, err) + } + + cdp = []string{ + "https://example.com/abc", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err == nil { + t.Errorf("crlShard(%+v) = %d, %s, want 0, some error", cdp, n, err) + } + + cdp = []string{ + "example", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err == nil { + t.Errorf("crlShard(%+v) = %d, %s, want 0, some error", cdp, n, err) + } + + cdp = []string{ + "https://example.com/abc/-77.crl", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err == nil { + t.Errorf("crlShard(%+v) = %d, %s, want 0, some error", cdp, n, err) + } + + cdp = []string{ + "https://example.com/abc/123", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err != nil || n != 123 { + t.Errorf("crlShard(%+v) = %d, %s, want 123, nil", cdp, n, err) + } + + cdp = []string{ + "https://example.com/abc/123.crl", + } + n, err = crlShard(&x509.Certificate{CRLDistributionPoints: cdp}) + if err != nil || n != 123 { + t.Errorf("crlShard(%+v) = %d, %s, want 123, nil", cdp, n, err) + } +} + +type mockSAWithOverrides struct { + sapb.StorageAuthorityClient + inserted *sapb.AddRateLimitOverrideRequest +} + +func (sa *mockSAWithOverrides) AddRateLimitOverride(ctx context.Context, req *sapb.AddRateLimitOverrideRequest, _ ...grpc.CallOption) (*sapb.AddRateLimitOverrideResponse, error) { + sa.inserted = req + return &sapb.AddRateLimitOverrideResponse{}, nil +} + +func TestAddRateLimitOverride(t *testing.T) { + _, _, ra, _, _, _, cleanUp := initAuthorities(t) + defer cleanUp() + + mockSA := mockSAWithOverrides{} + ra.SA = &mockSA + + expectBucketKey := core.RandomString(10) + ov := rapb.AddRateLimitOverrideRequest{ + LimitEnum: 1, + BucketKey: expectBucketKey, + Comment: "insert", + Period: durationpb.New(time.Hour), + Count: 100, + Burst: 100, + } + + _, err := ra.AddRateLimitOverride(ctx, &ov) + test.AssertNotError(t, err, "expected successful insert, got error") + test.AssertEquals(t, mockSA.inserted.Override.LimitEnum, ov.LimitEnum) + test.AssertEquals(t, mockSA.inserted.Override.BucketKey, expectBucketKey) + test.AssertEquals(t, mockSA.inserted.Override.Comment, ov.Comment) + test.AssertEquals(t, mockSA.inserted.Override.Period.AsDuration(), ov.Period.AsDuration()) + test.AssertEquals(t, mockSA.inserted.Override.Count, ov.Count) + test.AssertEquals(t, mockSA.inserted.Override.Burst, ov.Burst) +} diff --git a/ratelimit/rate-limits.go b/ratelimit/rate-limits.go deleted file mode 100644 index c199b11417c..00000000000 --- a/ratelimit/rate-limits.go +++ /dev/null @@ -1,234 +0,0 @@ -package ratelimit - -import ( - "sync" - "time" - - "gopkg.in/yaml.v2" - - 
"github.com/letsencrypt/boulder/cmd" -) - -// Limits is defined to allow mock implementations be provided during unit -// testing -type Limits interface { - CertificatesPerName() RateLimitPolicy - RegistrationsPerIP() RateLimitPolicy - RegistrationsPerIPRange() RateLimitPolicy - PendingAuthorizationsPerAccount() RateLimitPolicy - InvalidAuthorizationsPerAccount() RateLimitPolicy - CertificatesPerFQDNSet() RateLimitPolicy - CertificatesPerFQDNSetFast() RateLimitPolicy - PendingOrdersPerAccount() RateLimitPolicy - NewOrdersPerAccount() RateLimitPolicy - LoadPolicies(contents []byte) error -} - -// limitsImpl is an unexported implementation of the Limits interface. It acts -// as a container for a rateLimitConfig and a mutex. This allows the inner -// rateLimitConfig pointer to be updated safely when the overall configuration -// changes (e.g. due to a reload of the policy file) -type limitsImpl struct { - sync.RWMutex - rlPolicy *rateLimitConfig -} - -func (r *limitsImpl) CertificatesPerName() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.CertificatesPerName -} - -func (r *limitsImpl) RegistrationsPerIP() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.RegistrationsPerIP -} - -func (r *limitsImpl) RegistrationsPerIPRange() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.RegistrationsPerIPRange -} - -func (r *limitsImpl) PendingAuthorizationsPerAccount() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.PendingAuthorizationsPerAccount -} - -func (r *limitsImpl) InvalidAuthorizationsPerAccount() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.InvalidAuthorizationsPerAccount -} - -func (r *limitsImpl) CertificatesPerFQDNSet() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.CertificatesPerFQDNSet -} - -func (r *limitsImpl) CertificatesPerFQDNSetFast() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.CertificatesPerFQDNSetFast -} - -func (r *limitsImpl) PendingOrdersPerAccount() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.PendingOrdersPerAccount -} - -func (r *limitsImpl) NewOrdersPerAccount() RateLimitPolicy { - r.RLock() - defer r.RUnlock() - if r.rlPolicy == nil { - return RateLimitPolicy{} - } - return r.rlPolicy.NewOrdersPerAccount -} - -// LoadPolicies loads various rate limiting policies from a byte array of -// YAML configuration (typically read from disk by a reloader) -func (r *limitsImpl) LoadPolicies(contents []byte) error { - var newPolicy rateLimitConfig - err := yaml.Unmarshal(contents, &newPolicy) - if err != nil { - return err - } - - r.Lock() - r.rlPolicy = &newPolicy - r.Unlock() - return nil -} - -func New() Limits { - return &limitsImpl{} -} - -// rateLimitConfig contains all application layer rate limiting policies. It is -// unexported and clients are expected to use the exported container struct -type rateLimitConfig struct { - // Number of certificates that can be extant containing any given name. 
- // These are counted by "base domain" aka eTLD+1, so any entries in the - // overrides section must be an eTLD+1 according to the publicsuffix package. - CertificatesPerName RateLimitPolicy `yaml:"certificatesPerName"` - // Number of registrations that can be created per IP. - // Note: Since this is checked before a registration is created, setting a - // RegistrationOverride on it has no effect. - RegistrationsPerIP RateLimitPolicy `yaml:"registrationsPerIP"` - // Number of registrations that can be created per fuzzy IP range. Unlike - // RegistrationsPerIP this will apply to a /48 for IPv6 addresses to help curb - // abuse from easily obtained IPv6 ranges. - // Note: Like RegistrationsPerIP, setting a RegistrationOverride has no - // effect here. - RegistrationsPerIPRange RateLimitPolicy `yaml:"registrationsPerIPRange"` - // Number of pending authorizations that can exist per account. Overrides by - // key are not applied, but overrides by registration are. - PendingAuthorizationsPerAccount RateLimitPolicy `yaml:"pendingAuthorizationsPerAccount"` - // Number of invalid authorizations that can be failed per account within the - // given window. Overrides by key are not applied, but overrides by registration are. - // Note that this limit is actually "per account, per hostname," but that - // is too long for the variable name. - InvalidAuthorizationsPerAccount RateLimitPolicy `yaml:"invalidAuthorizationsPerAccount"` - // Number of pending orders that can exist per account. Overrides by key are - // not applied, but overrides by registration are. **DEPRECATED** - PendingOrdersPerAccount RateLimitPolicy `yaml:"pendingOrdersPerAccount"` - // Number of new orders that can be created per account within the given - // window. Overrides by key are not applied, but overrides by registration are. - NewOrdersPerAccount RateLimitPolicy `yaml:"newOrdersPerAccount"` - // Number of certificates that can be extant containing a specific set - // of DNS names. - CertificatesPerFQDNSet RateLimitPolicy `yaml:"certificatesPerFQDNSet"` - // Same as above, but intended to both trigger and reset faster (i.e. a - // lower threshold and smaller window), so that clients don't have to wait - // a long time after a small burst of accidental duplicate issuance. - CertificatesPerFQDNSetFast RateLimitPolicy `yaml:"certificatesPerFQDNSetFast"` -} - -// RateLimitPolicy describes a general limiting policy -type RateLimitPolicy struct { - // How long to count items for - Window cmd.ConfigDuration `yaml:"window"` - // The max number of items that can be present before triggering the rate - // limit. Zero means "no limit." - Threshold int64 `yaml:"threshold"` - // A per-key override setting different limits than the default (higher or lower). - // The key is defined on a per-limit basis and should match the key it counts on. - // For instance, a rate limit on the number of certificates per name uses name as - // a key, while a rate limit on the number of registrations per IP subnet would - // use subnet as a key. Note that a zero entry in the overrides map does not - // mean "no limit," it means a limit of zero. - Overrides map[string]int64 `yaml:"overrides"` - // A per-registration override setting. This can be used, e.g. if there are - // hosting providers that we would like to grant a higher rate of issuance - // than the default. If both key-based and registration-based overrides are - // available, whichever is larger takes priority. 
Note that a zero entry in - // the overrides map does not mean "no limit", it means a limit of zero. - RegistrationOverrides map[int64]int64 `yaml:"registrationOverrides"` -} - -// Enabled returns true iff the RateLimitPolicy is enabled. -func (rlp *RateLimitPolicy) Enabled() bool { - return rlp.Threshold != 0 -} - -// GetThreshold returns the threshold for this rate limit, taking into account -// any overrides for `key` or `regID`. If both `key` and `regID` have an -// override the largest of the two will be used. -func (rlp *RateLimitPolicy) GetThreshold(key string, regID int64) int64 { - regOverride, regOverrideExists := rlp.RegistrationOverrides[regID] - keyOverride, keyOverrideExists := rlp.Overrides[key] - - if regOverrideExists && !keyOverrideExists { - // If there is a regOverride and no keyOverride use the regOverride - return regOverride - } else if !regOverrideExists && keyOverrideExists { - // If there is a keyOverride and no regOverride use the keyOverride - return keyOverride - } else if regOverrideExists && keyOverrideExists { - // If there is both a regOverride and a keyOverride use whichever is larger. - if regOverride > keyOverride { - return regOverride - } else { - return keyOverride - } - } - - // Otherwise there was no regOverride and no keyOverride, use the base - // Threshold - return rlp.Threshold -} - -// WindowBegin returns the time that a RateLimitPolicy's window begins, given a -// particular end time (typically the current time). -func (rlp *RateLimitPolicy) WindowBegin(windowEnd time.Time) time.Time { - return windowEnd.Add(-1 * rlp.Window.Duration) -} diff --git a/ratelimit/rate-limits_test.go b/ratelimit/rate-limits_test.go deleted file mode 100644 index 449aa5e86bb..00000000000 --- a/ratelimit/rate-limits_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package ratelimit - -import ( - "io/ioutil" - "testing" - "time" - - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/test" -) - -func TestEnabled(t *testing.T) { - policy := RateLimitPolicy{ - Threshold: 10, - } - if !policy.Enabled() { - t.Errorf("Policy should have been enabled.") - } -} - -func TestNotEnabled(t *testing.T) { - policy := RateLimitPolicy{ - Threshold: 0, - } - if policy.Enabled() { - t.Errorf("Policy should not have been enabled.") - } -} - -func TestGetThreshold(t *testing.T) { - policy := RateLimitPolicy{ - Threshold: 1, - Overrides: map[string]int64{ - "key": 2, - "baz": 99, - }, - RegistrationOverrides: map[int64]int64{ - 101: 3, - }, - } - - testCases := []struct { - Name string - Key string - RegID int64 - Expected int64 - }{ - - { - Name: "No key or reg overrides", - Key: "foo", - RegID: 11, - Expected: 1, - }, - { - Name: "Key override, no reg override", - Key: "key", - RegID: 11, - Expected: 2, - }, - { - Name: "No key override, reg override", - Key: "foo", - RegID: 101, - Expected: 3, - }, - { - Name: "Key override, larger reg override", - Key: "foo", - RegID: 101, - Expected: 3, - }, - { - Name: "Key override, smaller reg override", - Key: "baz", - RegID: 101, - Expected: 99, - }, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - test.AssertEquals(t, - policy.GetThreshold(tc.Key, tc.RegID), - tc.Expected) - }) - } -} - -func TestWindowBegin(t *testing.T) { - policy := RateLimitPolicy{ - Window: cmd.ConfigDuration{Duration: 24 * time.Hour}, - } - now := time.Date(2015, 9, 22, 0, 0, 0, 0, time.UTC) - expected := time.Date(2015, 9, 21, 0, 0, 0, 0, time.UTC) - actual := policy.WindowBegin(now) - if actual != expected { - 
t.Errorf("Incorrect WindowBegin: %s, expected %s", actual, expected) - } -} - -func TestLoadPolicies(t *testing.T) { - policy := New() - - policyContent, readErr := ioutil.ReadFile("../test/rate-limit-policies.yml") - test.AssertNotError(t, readErr, "Failed to load rate-limit-policies.yml") - - // Test that loading a good policy from YAML doesn't error - err := policy.LoadPolicies(policyContent) - test.AssertNotError(t, err, "Failed to parse rate-limit-policies.yml") - - // Test that the CertificatesPerName section parsed correctly - certsPerName := policy.CertificatesPerName() - test.AssertEquals(t, certsPerName.Threshold, int64(2)) - test.AssertDeepEquals(t, certsPerName.Overrides, map[string]int64{ - "ratelimit.me": 1, - "lim.it": 0, - "le.wtf": 10000, - "le1.wtf": 10000, - "le2.wtf": 10000, - "le3.wtf": 10000, - "nginx.wtf": 10000, - "good-caa-reserved.com": 10000, - "bad-caa-reserved.com": 10000, - "ecdsa.le.wtf": 10000, - "must-staple.le.wtf": 10000, - }) - test.AssertDeepEquals(t, certsPerName.RegistrationOverrides, map[int64]int64{ - 101: 1000, - }) - - // Test that the RegistrationsPerIP section parsed correctly - regsPerIP := policy.RegistrationsPerIP() - test.AssertEquals(t, regsPerIP.Threshold, int64(10000)) - test.AssertDeepEquals(t, regsPerIP.Overrides, map[string]int64{ - "127.0.0.1": 1000000, - }) - test.AssertEquals(t, len(regsPerIP.RegistrationOverrides), 0) - - // Test that the PendingAuthorizationsPerAccount section parsed correctly - pendingAuthsPerAcct := policy.PendingAuthorizationsPerAccount() - test.AssertEquals(t, pendingAuthsPerAcct.Threshold, int64(150)) - test.AssertEquals(t, len(pendingAuthsPerAcct.Overrides), 0) - test.AssertEquals(t, len(pendingAuthsPerAcct.RegistrationOverrides), 0) - - // Test that the CertificatesPerFQDN section parsed correctly - certsPerFQDN := policy.CertificatesPerFQDNSet() - test.AssertEquals(t, certsPerFQDN.Threshold, int64(6)) - test.AssertDeepEquals(t, certsPerFQDN.Overrides, map[string]int64{ - "le.wtf": 10000, - "le1.wtf": 10000, - "le2.wtf": 10000, - "le3.wtf": 10000, - "le.wtf,le1.wtf": 10000, - "good-caa-reserved.com": 10000, - "nginx.wtf": 10000, - "ecdsa.le.wtf": 10000, - "must-staple.le.wtf": 10000, - }) - test.AssertEquals(t, len(certsPerFQDN.RegistrationOverrides), 0) - certsPerFQDNFast := policy.CertificatesPerFQDNSetFast() - test.AssertEquals(t, certsPerFQDNFast.Threshold, int64(2)) - test.AssertDeepEquals(t, certsPerFQDNFast.Overrides, map[string]int64{ - "le.wtf": 100, - }) - test.AssertEquals(t, len(certsPerFQDNFast.RegistrationOverrides), 0) - - // Test that loading invalid YAML generates an error - err = policy.LoadPolicies([]byte("err")) - test.AssertError(t, err, "Failed to generate error loading invalid yaml policy file") - // Re-check a field of policy to make sure a LoadPolicies error doesn't - // corrupt the existing policies - test.AssertDeepEquals(t, policy.RegistrationsPerIP().Overrides, map[string]int64{ - "127.0.0.1": 1000000, - }) - - // Test that the RateLimitConfig accessors do not panic when there has been no - // `LoadPolicy` call, and instead return empty RateLimitPolicy objects with default - // values. 
-	emptyPolicy := New()
-	test.AssertEquals(t, emptyPolicy.CertificatesPerName().Threshold, int64(0))
-	test.AssertEquals(t, emptyPolicy.RegistrationsPerIP().Threshold, int64(0))
-	test.AssertEquals(t, emptyPolicy.RegistrationsPerIP().Threshold, int64(0))
-	test.AssertEquals(t, emptyPolicy.PendingAuthorizationsPerAccount().Threshold, int64(0))
-	test.AssertEquals(t, emptyPolicy.CertificatesPerFQDNSet().Threshold, int64(0))
-}
diff --git a/ratelimits/README.md b/ratelimits/README.md
new file mode 100644
index 00000000000..a16427d0a4e
--- /dev/null
+++ b/ratelimits/README.md
@@ -0,0 +1,213 @@
+# Configuring and Storing Key-Value Rate Limits
+
+## Rate Limit Structure
+
+All rate limits use a token-bucket model. The metaphor is that each limit is
+represented by a bucket which holds tokens. Each request removes some number of
+tokens from the bucket, or is denied if there aren't enough tokens to remove.
+Over time, new tokens are added to the bucket at a steady rate, until the
+bucket is full. The _burst_ parameter of a rate limit indicates the maximum
+capacity of a bucket: how many tokens can it hold before new ones stop being
+added. Therefore, this also indicates how many requests can be made in a single
+burst before a full bucket is completely emptied. The _count_ and _period_
+parameters indicate the rate at which new tokens are added to a bucket: every
+period, count tokens will be added. Therefore, these also indicate the
+steady-state rate at which a client which has exhausted its quota can make
+requests: one token every (period / count) duration.
+
+## Default Limit Settings
+
+Each key directly corresponds to a `Name` enumeration as detailed in
+`//ratelimits/names.go`. The `Name` enum is used to identify the particular
+limit. The parameters of a default limit are the values that will be used for
+all buckets that do not have an explicit override (see below).
+
+```yaml
+NewRegistrationsPerIPAddress:
+  burst: 20
+  count: 20
+  period: 1s
+NewOrdersPerAccount:
+  burst: 300
+  count: 300
+  period: 180m
+```
+
+## Override Limit Settings
+
+Each entry in the override list is a map, where the key is a limit name,
+corresponding to the `Name` enum of the limit, and the value is a set of
+overridden parameters. These parameters are applicable to a specific list of
+IDs included in each entry. It's important that the formatting of these IDs
+matches the ID format associated with their respective limit's `Name`. For more
+details on the relationship of ID format to limit `Name`s, please refer to the
+documentation of each `Name` in the `//ratelimits/names.go` file or the
+[ratelimits package documentation](https://pkg.go.dev/github.com/letsencrypt/boulder/ratelimits#Name).
+
+```yaml
+- NewRegistrationsPerIPAddress:
+    burst: 20
+    count: 40
+    period: 1s
+    ids:
+      - 10.0.0.2
+      - 10.0.0.5
+- NewOrdersPerAccount:
+    burst: 300
+    count: 600
+    period: 180m
+    ids:
+      - 12345678
+      - 87654321
+```
+
+The above example overrides the default limits for specific subscribers. In
+both cases the count of requests per period is doubled, but the burst capacity
+is explicitly configured to match the default rate limit.
+
+### Id Formats in Limit Override Settings
+
+Id formats vary based on the `Name` enumeration. Below are examples for each
+format:
+
+#### ipAddress
+
+A valid IPv4 or IPv6 address.
+
+Examples:
+  - `10.0.0.1`
+  - `2001:0db8:0000:0000:0000:ff00:0042:8329`
+
+#### ipv6RangeCIDR
+
+A valid IPv6 range in CIDR notation with a /48 mask. A /48 range is typically
+assigned to a single subscriber.
+
+Example: `2001:0db8:0000::/48`
+
+#### regId
+
+An ACME account registration ID.
+
+Example: `12345678`
+
+#### identValue
+
+A valid ACME identifier value, i.e. an FQDN or IP address.
+
+Examples:
+  - `www.example.com`
+  - `192.168.1.1`
+  - `2001:db8:eeee::1`
+
+#### domainOrCIDR
+
+A valid eTLD+1 domain name, or an IP address. IPv6 addresses must be the lowest
+address in their /64, i.e. their last 64 bits must be zero; the override will
+apply to the entire /64. Do not include the CIDR mask.
+
+Examples:
+  - `example.com`
+  - `192.168.1.0`
+  - `2001:db8:eeee:eeee::`
+
+#### fqdnSet
+
+A comma-separated list of identifier values.
+
+Example: `192.168.1.1,example.com,example.org`
+
+## Bucket Key Definitions
+
+A bucket key is used to look up the bucket for a given limit and
+subscriber. Bucket keys are formatted similarly to the overrides but with a
+slight difference: the limit Names do not carry the string form of each limit.
+Instead, they apply the `Name` enum equivalent for every limit.
+
+So, instead of:
+
+```
+NewOrdersPerAccount:12345678
+```
+
+The corresponding bucket key for regId 12345678 would look like this:
+
+```
+6:12345678
+```
+
+When loaded from a file, the keys for the default/override limits undergo the
+same interning process as the aforementioned subscriber bucket keys. This
+eliminates the need for redundant conversions when fetching each
+default/override limit.
+
+## How Limits are Applied
+
+Although rate limit buckets are configured in terms of tokens, we do not
+actually keep track of the number of tokens in each bucket. Instead, we track
+the Theoretical Arrival Time (TAT) at which the bucket will be full again. If
+the TAT is in the past, the bucket is full. If the TAT is in the future, some
+number of tokens have been spent and the bucket is slowly refilling. If the TAT
+is far enough in the future (specifically, more than `burst * (period / count)`
+in the future), then the bucket is completely empty and requests will be
+denied.
+
+Additional terminology:
+
+  - **burst offset** is the duration of time it takes for a bucket to go from
+    empty to full (`burst * (period / count)`).
+  - **emission interval** is the interval at which tokens are added to a bucket
+    (`period / count`). This is also the steady-state rate at which requests
+    can be made without being denied even once the burst has been exhausted.
+  - **cost** is the number of tokens removed from a bucket for a single
+    request.
+  - **cost increment** is the duration of time the TAT is advanced to account
+    for the cost of the request (`cost * emission interval`).
+
+For the purposes of this example, subscribers originating from a specific IPv4
+address are allowed 20 requests to the newFoo endpoint per second, with a
+maximum burst of 20 requests at any point in time, or:
+
+```yaml
+- NewFoosPerIPAddress:
+    burst: 20
+    count: 20
+    period: 1s
+    ids:
+      - 172.23.45.22
+```
+
+A subscriber calls the newFoo endpoint for the first time with an IP address of
+172.23.45.22. Here's what happens:
+
+1. The subscriber's IP address is used to generate a bucket key in the form of
+   'NewFoosPerIPAddress:172.23.45.22'.
+
+2. The request is approved and the 'NewFoosPerIPAddress:172.23.45.22' bucket is
+   initialized with 19 tokens, as 1 token has been removed to account for the
+   cost of the current request. To accomplish this, the initial TAT is set to
+   the current time plus the _cost increment_ (which is 1/20th of a second if
+   we are limiting to 20 requests per second).
+3. Bucket 'NewFoosPerIPAddress:172.23.45.22':
+     - will reset to full in 50ms (1/20th of a second),
+     - will allow another newFoo request immediately,
+     - will allow between 1 and 19 more requests in the next 50ms,
+     - will reject the 20th request made in the next 50ms,
+     - and will allow 1 request every 50ms, indefinitely.
+
+The subscriber makes another request 5ms later:
+
+4. The TAT at bucket key 'NewFoosPerIPAddress:172.23.45.22' is compared against
+   the current time and the _burst offset_. The current time is greater than
+   the TAT minus the burst offset. Therefore, the request is approved.
+
+5. The TAT at bucket key 'NewFoosPerIPAddress:172.23.45.22' is advanced by the
+   cost increment to account for the cost of the request.
+
+The subscriber makes a total of 19 requests over the next 44ms:
+
+6. The first 18 of these are approved as before. For the 19th, the current time
+   is less than the updated TAT at bucket key
+   'NewFoosPerIPAddress:172.23.45.22' minus the burst offset, thus the request
+   is rejected.
+
+This mechanism allows for bursts of traffic but also ensures that the average
+rate of requests stays within the prescribed limits over time.
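+
+To make the arithmetic above concrete, the following is a minimal,
+self-contained sketch of the allow/deny decision. The helper name `decide` is
+hypothetical; it mirrors the description above rather than the exact internals
+of `//ratelimits/gcra.go`:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// decide reports whether a request of the given cost is allowed, and returns
+// the updated TAT. All values are in nanoseconds; a TAT at or before now means
+// the bucket is full.
+func decide(tat, now, emissionInterval, burstOffset, cost int64) (bool, int64) {
+	if tat < now {
+		tat = now // don't accrue capacity from the past
+	}
+	newTAT := tat + cost*emissionInterval // advance by the cost increment
+	if now-(newTAT-burstOffset) < 0 {
+		return false, tat // not enough capacity; leave the TAT unchanged
+	}
+	return true, newTAT
+}
+
+func main() {
+	// burst: 20, count: 20, period: 1s, as in the newFoo example above.
+	emission := time.Second.Nanoseconds() / 20 // one token per 50ms
+	burstOffset := 20 * emission               // 1s from empty to full
+
+	var tat, now int64 // a zero TAT: the bucket starts full
+	for i := 1; i <= 21; i++ {
+		allowed, newTAT := decide(tat, now, emission, burstOffset, 1)
+		fmt.Printf("request %d: allowed=%v\n", i, allowed)
+		tat = newTAT
+	}
+	// Requests 1-20 are allowed; request 21 is denied until 50ms pass.
+}
+```
+
+Note how denial leaves the TAT untouched: a rejected request consumes no
+capacity.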
diff --git a/ratelimits/gcra.go b/ratelimits/gcra.go
new file mode 100644
index 00000000000..7ef489dce10
--- /dev/null
+++ b/ratelimits/gcra.go
@@ -0,0 +1,107 @@
+package ratelimits
+
+import (
+	"time"
+
+	"github.com/jmhodges/clock"
+)
+
+// maybeSpend uses the GCRA algorithm to decide whether to allow a request. It
+// returns a Decision struct with the result of the decision and the updated
+// TAT. The cost must be 0 or greater and <= the burst capacity of the limit.
+func maybeSpend(clk clock.Clock, txn Transaction, tat time.Time) *Decision {
+	if txn.cost < 0 || txn.cost > txn.limit.Burst {
+		// The condition above is the union of the conditions checked in Check
+		// and Spend methods of Limiter. If this panic is reached, it means that
+		// the caller has introduced a bug.
+		panic("invalid cost for maybeSpend")
+	}
+
+	// If the TAT is in the future, use it as the starting point for the
+	// calculation. Otherwise, use the current time. This is to prevent the
+	// bucket from being filled with capacity from the past.
+	nowUnix := clk.Now().UnixNano()
+	tatUnix := max(nowUnix, tat.UnixNano())
+
+	// Compute the cost increment.
+	costIncrement := txn.limit.emissionInterval * txn.cost
+
+	// Deduct the cost to find the new TAT and residual capacity.
+	newTAT := tatUnix + costIncrement
+	difference := nowUnix - (newTAT - txn.limit.burstOffset)
+
+	if difference < 0 {
+		// Too little capacity to satisfy the cost, deny the request.
+		residual := (nowUnix - (tatUnix - txn.limit.burstOffset)) / txn.limit.emissionInterval
+		return &Decision{
+			allowed:     false,
+			remaining:   residual,
+			retryIn:     -time.Duration(difference),
+			resetIn:     time.Duration(tatUnix - nowUnix),
+			newTAT:      time.Unix(0, tatUnix).UTC(),
+			transaction: txn,
+		}
+	}
+
+	// There is enough capacity to satisfy the cost, allow the request.
+	var retryIn time.Duration
+	residual := difference / txn.limit.emissionInterval
+	if difference < costIncrement {
+		retryIn = time.Duration(costIncrement - difference)
+	}
+	return &Decision{
+		allowed:     true,
+		remaining:   residual,
+		retryIn:     retryIn,
+		resetIn:     time.Duration(newTAT - nowUnix),
+		newTAT:      time.Unix(0, newTAT).UTC(),
+		transaction: txn,
+	}
+}
+
+// maybeRefund uses the Generic Cell Rate Algorithm (GCRA) to attempt to refund
+// the cost of a request which was previously spent. The refund cost must be 0
+// or greater. A cost will only be refunded up to the burst capacity of the
+// limit. A partial refund is still considered successful.
+func maybeRefund(clk clock.Clock, txn Transaction, tat time.Time) *Decision {
+	if txn.cost < 0 || txn.cost > txn.limit.Burst {
+		// The condition above is checked in the Refund method of Limiter. If
+		// this panic is reached, it means that the caller has introduced a bug.
+		panic("invalid cost for maybeRefund")
+	}
+	nowUnix := clk.Now().UnixNano()
+	tatUnix := tat.UnixNano()
+
+	// The TAT must be in the future to refund capacity.
+	if nowUnix > tatUnix {
+		// The TAT is in the past, therefore the bucket is full.
+		return &Decision{
+			allowed:     false,
+			remaining:   txn.limit.Burst,
+			retryIn:     time.Duration(0),
+			resetIn:     time.Duration(0),
+			newTAT:      tat,
+			transaction: txn,
+		}
+	}
+
+	// Compute the refund increment.
+	refundIncrement := txn.limit.emissionInterval * txn.cost
+
+	// Subtract the refund increment from the TAT to find the new TAT.
+	// Ensure the new TAT is not earlier than now.
+	newTAT := max(tatUnix-refundIncrement, nowUnix)
+
+	// Calculate the new capacity.
+	difference := nowUnix - (newTAT - txn.limit.burstOffset)
+	residual := difference / txn.limit.emissionInterval
+
+	return &Decision{
+		allowed:     newTAT != tatUnix,
+		remaining:   residual,
+		retryIn:     time.Duration(0),
+		resetIn:     time.Duration(newTAT - nowUnix),
+		newTAT:      time.Unix(0, newTAT).UTC(),
+		transaction: txn,
+	}
+}
diff --git a/ratelimits/gcra_test.go b/ratelimits/gcra_test.go
new file mode 100644
index 00000000000..8c48eb23a92
--- /dev/null
+++ b/ratelimits/gcra_test.go
@@ -0,0 +1,236 @@
+package ratelimits
+
+import (
+	"testing"
+	"time"
+
+	"github.com/jmhodges/clock"
+
+	"github.com/letsencrypt/boulder/config"
+	"github.com/letsencrypt/boulder/test"
+)
+
+func TestDecide(t *testing.T) {
+	clk := clock.NewFake()
+	limit := &Limit{Burst: 10, Count: 1, Period: config.Duration{Duration: time.Second}}
+	limit.precompute()
+
+	// Begin by using 1 of our 10 requests.
+	d := maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, clk.Now())
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(9))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Second)
+	// Transaction is set when we're allowed.
+	test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true, false})
+
+	// Immediately use another 9 of our remaining requests.
+	d = maybeSpend(clk, Transaction{"test", limit, 9, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(0))
+	// We should have to wait 1 second before we can use another request but we
+	// used 9 so we should have to wait 9 seconds to make an identical request.
+	test.AssertEquals(t, d.retryIn, time.Second*9)
+	test.AssertEquals(t, d.resetIn, time.Second*10)
+
+	// Our new TAT should be 10 seconds (limit.Burst) in the future.
+	test.AssertEquals(t, d.newTAT, clk.Now().Add(time.Second*10))
+
+	// Let's try using just 1 more request without waiting.
+	d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, !d.allowed, "should not be allowed")
+	test.AssertEquals(t, d.remaining, int64(0))
+	test.AssertEquals(t, d.retryIn, time.Second)
+	test.AssertEquals(t, d.resetIn, time.Second*10)
+	// Transaction is set when we're denied.
+	test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true, false})
+
+	// Let's try being exactly as patient as we're told to be.
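+	// Waiting exactly retryIn (one emission interval here) should earn back
+	// exactly one token and no more.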
+ clk.Add(d.retryIn) + d = maybeSpend(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT) + test.AssertEquals(t, d.remaining, int64(1)) + + // We are 1 second in the future, we should have 1 new request. + d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.retryIn, time.Second) + test.AssertEquals(t, d.resetIn, time.Second*10) + + // Let's try waiting (10 seconds) for our whole bucket to refill. + clk.Add(d.resetIn) + + // We should have 10 new requests. If we use 1 we should have 9 remaining. + d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(9)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Wait just shy of how long we're told to wait for refilling. + clk.Add(d.resetIn - time.Millisecond) + + // We should still have 9 remaining because we're still 1ms shy of the + // refill time. + d = maybeSpend(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(9)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Millisecond) + + // Spending 0 simply informed us that we still have 9 remaining, let's see + // what we have after waiting 20 hours. + clk.Add(20 * time.Hour) + + // C'mon, big money, no whammies, no whammies, STOP! + d = maybeSpend(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(10)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Duration(0)) + + // Turns out that the most we can accrue is 10 (limit.Burst). Let's empty + // this bucket out so we can try something else. + d = maybeSpend(clk, Transaction{"test", limit, 10, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + // We should have to wait 1 second before we can use another request but we + // used 10 so we should have to wait 10 seconds to make an identical + // request. + test.AssertEquals(t, d.retryIn, time.Second*10) + test.AssertEquals(t, d.resetIn, time.Second*10) + + // If you spend 0 while you have 0 you should get 0. + d = maybeSpend(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT) + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Second*10) + + // We don't play by the rules, we spend 1 when we have 0. + d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT) + test.Assert(t, !d.allowed, "should not be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.retryIn, time.Second) + test.AssertEquals(t, d.resetIn, time.Second*10) + + // Okay, maybe we should play by the rules if we want to get anywhere. + clk.Add(d.retryIn) + + // Our patience pays off, we should have 1 new request. Let's use it. 
+	d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(0))
+	test.AssertEquals(t, d.retryIn, time.Second)
+	test.AssertEquals(t, d.resetIn, time.Second*10)
+
+	// Refill from empty to 5.
+	clk.Add(d.resetIn / 2)
+
+	// Attempt to spend 7 when we only have 5. We should be denied but the
+	// decision should reflect a retry of 2 seconds, the time it would take to
+	// refill from 5 to 7.
+	d = maybeSpend(clk, Transaction{"test", limit, 7, true, true, false}, d.newTAT)
+	test.Assert(t, !d.allowed, "should not be allowed")
+	test.AssertEquals(t, d.remaining, int64(5))
+	test.AssertEquals(t, d.retryIn, time.Second*2)
+	test.AssertEquals(t, d.resetIn, time.Second*5)
+}
+
+func TestMaybeRefund(t *testing.T) {
+	clk := clock.NewFake()
+	limit := &Limit{Burst: 10, Count: 1, Period: config.Duration{Duration: time.Second}}
+	limit.precompute()
+
+	// Begin by using 1 of our 10 requests.
+	d := maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, clk.Now())
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(9))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Second)
+	// Transaction is set when we're refunding.
+	test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true, false})
+
+	// Refund back to 10.
+	d = maybeRefund(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+
+	// Refund 0, we should still have 10.
+	d = maybeRefund(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT)
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+
+	// Spend 1 more of our 10 requests.
+	d = maybeSpend(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(9))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Second)
+
+	// Wait for our bucket to refill.
+	clk.Add(d.resetIn)
+
+	// Attempt to refund from 10 to 11.
+	d = maybeRefund(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, !d.allowed, "should not be allowed")
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+	// Transaction is set when our bucket is full.
+	test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true, false})
+
+	// Spend all 10 of our requests.
+	d = maybeSpend(clk, Transaction{"test", limit, 10, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(0))
+	// We should have to wait 1 second before we can use another request but we
+	// used 10 so we should have to wait 10 seconds to make an identical
+	// request.
+	test.AssertEquals(t, d.retryIn, time.Second*10)
+	test.AssertEquals(t, d.resetIn, time.Second*10)
+
+	// Attempt a refund of 10.
+	d = maybeRefund(clk, Transaction{"test", limit, 10, true, true, false}, d.newTAT)
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+
+	// Wait 11 seconds to catch up to the TAT.
+	clk.Add(11 * time.Second)
+
+	// Attempt to refund to 11, then ensure it's still 10.
+	d = maybeRefund(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, !d.allowed, "should not be allowed")
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+	// Transaction is set when our TAT is in the past.
+	test.AssertEquals(t, d.transaction, Transaction{"test", limit, 1, true, true, false})
+
+	// Spend 5 of our 10 requests, then refund 1.
+	d = maybeSpend(clk, Transaction{"test", limit, 5, true, true, false}, d.newTAT)
+	d = maybeRefund(clk, Transaction{"test", limit, 1, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(6))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+
+	// Wait 2.5 seconds to refill to 8.5 requests.
+	clk.Add(time.Millisecond * 2500)
+
+	// Ensure we have 8.5 requests.
+	d = maybeSpend(clk, Transaction{"test", limit, 0, true, true, false}, d.newTAT)
+	test.Assert(t, d.allowed, "should be allowed")
+	test.AssertEquals(t, d.remaining, int64(8))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	// Check that ResetIn represents the fractional earned request.
+	test.AssertEquals(t, d.resetIn, time.Millisecond*1500)
+
+	// Refund 2 requests, we should only have 10, not 10.5.
+	d = maybeRefund(clk, Transaction{"test", limit, 2, true, true, false}, d.newTAT)
+	test.AssertEquals(t, d.remaining, int64(10))
+	test.AssertEquals(t, d.retryIn, time.Duration(0))
+	test.AssertEquals(t, d.resetIn, time.Duration(0))
+}
diff --git a/ratelimits/limit.go b/ratelimits/limit.go
new file mode 100644
index 00000000000..7f093a5387b
--- /dev/null
+++ b/ratelimits/limit.go
@@ -0,0 +1,532 @@
+package ratelimits
+
+import (
+	"context"
+	"encoding/csv"
+	"errors"
+	"fmt"
+	"net/netip"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/letsencrypt/boulder/config"
+	"github.com/letsencrypt/boulder/core"
+	"github.com/letsencrypt/boulder/identifier"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/strictyaml"
+)
+
+// errLimitDisabled indicates that the limit name specified is valid but is not
+// currently configured.
+var errLimitDisabled = errors.New("limit disabled")
+
+// LimitConfig defines the exportable configuration for a rate limit or a rate
+// limit override, without a `limit`'s internal fields.
+//
+// The zero value of this struct is invalid, because some of the fields must be
+// greater than zero.
+type LimitConfig struct {
+	// Burst specifies maximum concurrent allowed requests at any given time. It
+	// must be greater than zero.
+	Burst int64
+
+	// Count is the number of requests allowed per period. It must be greater
+	// than zero.
+	Count int64
+
+	// Period is the duration of time in which the count (of requests) is
+	// allowed. It must be greater than zero.
+	Period config.Duration
+}
+
+type LimitConfigs map[string]*LimitConfig
+
+// Limit defines the configuration for a rate limit or a rate limit override.
+// +// The zero value of this struct is invalid, because some of the fields must be +// greater than zero. It and several of its fields are exported to support admin +// tooling used during the migration from overrides.yaml to the overrides +// database table. +type Limit struct { + // Burst specifies maximum concurrent allowed requests at any given time. It + // must be greater than zero. + Burst int64 + + // Count is the number of requests allowed per period. It must be greater + // than zero. + Count int64 + + // Period is the duration of time in which the count (of requests) is + // allowed. It must be greater than zero. + Period config.Duration + + // Name is the name of the limit. It must be one of the Name enums defined + // in this package. + Name Name + + // Comment is an optional field that can be used to provide additional + // context for an override. It is not used for default limits. + Comment string + + // emissionInterval is the interval, in nanoseconds, at which tokens are + // added to a bucket (period / count). This is also the steady-state rate at + // which requests can be made without being denied even once the burst has + // been exhausted. This is precomputed to avoid doing the same calculation + // on every request. + emissionInterval int64 + + // burstOffset is the duration of time, in nanoseconds, it takes for a + // bucket to go from empty to full (burst * (period / count)). This is + // precomputed to avoid doing the same calculation on every request. + burstOffset int64 + + // isOverride is true if the limit is an override. + isOverride bool +} + +// precompute calculates the emissionInterval and burstOffset for the limit. +func (l *Limit) precompute() { + l.emissionInterval = l.Period.Nanoseconds() / l.Count + l.burstOffset = l.emissionInterval * l.Burst +} + +func ValidateLimit(l *Limit) error { + if l.Burst <= 0 { + return fmt.Errorf("invalid burst '%d', must be > 0", l.Burst) + } + if l.Count <= 0 { + return fmt.Errorf("invalid count '%d', must be > 0", l.Count) + } + if l.Period.Duration <= 0 { + return fmt.Errorf("invalid period '%s', must be > 0", l.Period) + } + return nil +} + +type Limits map[string]*Limit + +// loadDefaultsFromFile unmarshals the defaults YAML file at path into a map of +// limits. +func loadDefaultsFromFile(path string) (LimitConfigs, error) { + lm := make(LimitConfigs) + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + err = strictyaml.Unmarshal(data, &lm) + if err != nil { + return nil, err + } + return lm, nil +} + +type overrideYAML struct { + LimitConfig `yaml:",inline"` + // Ids is a list of ids that this override applies to. + Ids []struct { + Id string `yaml:"id"` + // Comment is an optional field that can be used to provide additional + // context for the override. + Comment string `yaml:"comment,omitempty"` + } `yaml:"ids"` +} + +type overridesYAML []map[string]overrideYAML + +// loadOverridesFromFile unmarshals the YAML file at path into a map of +// overrides. +func loadOverridesFromFile(path string) (overridesYAML, error) { + ov := overridesYAML{} + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + err = strictyaml.Unmarshal(data, &ov) + if err != nil { + return nil, err + } + return ov, nil +} + +// parseOverrideNameId is broken out for ease of testing. +func parseOverrideNameId(key string) (Name, string, error) { + if !strings.Contains(key, ":") { + // Avoids a potential panic in strings.SplitN below. 
+ return Unknown, "", fmt.Errorf("invalid override %q, must be formatted 'name:id'", key) + } + nameAndId := strings.SplitN(key, ":", 2) + nameStr := nameAndId[0] + if nameStr == "" { + return Unknown, "", fmt.Errorf("empty name in override %q, must be formatted 'name:id'", key) + } + + name, ok := StringToName[nameStr] + if !ok { + return Unknown, "", fmt.Errorf("unrecognized name %q in override limit %q, must be one of %v", nameStr, key, LimitNames) + } + id := nameAndId[1] + if id == "" { + return Unknown, "", fmt.Errorf("empty id in override %q, must be formatted 'name:id'", key) + } + return name, id, nil +} + +// parseOverrideNameEnumId is like parseOverrideNameId, but it expects the +// key to be formatted as 'name:id', where 'name' is a Name enum string and 'id' +// is a string identifier. It returns an error if either part is missing or invalid. +func parseOverrideNameEnumId(key string) (Name, string, error) { + if !strings.Contains(key, ":") { + // Avoids a potential panic in strings.SplitN below. + return Unknown, "", fmt.Errorf("invalid override %q, must be formatted 'name:id'", key) + } + nameStrAndId := strings.SplitN(key, ":", 2) + if len(nameStrAndId) != 2 { + return Unknown, "", fmt.Errorf("invalid override %q, must be formatted 'name:id'", key) + } + + nameInt, err := strconv.Atoi(nameStrAndId[0]) + if err != nil { + return Unknown, "", fmt.Errorf("invalid name %q in override limit %q, must be an integer", nameStrAndId[0], key) + } + name := Name(nameInt) + if !name.isValid() { + return Unknown, "", fmt.Errorf("invalid name %q in override limit %q, must be one of %v", nameStrAndId[0], key, LimitNames) + + } + id := nameStrAndId[1] + if id == "" { + return Unknown, "", fmt.Errorf("empty id in override %q, must be formatted 'name:id'", key) + } + return name, id, nil +} + +// parseOverrideLimits validates a YAML list of override limits. It must be +// formatted as a list of maps, where each map has a single key representing the +// limit name and a value that is a map containing the limit fields and an +// additional 'ids' field that is a list of ids that this override applies to. +func parseOverrideLimits(newOverridesYAML overridesYAML) (Limits, error) { + parsed := make(Limits) + + for _, ov := range newOverridesYAML { + for k, v := range ov { + name, ok := StringToName[k] + if !ok { + return nil, fmt.Errorf("unrecognized name %q in override limit, must be one of %v", k, LimitNames) + } + + for _, entry := range v.Ids { + id, err := hydrateOverrideLimit(entry.Id, name) + if err != nil { + return nil, fmt.Errorf( + "validating name %s and id %q for override limit %q: %w", name, id, k, err) + } + + lim := &Limit{ + Burst: v.Burst, + Count: v.Count, + Period: v.Period, + Name: name, + Comment: entry.Comment, + isOverride: true, + } + + err = ValidateLimit(lim) + if err != nil { + return nil, fmt.Errorf( + "validating name %s and id %q for override limit %q: %w", name, id, k, err) + } + + parsed[joinWithColon(name.EnumString(), id)] = lim + } + } + } + return parsed, nil +} + +// hydrateOverrideLimit validates the limit Name and override bucket key. It +// returns the correct bucket key to use in-memory. 
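+// For example (illustrative values): a CertificatesPerDomain override id of
+// "2001:db8::1" is rewritten to its covering /64 prefix, "2001:db8::/64", and
+// a CertificatesPerFQDNSet override id of "a.example,b.example" is rewritten
+// to the hex-encoded hash of that identifier set.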
+func hydrateOverrideLimit(bucketKey string, limitName Name) (string, error) {
+	if !limitName.isValid() {
+		return "", fmt.Errorf("unrecognized limit name %d", limitName)
+	}
+
+	err := validateIdForName(limitName, bucketKey)
+	if err != nil {
+		return "", err
+	}
+
+	// Interpret and compute a new in-memory bucket key for two rate limits,
+	// since their raw keys aren't practical to store in a config file or
+	// database entry.
+	switch limitName {
+	case CertificatesPerDomain:
+		// Convert IP addresses to their covering /32 (IPv4) or /64
+		// (IPv6) prefixes in CIDR notation.
+		ip, err := netip.ParseAddr(bucketKey)
+		if err == nil {
+			prefix, err := coveringIPPrefix(limitName, ip)
+			if err != nil {
+				return "", fmt.Errorf("computing prefix for IP address %q: %w", bucketKey, err)
+			}
+			bucketKey = prefix.String()
+		}
+	case CertificatesPerFQDNSet:
+		// Compute the hash of a comma-separated list of identifier values.
+		bucketKey = fmt.Sprintf("%x", core.HashIdentifiers(identifier.FromStringSlice(strings.Split(bucketKey, ","))))
+	}
+
+	return bucketKey, nil
+}
+
+// parseDefaultLimits validates a map of default limits and rekeys it by 'Name'.
+func parseDefaultLimits(newDefaultLimits LimitConfigs) (Limits, error) {
+	parsed := make(Limits)
+
+	for k, v := range newDefaultLimits {
+		name, ok := StringToName[k]
+		if !ok {
+			return nil, fmt.Errorf("unrecognized name %q in default limit, must be one of %v", k, LimitNames)
+		}
+
+		lim := &Limit{
+			Burst:  v.Burst,
+			Count:  v.Count,
+			Period: v.Period,
+			Name:   name,
+		}
+
+		err := ValidateLimit(lim)
+		if err != nil {
+			return nil, fmt.Errorf("parsing default limit %q: %w", k, err)
+		}
+
+		lim.precompute()
+		parsed[name.EnumString()] = lim
+	}
+	return parsed, nil
+}
+
+// OverridesRefresher returns a fresh set of override limits. It is passed a
+// gauge for reporting refresh errors and a logger.
+type OverridesRefresher func(context.Context, prometheus.Gauge, blog.Logger) (Limits, error)
+
+type limitRegistry struct {
+	// defaults stores default limits by 'name'.
+	defaults Limits
+
+	// overrides stores override limits by 'name:id'.
+	overrides       Limits
+	overridesLoaded bool
+
+	// refreshOverrides is a function to refresh override limits.
+	refreshOverrides OverridesRefresher
+
+	overridesTimestamp prometheus.Gauge
+	overridesErrors    prometheus.Gauge
+	overridesPerLimit  prometheus.GaugeVec
+
+	logger blog.Logger
+}
+
+// getLimit returns the limit specified by name and bucketKey; name is
+// required, bucketKey is optional. If bucketKey is empty, the default limit
+// for the specified name is returned. If no default limit exists for the
+// specified name, errLimitDisabled is returned.
+func (l *limitRegistry) getLimit(name Name, bucketKey string) (*Limit, error) {
+	if !name.isValid() {
+		// This should never happen. Callers should only be specifying the limit
+		// Name enums defined in this package.
+		return nil, fmt.Errorf("specified name enum %q is invalid", name)
+	}
+	if bucketKey != "" {
+		// Check for override.
+		ol, ok := l.overrides[bucketKey]
+		if ok {
+			return ol, nil
+		}
+	}
+	dl, ok := l.defaults[name.EnumString()]
+	if ok {
+		return dl, nil
+	}
+	return nil, errLimitDisabled
+}
+
+// loadOverrides replaces this registry's overrides with a new dataset.
+func (l *limitRegistry) loadOverrides(ctx context.Context) error {
+	newOverrides, err := l.refreshOverrides(ctx, l.overridesErrors, l.logger)
+	if err != nil {
+		return err
+	}
+	l.overridesLoaded = true
+
+	if len(newOverrides) < 1 {
+		l.logger.Warning("loading overrides: no valid overrides")
+		// If it's an empty set, don't replace any current overrides.
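+		// Keeping the last-known-good overrides is preferable to dropping
+		// them all due to a transient load problem.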
+		return nil
+	}
+
+	newOverridesPerLimit := make(map[Name]float64)
+	for _, override := range newOverrides {
+		override.precompute()
+		newOverridesPerLimit[override.Name]++
+	}
+
+	l.overrides = newOverrides
+	l.overridesTimestamp.SetToCurrentTime()
+	for rlName, rlString := range nameToString {
+		l.overridesPerLimit.WithLabelValues(rlString).Set(newOverridesPerLimit[rlName])
+	}
+
+	return nil
+}
+
+// loadOverridesWithRetry tries to loadOverrides, retrying upon failure with
+// exponential backoff capped at 15 seconds.
+func (l *limitRegistry) loadOverridesWithRetry(ctx context.Context) error {
+	retries := 0
+	for {
+		err := l.loadOverrides(ctx)
+		if err == nil {
+			return nil
+		}
+		l.logger.Errf("loading overrides: %v", err)
+		retries++
+		select {
+		case <-time.After(core.RetryBackoff(retries, time.Second/6, time.Second*15, 2)):
+		case <-ctx.Done():
+			return err
+		}
+	}
+}
+
+// NewRefresher loads, and periodically refreshes, overrides using this
+// registry's refreshOverrides function.
+func (l *limitRegistry) NewRefresher(interval time.Duration) context.CancelFunc {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	go func() {
+		err := l.loadOverridesWithRetry(ctx)
+		if err != nil {
+			l.logger.Errf("loading overrides (initial): %v", err)
+		}
+
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				err := l.loadOverridesWithRetry(ctx)
+				if err != nil {
+					l.logger.Errf("loading overrides (refresh): %v", err)
+				}
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return cancel
+}
+
+// LoadOverridesByBucketKey loads the overrides YAML at the supplied path,
+// parses it with the existing helpers, and returns the resulting limits map
+// keyed by 'enum:id'. This function is exported to support admin tooling
+// used during the migration from overrides.yaml to the overrides database
+// table.
+func LoadOverridesByBucketKey(path string) (Limits, error) {
+	ovs, err := loadOverridesFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return parseOverrideLimits(ovs)
+}
+
+// DumpOverrides writes the provided overrides to CSV at the supplied path. Each
+// override is written as a single row, one per ID. Rows are sorted in the
+// following order:
+// - Name (ascending)
+// - Count (descending)
+// - Burst (descending)
+// - Period (ascending)
+// - Comment (ascending)
+// - ID (ascending)
+//
+// This function supports admin tooling that routinely exports the overrides
+// table for investigation or auditing.
+func DumpOverrides(path string, overrides Limits) error {
+	type row struct {
+		name    string
+		id      string
+		count   int64
+		burst   int64
+		period  string
+		comment string
+	}
+
+	var rows []row
+	for bucketKey, limit := range overrides {
+		name, id, err := parseOverrideNameEnumId(bucketKey)
+		if err != nil {
+			return err
+		}
+
+		rows = append(rows, row{
+			name:    name.String(),
+			id:      id,
+			count:   limit.Count,
+			burst:   limit.Burst,
+			period:  limit.Period.Duration.String(),
+			comment: limit.Comment,
+		})
+	}
+
+	sort.Slice(rows, func(i, j int) bool {
+		// Sort by limit name in ascending order.
+		if rows[i].name != rows[j].name {
+			return rows[i].name < rows[j].name
+		}
+		// Sort by count in descending order (higher counts first).
+		if rows[i].count != rows[j].count {
+			return rows[i].count > rows[j].count
+		}
+		// Sort by burst in descending order (higher bursts first).
+		if rows[i].burst != rows[j].burst {
+			return rows[i].burst > rows[j].burst
+		}
+		// Sort by period in ascending order.
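+		// Note: periods are compared as strings, so the ordering is
+		// lexicographic rather than by duration; this tiebreaker only applies
+		// when name, count, and burst are all equal.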
+		if rows[i].period != rows[j].period {
+			return rows[i].period < rows[j].period
+		}
+		// Sort by comment in ascending order.
+		if rows[i].comment != rows[j].comment {
+			return rows[i].comment < rows[j].comment
+		}
+		// Sort by ID in ascending order.
+		return rows[i].id < rows[j].id
+	})
+
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	w := csv.NewWriter(f)
+	err = w.Write([]string{"name", "id", "count", "burst", "period", "comment"})
+	if err != nil {
+		return err
+	}
+
+	for _, r := range rows {
+		err := w.Write([]string{r.name, r.id, strconv.FormatInt(r.count, 10), strconv.FormatInt(r.burst, 10), r.period, r.comment})
+		if err != nil {
+			return err
+		}
+	}
+	w.Flush()
+
+	return w.Error()
+}
diff --git a/ratelimits/limit_test.go b/ratelimits/limit_test.go
new file mode 100644
index 00000000000..933fbff1f55
--- /dev/null
+++ b/ratelimits/limit_test.go
@@ -0,0 +1,617 @@
+package ratelimits
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/netip"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	io_prometheus_client "github.com/prometheus/client_model/go"
+
+	"github.com/letsencrypt/boulder/config"
+	"github.com/letsencrypt/boulder/core"
+	"github.com/letsencrypt/boulder/identifier"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/metrics"
+	"github.com/letsencrypt/boulder/test"
+)
+
+// loadAndParseDefaultLimits is a helper that calls both loadDefaultsFromFile
+// and parseDefaultLimits to handle a YAML file.
+//
+// TODO(#7901): Update the tests to test these functions individually.
+func loadAndParseDefaultLimits(path string) (Limits, error) {
+	fromFile, err := loadDefaultsFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return parseDefaultLimits(fromFile)
+}
+
+// loadAndParseOverrideLimitsFromFile is a helper that calls both
+// loadOverridesFromFile and parseOverrideLimits to handle a YAML file.
+//
+// TODO(#7901): Update the tests to test these functions individually.
+func loadAndParseOverrideLimitsFromFile(path string) (Limits, error) {
+	fromFile, err := loadOverridesFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return parseOverrideLimits(fromFile)
+}
+
+func TestParseOverrideNameId(t *testing.T) {
+	// 'name:ipv4'
+	// Valid IPv4 address.
+	name, id, err := parseOverrideNameId(NewRegistrationsPerIPAddress.String() + ":10.0.0.1")
+	test.AssertNotError(t, err, "should not error")
+	test.AssertEquals(t, name, NewRegistrationsPerIPAddress)
+	test.AssertEquals(t, id, "10.0.0.1")
+
+	// 'name:ipv6range'
+	// Valid IPv6 address range.
+	name, id, err = parseOverrideNameId(NewRegistrationsPerIPv6Range.String() + ":2602:80a:6000::/48")
+	test.AssertNotError(t, err, "should not error")
+	test.AssertEquals(t, name, NewRegistrationsPerIPv6Range)
+	test.AssertEquals(t, id, "2602:80a:6000::/48")
+
+	// Missing colon (this should never happen but we should avoid panicking).
+	_, _, err = parseOverrideNameId(NewRegistrationsPerIPAddress.String() + "10.0.0.1")
+	test.AssertError(t, err, "missing colon")
+
+	// Empty string.
+	_, _, err = parseOverrideNameId("")
+	test.AssertError(t, err, "empty string")
+
+	// Only a colon.
+	_, _, err = parseOverrideNameId(NewRegistrationsPerIPAddress.String() + ":")
+	test.AssertError(t, err, "only a colon")
+
+	// Invalid name.
+	_, _, err = parseOverrideNameId("lol:noexist")
+	test.AssertError(t, err, "invalid enum")
+}
+
+func TestParseOverrideNameEnumId(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name        string
+		input       string
+		wantLimit   Name
+		wantId      string
+		expectError bool
+	}{
+		{
+			name:        "valid IPv4 address",
+			input:       NewRegistrationsPerIPAddress.EnumString() + ":10.0.0.1",
+			wantLimit:   NewRegistrationsPerIPAddress,
+			wantId:      "10.0.0.1",
+			expectError: false,
+		},
+		{
+			name:        "valid IPv6 address range",
+			input:       NewRegistrationsPerIPv6Range.EnumString() + ":2001:0db8:0000::/48",
+			wantLimit:   NewRegistrationsPerIPv6Range,
+			wantId:      "2001:0db8:0000::/48",
+			expectError: false,
+		},
+		{
+			name:        "missing colon",
+			input:       NewRegistrationsPerIPAddress.EnumString() + "10.0.0.1",
+			expectError: true,
+		},
+		{
+			name:        "empty string",
+			input:       "",
+			expectError: true,
+		},
+		{
+			name:        "only a colon",
+			input:       NewRegistrationsPerIPAddress.EnumString() + ":",
+			expectError: true,
+		},
+		{
+			name:        "invalid enum",
+			input:       "lol:noexist",
+			expectError: true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			limit, id, err := parseOverrideNameEnumId(tc.input)
+			if tc.expectError {
+				if err == nil {
+					t.Errorf("expected error for input %q, but got none", tc.input)
+				}
+			} else {
+				test.AssertNotError(t, err, tc.name)
+				test.AssertEquals(t, limit, tc.wantLimit)
+				test.AssertEquals(t, id, tc.wantId)
+			}
+		})
+	}
+}
+
+func TestValidateLimit(t *testing.T) {
+	err := ValidateLimit(&Limit{Burst: 1, Count: 1, Period: config.Duration{Duration: time.Second}})
+	test.AssertNotError(t, err, "valid limit")
+
+	// All of the following are invalid.
+	for _, l := range []*Limit{
+		{Burst: 0, Count: 1, Period: config.Duration{Duration: time.Second}},
+		{Burst: 1, Count: 0, Period: config.Duration{Duration: time.Second}},
+		{Burst: 1, Count: 1, Period: config.Duration{Duration: 0}},
+	} {
+		err = ValidateLimit(l)
+		test.AssertError(t, err, "limit should be invalid")
+	}
+}
+
+func TestLoadAndParseOverrideLimitsFromFile(t *testing.T) {
+	// Load a single valid override limit whose Id is an IP address.
+	l, err := loadAndParseOverrideLimitsFromFile("testdata/working_override.yml")
+	test.AssertNotError(t, err, "valid single override limit")
+	expectKey := joinWithColon(NewRegistrationsPerIPAddress.EnumString(), "64.112.117.1")
+	test.AssertEquals(t, l[expectKey].Burst, int64(40))
+	test.AssertEquals(t, l[expectKey].Count, int64(40))
+	test.AssertEquals(t, l[expectKey].Period.Duration, time.Second)
+
+	// Load a single valid override limit with a 'domainOrCIDR' Id.
+	l, err = loadAndParseOverrideLimitsFromFile("testdata/working_override_regid_domainorcidr.yml")
+	test.AssertNotError(t, err, "valid single override limit with Id of regId:domainOrCIDR")
+	expectKey = joinWithColon(CertificatesPerDomain.EnumString(), "example.com")
+	test.AssertEquals(t, l[expectKey].Burst, int64(40))
+	test.AssertEquals(t, l[expectKey].Count, int64(40))
+	test.AssertEquals(t, l[expectKey].Period.Duration, time.Second)
+
+	// Load multiple valid override limits with IP address and IPv6 range Ids.
+ l, err = loadAndParseOverrideLimitsFromFile("testdata/working_overrides.yml") + test.AssertNotError(t, err, "multiple valid override limits") + expectKey1 := joinWithColon(NewRegistrationsPerIPAddress.EnumString(), "64.112.117.1") + test.AssertEquals(t, l[expectKey1].Burst, int64(40)) + test.AssertEquals(t, l[expectKey1].Count, int64(40)) + test.AssertEquals(t, l[expectKey1].Period.Duration, time.Second) + expectKey2 := joinWithColon(NewRegistrationsPerIPv6Range.EnumString(), "2602:80a:6000::/48") + test.AssertEquals(t, l[expectKey2].Burst, int64(50)) + test.AssertEquals(t, l[expectKey2].Count, int64(50)) + test.AssertEquals(t, l[expectKey2].Period.Duration, time.Second*2) + + // Load multiple valid override limits with 'fqdnSet' Ids, as follows: + // - CertificatesPerFQDNSet:example.com + // - CertificatesPerFQDNSet:example.com,example.net + // - CertificatesPerFQDNSet:example.com,example.net,example.org + entryKey1 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com"})) + entryKey2 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com", "example.net"})) + entryKey3 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"})) + entryKey4 := newFQDNSetBucketKey(CertificatesPerFQDNSet, identifier.ACMEIdentifiers{ + identifier.NewIP(netip.MustParseAddr("2602:80a:6000::1")), + identifier.NewIP(netip.MustParseAddr("9.9.9.9")), + identifier.NewDNS("example.com"), + }) + + l, err = loadAndParseOverrideLimitsFromFile("testdata/working_overrides_regid_fqdnset.yml") + test.AssertNotError(t, err, "multiple valid override limits with 'fqdnSet' Ids") + test.AssertEquals(t, l[entryKey1].Burst, int64(40)) + test.AssertEquals(t, l[entryKey1].Count, int64(40)) + test.AssertEquals(t, l[entryKey1].Period.Duration, time.Second) + test.AssertEquals(t, l[entryKey2].Burst, int64(50)) + test.AssertEquals(t, l[entryKey2].Count, int64(50)) + test.AssertEquals(t, l[entryKey2].Period.Duration, time.Second*2) + test.AssertEquals(t, l[entryKey3].Burst, int64(60)) + test.AssertEquals(t, l[entryKey3].Count, int64(60)) + test.AssertEquals(t, l[entryKey3].Period.Duration, time.Second*3) + test.AssertEquals(t, l[entryKey4].Burst, int64(60)) + test.AssertEquals(t, l[entryKey4].Count, int64(60)) + test.AssertEquals(t, l[entryKey4].Period.Duration, time.Second*4) + + // Path is empty string. + _, err = loadAndParseOverrideLimitsFromFile("") + test.AssertError(t, err, "path is empty string") + test.Assert(t, os.IsNotExist(err), "path is empty string") + + // Path to file which does not exist. + _, err = loadAndParseOverrideLimitsFromFile("testdata/file_does_not_exist.yml") + test.AssertError(t, err, "a file that does not exist ") + test.Assert(t, os.IsNotExist(err), "test file should not exist") + + // Burst cannot be 0. + _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_override_burst_0.yml") + test.AssertError(t, err, "single override limit with burst=0") + test.AssertContains(t, err.Error(), "invalid burst") + + // Id cannot be empty. + _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_override_empty_id.yml") + test.AssertError(t, err, "single override limit with empty id") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Name cannot be empty. 
+ _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_override_empty_name.yml") + test.AssertError(t, err, "single override limit with empty name") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Name must be a string representation of a valid Name enumeration. + _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_override_invalid_name.yml") + test.AssertError(t, err, "single override limit with invalid name") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Multiple entries, second entry has a bad name. + _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_overrides_second_entry_bad_name.yml") + test.AssertError(t, err, "multiple override limits, second entry is bad") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Multiple entries, third entry has id of "lol", instead of an IPv4 address. + _, err = loadAndParseOverrideLimitsFromFile("testdata/busted_overrides_third_entry_bad_id.yml") + test.AssertError(t, err, "multiple override limits, third entry has bad Id value") + test.Assert(t, !os.IsNotExist(err), "test file should exist") +} + +func TestLoadOverrides(t *testing.T) { + mockLog := blog.NewMock() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "../test/config-next/ratelimit-overrides.yml", metrics.NoopRegisterer, mockLog) + test.AssertNotError(t, err, "creating TransactionBuilder") + err = tb.loadOverrides(context.Background()) + test.AssertNotError(t, err, "loading overrides in TransactionBuilder") + overridesData, err := loadOverridesFromFile("../test/config-next/ratelimit-overrides.yml") + test.AssertNotError(t, err, "loading overrides from file") + testOverrides, err := parseOverrideLimits(overridesData) + test.AssertNotError(t, err, "parsing overrides") + + newOverridesPerLimit := make(map[Name]float64) + for _, override := range testOverrides { + override.precompute() + newOverridesPerLimit[override.Name]++ + } + + test.AssertDeepEquals(t, tb.limitRegistry.overrides, testOverrides) + + var iom io_prometheus_client.Metric + + for rlName, rlString := range nameToString { + err = tb.limitRegistry.overridesPerLimit.WithLabelValues(rlString).Write(&iom) + test.AssertNotError(t, err, fmt.Sprintf("encoding overridesPerLimit metric with label %q", rlString)) + test.AssertEquals(t, iom.Gauge.GetValue(), newOverridesPerLimit[rlName]) + } + + err = tb.limitRegistry.overridesTimestamp.Write(&iom) + test.AssertNotError(t, err, "encoding overridesTimestamp metric") + test.Assert(t, int64(iom.Gauge.GetValue()) >= time.Now().Unix()-5, "overridesTimestamp too old") + + // A failure loading overrides should log and return an error, and not + // overwrite existing overrides. + mockLog.Clear() + tb.limitRegistry.refreshOverrides = func(context.Context, prometheus.Gauge, blog.Logger) (Limits, error) { + return nil, errors.New("mock failure") + } + err = tb.limitRegistry.loadOverrides(context.Background()) + test.AssertError(t, err, "fail to load overrides") + test.AssertDeepEquals(t, tb.limitRegistry.overrides, testOverrides) + + // An empty set of overrides should log a warning, return nil, and not + // overwrite existing overrides. 
+ mockLog.Clear() + tb.limitRegistry.refreshOverrides = func(context.Context, prometheus.Gauge, blog.Logger) (Limits, error) { + return Limits{}, nil + } + err = tb.limitRegistry.loadOverrides(context.Background()) + test.AssertEquals(t, mockLog.GetAll()[0], "WARNING: loading overrides: no valid overrides") + test.AssertNotError(t, err, "load empty overrides") + test.AssertDeepEquals(t, tb.limitRegistry.overrides, testOverrides) +} + +func TestNewRefresher(t *testing.T) { + mockLog := blog.NewMock() + + reg := &limitRegistry{ + refreshOverrides: func(_ context.Context, _ prometheus.Gauge, logger blog.Logger) (Limits, error) { + logger.Info("refreshed") + return nil, nil + }, + logger: mockLog, + } + + // Create and simultaneously cancel a refresher. + reg.NewRefresher(time.Millisecond * 2)() + time.Sleep(time.Millisecond * 20) + // The refresher should have run once, but then been cancelled before the + // first tick. + test.AssertDeepEquals(t, mockLog.GetAll(), []string{"INFO: refreshed", "WARNING: loading overrides: no valid overrides"}) + + reg.NewRefresher(time.Nanosecond) + retries := 0 + for retries < 5 { + if slices.Contains(mockLog.GetAll(), "INFO: refreshed") { + break + } + retries++ + time.Sleep(core.RetryBackoff(retries, time.Millisecond*2, time.Millisecond*50, 2)) + } + test.AssertSliceContains(t, mockLog.GetAll(), "INFO: refreshed") + test.Assert(t, len(mockLog.GetAll()) > 1, "refresher didn't run more than once") +} + +func TestHydrateOverrideLimit(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bucketKey string + limit Limit + expectBucketKey string + expectError string + }{ + { + name: "bad limit name", + bucketKey: "", + limit: Limit{Name: 37}, + expectBucketKey: "", + expectError: "unrecognized limit name 37", + }, + { + name: "CertificatesPerDomain with bad FQDN, should fail validateIdForName", + bucketKey: "VelociousVacherin", + limit: Limit{ + Name: StringToName["CertificatesPerDomain"], + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Second}, + }, + expectBucketKey: "", + expectError: "\"VelociousVacherin\" is neither a domain (Domain name needs at least one dot) nor an IP address (ParseAddr(\"VelociousVacherin\"): unable to parse IP)", + }, + { + name: "CertificatesPerDomain with IPv4 address", + bucketKey: "64.112.117.1", + limit: Limit{ + Name: StringToName["CertificatesPerDomain"], + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Second}, + }, + expectBucketKey: "64.112.117.1/32", + expectError: "", + }, + { + name: "CertificatesPerDomain with IPv6 address", + bucketKey: "2602:80a:6000:666::", + limit: Limit{ + Name: StringToName["CertificatesPerDomain"], + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Second}, + }, + expectBucketKey: "2602:80a:6000:666::/64", + expectError: "", + }, + { + name: "CertificatesPerFQDNSet", + bucketKey: "example.com,example.net,example.org", + limit: Limit{ + Name: StringToName["CertificatesPerFQDNSet"], + Burst: 1, + Count: 1, + Period: config.Duration{Duration: time.Second}, + }, + expectBucketKey: "394e82811f52e2da38b970afdb21c9bc9af81060939c690183c00fce37408738", + expectError: "", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + bk, err := hydrateOverrideLimit(tc.bucketKey, tc.limit.Name) + if tc.expectError != "" { + if err == nil { + t.Errorf("expected error for test %q but got none", tc.name) + } + test.AssertContains(t, err.Error(), tc.expectError) + } else { + test.AssertNotError(t, err, tc.name) + test.AssertEquals(t, 
bk, tc.expectBucketKey) + } + }) + } +} + +func TestLoadAndParseDefaultLimits(t *testing.T) { + // Load a single valid default limit. + l, err := loadAndParseDefaultLimits("testdata/working_default.yml") + test.AssertNotError(t, err, "valid single default limit") + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Burst, int64(20)) + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Count, int64(20)) + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Period.Duration, time.Second) + + // Load multiple valid default limits. + l, err = loadAndParseDefaultLimits("testdata/working_defaults.yml") + test.AssertNotError(t, err, "multiple valid default limits") + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Burst, int64(20)) + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Count, int64(20)) + test.AssertEquals(t, l[NewRegistrationsPerIPAddress.EnumString()].Period.Duration, time.Second) + test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].Burst, int64(30)) + test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].Count, int64(30)) + test.AssertEquals(t, l[NewRegistrationsPerIPv6Range.EnumString()].Period.Duration, time.Second*2) + + // Path is empty string. + _, err = loadAndParseDefaultLimits("") + test.AssertError(t, err, "path is empty string") + test.Assert(t, os.IsNotExist(err), "path is empty string") + + // Path to file which does not exist. + _, err = loadAndParseDefaultLimits("testdata/file_does_not_exist.yml") + test.AssertError(t, err, "a file that does not exist") + test.Assert(t, os.IsNotExist(err), "test file should not exist") + + // Burst cannot be 0. + _, err = loadAndParseDefaultLimits("testdata/busted_default_burst_0.yml") + test.AssertError(t, err, "single default limit with burst=0") + test.AssertContains(t, err.Error(), "invalid burst") + + // Name cannot be empty. + _, err = loadAndParseDefaultLimits("testdata/busted_default_empty_name.yml") + test.AssertError(t, err, "single default limit with empty name") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Name must be a string representation of a valid Name enumeration. + _, err = loadAndParseDefaultLimits("testdata/busted_default_invalid_name.yml") + test.AssertError(t, err, "single default limit with invalid name") + test.Assert(t, !os.IsNotExist(err), "test file should exist") + + // Multiple entries, second entry has a bad name. 
+ _, err = loadAndParseDefaultLimits("testdata/busted_defaults_second_entry_bad_name.yml") + test.AssertError(t, err, "multiple default limits, one is bad") + test.Assert(t, !os.IsNotExist(err), "test file should exist") +} + +func TestLoadAndDumpOverrides(t *testing.T) { + t.Parallel() + + input := ` +- CertificatesPerDomain: + burst: 5000 + count: 5000 + period: 168h0m0s + ids: + - id: example.com + comment: IN-10057 + - id: example.net + comment: IN-10057 +- CertificatesPerDomain: + burst: 300 + count: 300 + period: 168h0m0s + ids: + - id: example.org + comment: IN-10057 +- CertificatesPerDomainPerAccount: + burst: 12000 + count: 12000 + period: 168h0m0s + ids: + - id: "123456789" + comment: Affluent (IN-8322) +- CertificatesPerDomainPerAccount: + burst: 6000 + count: 6000 + period: 168h0m0s + ids: + - id: "543219876" + comment: Affluent (IN-8322) + - id: "987654321" + comment: Affluent (IN-8322) +- CertificatesPerFQDNSet: + burst: 50 + count: 50 + period: 168h0m0s + ids: + - id: example.co.uk,example.cn + comment: IN-6843 +- CertificatesPerFQDNSet: + burst: 24 + count: 24 + period: 168h0m0s + ids: + - id: example.org,example.com,example.net + comment: IN-6006 +- FailedAuthorizationsPerDomainPerAccount: + burst: 250 + count: 250 + period: 1h0m0s + ids: + - id: "123456789" + comment: Digital Lake (IN-6736) +- FailedAuthorizationsPerDomainPerAccount: + burst: 50 + count: 50 + period: 1h0m0s + ids: + - id: "987654321" + comment: Digital Lake (IN-6856) +- FailedAuthorizationsPerDomainPerAccount: + burst: 10 + count: 10 + period: 1h0m0s + ids: + - id: "543219876" + comment: Big Mart (IN-6949) +- NewOrdersPerAccount: + burst: 3000 + count: 3000 + period: 3h0m0s + ids: + - id: "123456789" + comment: Galaxy Hoster (IN-8180) +- NewOrdersPerAccount: + burst: 1000 + count: 1000 + period: 3h0m0s + ids: + - id: "543219876" + comment: Big Mart (IN-8180) + - id: "987654321" + comment: Buy More (IN-10057) +- NewRegistrationsPerIPAddress: + burst: 100000 + count: 100000 + period: 3h0m0s + ids: + - id: 2600:1f1c:5e0:e702:ca06:d2a3:c7ce:a02e + comment: example.org IN-2395 + - id: 55.66.77.88 + comment: example.org IN-2395 +- NewRegistrationsPerIPAddress: + burst: 200 + count: 200 + period: 3h0m0s + ids: + - id: 11.22.33.44 + comment: example.net (IN-1583)` + + expectCSV := ` +name,id,count,burst,period,comment +CertificatesPerDomain,example.com,5000,5000,168h0m0s,IN-10057 +CertificatesPerDomain,example.net,5000,5000,168h0m0s,IN-10057 +CertificatesPerDomain,example.org,300,300,168h0m0s,IN-10057 +CertificatesPerDomainPerAccount,123456789,12000,12000,168h0m0s,Affluent (IN-8322) +CertificatesPerDomainPerAccount,543219876,6000,6000,168h0m0s,Affluent (IN-8322) +CertificatesPerDomainPerAccount,987654321,6000,6000,168h0m0s,Affluent (IN-8322) +CertificatesPerFQDNSet,7c956936126b492845ddb48f4d220034509e7c0ad54ed2c1ba2650406846d9c3,50,50,168h0m0s,IN-6843 +CertificatesPerFQDNSet,394e82811f52e2da38b970afdb21c9bc9af81060939c690183c00fce37408738,24,24,168h0m0s,IN-6006 +FailedAuthorizationsPerDomainPerAccount,123456789,250,250,1h0m0s,Digital Lake (IN-6736) +FailedAuthorizationsPerDomainPerAccount,987654321,50,50,1h0m0s,Digital Lake (IN-6856) +FailedAuthorizationsPerDomainPerAccount,543219876,10,10,1h0m0s,Big Mart (IN-6949) +NewOrdersPerAccount,123456789,3000,3000,3h0m0s,Galaxy Hoster (IN-8180) +NewOrdersPerAccount,543219876,1000,1000,3h0m0s,Big Mart (IN-8180) +NewOrdersPerAccount,987654321,1000,1000,3h0m0s,Buy More (IN-10057) 
+NewRegistrationsPerIPAddress,2600:1f1c:5e0:e702:ca06:d2a3:c7ce:a02e,100000,100000,3h0m0s,example.org IN-2395
+NewRegistrationsPerIPAddress,55.66.77.88,100000,100000,3h0m0s,example.org IN-2395
+NewRegistrationsPerIPAddress,11.22.33.44,200,200,3h0m0s,example.net (IN-1583)
+`
+	tempDir := t.TempDir()
+	tempFile := filepath.Join(tempDir, "overrides.yaml")
+
+	err := os.WriteFile(tempFile, []byte(input), 0644)
+	test.AssertNotError(t, err, "writing temp overrides.yaml")
+
+	original, err := LoadOverridesByBucketKey(tempFile)
+	test.AssertNotError(t, err, "loading overrides")
+	test.Assert(t, len(original) > 0, "expected at least one override loaded")
+
+	dumpFile := filepath.Join(tempDir, "dumped.csv")
+	err = DumpOverrides(dumpFile, original)
+	test.AssertNotError(t, err, "dumping overrides")
+
+	dumped, err := os.ReadFile(dumpFile)
+	test.AssertNotError(t, err, "reading dumped overrides file")
+	test.AssertEquals(t, strings.TrimLeft(string(dumped), "\n"), strings.TrimLeft(expectCSV, "\n"))
+}
diff --git a/ratelimits/limiter.go b/ratelimits/limiter.go
new file mode 100644
index 00000000000..ea5c2b64237
--- /dev/null
+++ b/ratelimits/limiter.go
@@ -0,0 +1,462 @@
+package ratelimits
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"math/rand/v2"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/jmhodges/clock"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+
+	berrors "github.com/letsencrypt/boulder/errors"
+)
+
+const (
+	// Allowed is used for rate limit metrics; it's the value of the 'decision'
+	// label when a request was allowed.
+	Allowed = "allowed"
+
+	// Denied is used for rate limit metrics; it's the value of the 'decision'
+	// label when a request was denied.
+	Denied = "denied"
+)
+
+// allowedDecision is an "allowed" *Decision that should be returned when a
+// checked limit is found to be disabled.
+var allowedDecision = &Decision{allowed: true, remaining: math.MaxInt64}
+
+// Limiter provides a high-level interface for rate limiting requests by
+// utilizing a token bucket-style approach.
+type Limiter struct {
+	// source is used to store buckets. It must be safe for concurrent use.
+	source Source
+	clk    clock.Clock
+
+	spendLatency *prometheus.HistogramVec
+}
+
+// NewLimiter returns a new *Limiter. The provided source must be safe for
+// concurrent use.
+func NewLimiter(clk clock.Clock, source Source, stats prometheus.Registerer) (*Limiter, error) {
+	spendLatency := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{
+		Name: "ratelimits_spend_latency",
+		Help: fmt.Sprintf("Latency of ratelimit checks labeled by limit=[name] and decision=[%s|%s], in seconds", Allowed, Denied),
+		// Exponential buckets ranging from 0.0005s to 3s.
+		Buckets: prometheus.ExponentialBuckets(0.0005, 3, 8),
+	}, []string{"limit", "decision"})
+
+	return &Limiter{
+		source:       source,
+		clk:          clk,
+		spendLatency: spendLatency,
+	}, nil
+}
+
+// Decision represents the result of a rate limit check or spend operation. To
+// check the result of a *Decision, call the Result() method.
+type Decision struct {
+	// allowed is true if the bucket possessed enough capacity to allow the
+	// request given the cost.
+	allowed bool
+
+	// remaining is the number of requests the client is allowed to make before
+	// they're rate limited.
+	remaining int64
+
+	// retryIn is the duration the client MUST wait before they're allowed to
+	// make a request.
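+	// It is zero when the Decision is allowed.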
+	retryIn time.Duration
+
+	// resetIn is the duration the bucket will take to refill to its maximum
+	// capacity, assuming no further requests are made.
+	resetIn time.Duration
+
+	// newTAT indicates the time at which the bucket will be full. It is the
+	// theoretical arrival time (TAT) of the next request. It must be no more
+	// than (burst * (period / count)) in the future at any single point in
+	// time.
+	newTAT time.Time
+
+	// transaction is the Transaction that resulted in this Decision. It is
+	// included for the production of verbose Subscriber-facing errors. It is
+	// set by the Limiter before returning the Decision.
+	transaction Transaction
+}
+
+// Result translates a denied *Decision into a berrors.RateLimitError for the
+// Subscriber, or returns nil if the *Decision allows the request. The error
+// message includes a human-readable description of the exceeded rate limit and
+// a retry-after timestamp.
+func (d *Decision) Result(now time.Time) error {
+	if d.allowed {
+		return nil
+	}
+
+	// Add 0-3% jitter to the RetryIn duration to prevent a thundering herd.
+	jitter := time.Duration(float64(d.retryIn) * 0.03 * rand.Float64())
+	retryAfter := d.retryIn + jitter
+	retryAfterTs := now.UTC().Add(retryAfter).Format("2006-01-02 15:04:05 MST")
+
+	// There is no case for FailedAuthorizationsForPausingPerDomainPerAccount
+	// because the RA will pause clients who exceed that ratelimit.
+	switch d.transaction.limit.Name {
+	case NewRegistrationsPerIPAddress:
+		return berrors.RegistrationsPerIPAddressError(
+			retryAfter,
+			"too many new registrations (%d) from this IP address in the last %s, retry after %s",
+			d.transaction.limit.Burst,
+			d.transaction.limit.Period.Duration,
+			retryAfterTs,
+		)
+
+	case NewRegistrationsPerIPv6Range:
+		return berrors.RegistrationsPerIPv6RangeError(
+			retryAfter,
+			"too many new registrations (%d) from this /48 subnet of IPv6 addresses in the last %s, retry after %s",
+			d.transaction.limit.Burst,
+			d.transaction.limit.Period.Duration,
+			retryAfterTs,
+		)
+
+	case NewOrdersPerAccount:
+		return berrors.NewOrdersPerAccountError(
+			retryAfter,
+			"too many new orders (%d) from this account in the last %s, retry after %s",
+			d.transaction.limit.Burst,
+			d.transaction.limit.Period.Duration,
+			retryAfterTs,
+		)
+
+	case FailedAuthorizationsPerDomainPerAccount:
+		// Uses bucket key 'enum:regId:identValue'.
+		idx := strings.LastIndex(d.transaction.bucketKey, ":")
+		if idx == -1 {
+			return berrors.InternalServerError("unrecognized bucket key while generating error")
+		}
+		identValue := d.transaction.bucketKey[idx+1:]
+		return berrors.FailedAuthorizationsPerDomainPerAccountError(
+			retryAfter,
+			"too many failed authorizations (%d) for %q in the last %s, retry after %s",
+			d.transaction.limit.Burst,
+			identValue,
+			d.transaction.limit.Period.Duration,
+			retryAfterTs,
+		)
+
+	case CertificatesPerDomain, CertificatesPerDomainPerAccount:
+		// Uses bucket key 'enum:domainOrCIDR' or 'enum:regId:domainOrCIDR',
+		// respectively.
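+		// In both cases the value of interest is the final colon-separated
+		// segment, so strings.LastIndex suffices.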
+ idx := strings.LastIndex(d.transaction.bucketKey, ":") + if idx == -1 { + return berrors.InternalServerError("unrecognized bucket key while generating error") + } + domainOrCIDR := d.transaction.bucketKey[idx+1:] + return berrors.CertificatesPerDomainError( + retryAfter, + "too many certificates (%d) already issued for %q in the last %s, retry after %s", + d.transaction.limit.Burst, + domainOrCIDR, + d.transaction.limit.Period.Duration, + retryAfterTs, + ) + + case CertificatesPerFQDNSet: + return berrors.CertificatesPerFQDNSetError( + retryAfter, + "too many certificates (%d) already issued for this exact set of identifiers in the last %s, retry after %s", + d.transaction.limit.Burst, + d.transaction.limit.Period.Duration, + retryAfterTs, + ) + + case LimitOverrideRequestsPerIPAddress: + return berrors.LimitOverrideRequestsPerIPAddressError( + retryAfter, + "too many override request form submissions (%d) from this IP address in the last %s, retry after %s", + d.transaction.limit.Burst, + d.transaction.limit.Period.Duration, + retryAfterTs, + ) + + default: + return berrors.InternalServerError("cannot generate error for unknown rate limit") + } +} + +// Check DOES NOT deduct the cost of the request from the provided bucket's +// capacity. The returned *Decision indicates whether the capacity exists to +// satisfy the cost and represents the hypothetical state of the bucket IF the +// cost WERE to be deducted. If no bucket exists it will NOT be created. No +// state is persisted to the underlying datastore. +func (l *Limiter) Check(ctx context.Context, txn Transaction) (*Decision, error) { + if txn.allowOnly() { + return allowedDecision, nil + } + // Remove cancellation from the request context so that transactions are not + // interrupted by a client disconnect. + ctx = context.WithoutCancel(ctx) + tat, err := l.source.Get(ctx, txn.bucketKey) + if err != nil { + if !errors.Is(err, ErrBucketNotFound) { + return nil, err + } + // First request from this client. No need to initialize the bucket + // because this is a check, not a spend. A TAT of "now" is equivalent to + // a full bucket. + return maybeSpend(l.clk, txn, l.clk.Now()), nil + } + return maybeSpend(l.clk, txn, tat), nil +} + +// Spend attempts to deduct the cost from the provided bucket's capacity. The +// returned *Decision indicates whether the capacity existed to satisfy the cost +// and represents the current state of the bucket. If no bucket exists it WILL +// be created WITH the cost factored into its initial state. The new bucket +// state is persisted to the underlying datastore, if applicable, before +// returning. +func (l *Limiter) Spend(ctx context.Context, txn Transaction) (*Decision, error) { + return l.BatchSpend(ctx, []Transaction{txn}) +} + +func prepareBatch(txns []Transaction) ([]Transaction, []string, error) { + var bucketKeys []string + var transactions []Transaction + for _, txn := range txns { + if txn.allowOnly() { + // Ignore allow-only transactions. 
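+			// They are always allowed and consume no bucket capacity.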
+			continue
+		}
+		if slices.Contains(bucketKeys, txn.bucketKey) {
+			return nil, nil, fmt.Errorf("found duplicate bucket %q in batch", txn.bucketKey)
+		}
+		bucketKeys = append(bucketKeys, txn.bucketKey)
+		transactions = append(transactions, txn)
+	}
+	return transactions, bucketKeys, nil
+}
+
+// stricter returns the stricter of two Decisions: the one with the longer
+// retryIn or, when those are equal, the one with fewer remaining requests.
+func stricter(existing *Decision, incoming *Decision) *Decision {
+	if existing.retryIn == incoming.retryIn {
+		if existing.remaining < incoming.remaining {
+			return existing
+		}
+		return incoming
+	}
+	if existing.retryIn > incoming.retryIn {
+		return existing
+	}
+	return incoming
+}
+
+// BatchSpend attempts to deduct the costs from the provided buckets'
+// capacities. If applicable, new bucket states are persisted to the underlying
+// datastore before returning. Non-existent buckets will be initialized WITH the
+// cost factored into the initial state. The returned *Decision represents the
+// strictest of all *Decisions reached in the batch.
+func (l *Limiter) BatchSpend(ctx context.Context, txns []Transaction) (*Decision, error) {
+	start := l.clk.Now()
+
+	batch, bucketKeys, err := prepareBatch(txns)
+	if err != nil {
+		return nil, err
+	}
+	if len(batch) == 0 {
+		// All Transactions were allow-only.
+		return allowedDecision, nil
+	}
+
+	// Remove cancellation from the request context so that transactions are not
+	// interrupted by a client disconnect.
+	ctx = context.WithoutCancel(ctx)
+	tats, err := l.source.BatchGet(ctx, bucketKeys)
+	if err != nil {
+		return nil, fmt.Errorf("batch get for %d keys: %w", len(bucketKeys), err)
+	}
+	batchDecision := allowedDecision
+	newBuckets := make(map[string]time.Time)
+	incrBuckets := make(map[string]increment)
+	staleBuckets := make(map[string]time.Time)
+	txnOutcomes := make(map[Transaction]string)
+
+	for _, txn := range batch {
+		storedTAT, bucketExists := tats[txn.bucketKey]
+		d := maybeSpend(l.clk, txn, storedTAT)
+
+		if d.allowed && (storedTAT != d.newTAT) && txn.spend {
+			if !bucketExists {
+				newBuckets[txn.bucketKey] = d.newTAT
+			} else if storedTAT.After(l.clk.Now()) {
+				incrBuckets[txn.bucketKey] = increment{
+					cost: time.Duration(txn.cost * txn.limit.emissionInterval),
+					ttl:  time.Duration(txn.limit.burstOffset),
+				}
+			} else {
+				staleBuckets[txn.bucketKey] = d.newTAT
+			}
+		}
+
+		if !txn.spendOnly() {
+			// Spend-only Transactions are best-effort and do not contribute to
+			// the batchDecision.
+			batchDecision = stricter(batchDecision, d)
+		}
+
+		txnOutcomes[txn] = Denied
+		if d.allowed {
+			txnOutcomes[txn] = Allowed
+		}
+	}
+
+	if batchDecision.allowed {
+		if len(newBuckets) > 0 {
+			// Use BatchSetNotExisting to create new buckets so that we detect
+			// if concurrent requests have created this bucket at the same time,
+			// which would result in overwriting if we used a plain "SET"
+			// command. If that happens, fall back to incrementing.
+			alreadyExists, err := l.source.BatchSetNotExisting(ctx, newBuckets)
+			if err != nil {
+				return nil, fmt.Errorf("batch set for %d keys: %w", len(newBuckets), err)
+			}
+			// Find the original transaction in order to compute the increment
+			// and set the TTL.
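+			// BatchSetNotExisting reports, per bucket key, whether the bucket
+			// already existed.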
+			for _, txn := range batch {
+				if alreadyExists[txn.bucketKey] {
+					incrBuckets[txn.bucketKey] = increment{
+						cost: time.Duration(txn.cost * txn.limit.emissionInterval),
+						ttl:  time.Duration(txn.limit.burstOffset),
+					}
+				}
+			}
+		}
+
+		if len(incrBuckets) > 0 {
+			err = l.source.BatchIncrement(ctx, incrBuckets)
+			if err != nil {
+				return nil, fmt.Errorf("batch increment for %d keys: %w", len(incrBuckets), err)
+			}
+		}
+
+		if len(staleBuckets) > 0 {
+			// Incrementing a TAT in the past grants unintended burst capacity.
+			// So instead we overwrite it with a TAT of now + increment. This
+			// approach may cause a race condition where only the last spend is
+			// saved, but it's preferable to the alternative.
+			err = l.source.BatchSet(ctx, staleBuckets)
+			if err != nil {
+				return nil, fmt.Errorf("batch set for %d keys: %w", len(staleBuckets), err)
+			}
+		}
+	}
+
+	// Observe latency equally across all transactions in the batch.
+	totalLatency := l.clk.Since(start)
+	perTxnLatency := totalLatency / time.Duration(len(txnOutcomes))
+	for txn, outcome := range txnOutcomes {
+		l.spendLatency.WithLabelValues(txn.limit.Name.String(), outcome).Observe(perTxnLatency.Seconds())
+	}
+	return batchDecision, nil
+}
+
+// Refund attempts to refund all of the cost to the capacity of the specified
+// bucket. The returned *Decision indicates whether the refund was successful
+// and represents the current state of the bucket. The new bucket state is
+// persisted to the underlying datastore, if applicable, before returning. If no
+// bucket exists it will NOT be created. Spend-only Transactions are assumed to
+// be refundable. Check-only Transactions are never refunded.
+//
+// Note: The amount refunded cannot cause the bucket to exceed its maximum
+// capacity. Partial refunds are allowed and are considered successful. For
+// instance, if a bucket has a maximum capacity of 10 and currently has 5
+// requests remaining, a refund request of 7 will result in the bucket reaching
+// its maximum capacity of 10, not 12.
+func (l *Limiter) Refund(ctx context.Context, txn Transaction) (*Decision, error) {
+	return l.BatchRefund(ctx, []Transaction{txn})
+}
+
+// BatchRefund attempts to refund all or some of the costs to the provided
+// buckets' capacities. Non-existent buckets will NOT be initialized. The new
+// bucket state is persisted to the underlying datastore, if applicable, before
+// returning. Spend-only Transactions are assumed to be refundable. Check-only
+// Transactions are never refunded. The returned *Decision represents the
+// strictest of all *Decisions reached in the batch.
+func (l *Limiter) BatchRefund(ctx context.Context, txns []Transaction) (*Decision, error) {
+	batch, bucketKeys, err := prepareBatch(txns)
+	if err != nil {
+		return nil, err
+	}
+	if len(batch) == 0 {
+		// All Transactions were allow-only.
+		return allowedDecision, nil
+	}
+
+	// Remove cancellation from the request context so that transactions are not
+	// interrupted by a client disconnect.
+	ctx = context.WithoutCancel(ctx)
+	tats, err := l.source.BatchGet(ctx, bucketKeys)
+	if err != nil {
+		return nil, fmt.Errorf("batch get for %d keys: %w", len(bucketKeys), err)
+	}
+
+	batchDecision := allowedDecision
+	incrBuckets := make(map[string]increment)
+
+	for _, txn := range batch {
+		tat, bucketExists := tats[txn.bucketKey]
+		if !bucketExists {
+			// Ignore non-existent bucket.
+			continue
+		}
+
+		if txn.checkOnly() {
+			// The cost of a check-only transaction is never refunded.
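+			// Zeroing the cost leaves the bucket unchanged while still letting
+			// the resulting Decision contribute to batchDecision.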
+			txn.cost = 0
+		}
+		d := maybeRefund(l.clk, txn, tat)
+		batchDecision = stricter(batchDecision, d)
+		if d.allowed && tat != d.newTAT {
+			// New bucket state should be persisted.
+			incrBuckets[txn.bucketKey] = increment{
+				cost: time.Duration(-txn.cost * txn.limit.emissionInterval),
+				ttl:  time.Duration(txn.limit.burstOffset),
+			}
+		}
+	}
+
+	if len(incrBuckets) > 0 {
+		err = l.source.BatchIncrement(ctx, incrBuckets)
+		if err != nil {
+			return nil, fmt.Errorf("batch increment for %d keys: %w", len(incrBuckets), err)
+		}
+	}
+	return batchDecision, nil
+}
+
+// BatchReset resets the specified buckets to their maximum capacity using the
+// provided reset Transactions. The new bucket state is persisted to the
+// underlying datastore before returning.
+func (l *Limiter) BatchReset(ctx context.Context, txns []Transaction) error {
+	var bucketKeys []string
+	for _, txn := range txns {
+		if txn.allowOnly() {
+			// Ignore allow-only transactions.
+			continue
+		}
+		if !txn.resetOnly() {
+			return fmt.Errorf("expected reset-only transaction, received check=%t spend=%t reset=%t", txn.check, txn.spend, txn.reset)
+		}
+		if slices.Contains(bucketKeys, txn.bucketKey) {
+			return fmt.Errorf("found duplicate bucket %q in batch", txn.bucketKey)
+		}
+		bucketKeys = append(bucketKeys, txn.bucketKey)
+	}
+	if len(bucketKeys) == 0 {
+		return nil
+	}
+	// Remove cancellation from the request context so that transactions are not
+	// interrupted by a client disconnect.
+	ctx = context.WithoutCancel(ctx)
+	return l.source.BatchDelete(ctx, bucketKeys)
+}
diff --git a/ratelimits/limiter_test.go b/ratelimits/limiter_test.go
new file mode 100644
index 00000000000..af2a384309a
--- /dev/null
+++ b/ratelimits/limiter_test.go
@@ -0,0 +1,612 @@
+package ratelimits
+
+import (
+	"context"
+	"math/rand/v2"
+	"net"
+	"net/netip"
+	"testing"
+	"time"
+
+	"github.com/jmhodges/clock"
+
+	"github.com/letsencrypt/boulder/config"
+	berrors "github.com/letsencrypt/boulder/errors"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/letsencrypt/boulder/metrics"
+	"github.com/letsencrypt/boulder/test"
+)
+
+// overriddenIP is overridden in 'testdata/working_override.yml' to have higher
+// burst and count values.
+const overriddenIP = "64.112.117.1"
+
+// newTestLimiter constructs a new limiter.
+func newTestLimiter(t *testing.T, s Source, clk clock.FakeClock) *Limiter {
+	l, err := NewLimiter(clk, s, metrics.NoopRegisterer)
+	test.AssertNotError(t, err, "should not error")
+	return l
+}
+
+// newTestTransactionBuilder constructs a new *TransactionBuilder with the
+// following configuration:
+// - 'NewRegistrationsPerIPAddress' burst: 20 count: 20 period: 1s
+// - 'NewRegistrationsPerIPAddress:64.112.117.1' burst: 40 count: 40 period: 1s
+func newTestTransactionBuilder(t *testing.T) *TransactionBuilder {
+	c, err := NewTransactionBuilderFromFiles("testdata/working_default.yml", "testdata/working_override.yml", metrics.NoopRegisterer, blog.NewMock())
+	test.AssertNotError(t, err, "should not error")
+	err = c.loadOverrides(context.Background())
+	test.AssertNotError(t, err, "loading overrides")
+
+	return c
+}
+
+func setup(t *testing.T) (context.Context, map[string]*Limiter, *TransactionBuilder, clock.FakeClock, string) {
+	testCtx := context.Background()
+	clk := clock.NewFake()
+
+	// Generate a random IP address to avoid collisions during and between test
+	// runs.
+	randIP := make(net.IP, 4)
+	for i := range 4 {
+		randIP[i] = byte(rand.IntN(256))
+	}
+
+	// Construct a limiter for each source.
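+	// Both limiters share the same fake clock, so advancing it in a test
+	// affects each source identically.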
+	return testCtx, map[string]*Limiter{
+		"inmem": newInmemTestLimiter(t, clk),
+		"redis": newRedisTestLimiter(t, clk),
+	}, newTestTransactionBuilder(t), clk, randIP.String()
+}
+
+func resetBucket(t *testing.T, l *Limiter, ctx context.Context, limit *Limit, bucketKey string) {
+	t.Helper()
+	txn, err := newResetTransaction(limit, bucketKey)
+	test.AssertNotError(t, err, "txn should be valid")
+	err = l.BatchReset(ctx, []Transaction{txn})
+	test.AssertNotError(t, err, "should not error")
+}
+
+func TestLimiter_CheckWithLimitOverrides(t *testing.T) {
+	t.Parallel()
+	testCtx, limiters, txnBuilder, clk, testIP := setup(t)
+	for name, l := range limiters {
+		t.Run(name, func(t *testing.T) {
+			overriddenBucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(overriddenIP))
+			overriddenLimit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, overriddenBucketKey)
+			test.AssertNotError(t, err, "should not error")
+
+			// Attempt to spend all 40 requests, this should succeed.
+			overriddenTxn40, err := newTransaction(overriddenLimit, overriddenBucketKey, 40)
+			test.AssertNotError(t, err, "txn should be valid")
+			d, err := l.Spend(testCtx, overriddenTxn40)
+			test.AssertNotError(t, err, "should not error")
+			test.Assert(t, d.allowed, "should be allowed")
+
+			// Attempting to spend 1 more, this should fail.
+			overriddenTxn1, err := newTransaction(overriddenLimit, overriddenBucketKey, 1)
+			test.AssertNotError(t, err, "txn should be valid")
+			d, err = l.Spend(testCtx, overriddenTxn1)
+			test.AssertNotError(t, err, "should not error")
+			test.Assert(t, !d.allowed, "should not be allowed")
+			test.AssertEquals(t, d.remaining, int64(0))
+			test.AssertEquals(t, d.resetIn, time.Second)
+
+			// Verify our RetryIn is correct. 1 second == 1000 milliseconds and
+			// 1000/40 = 25 milliseconds per request.
+			test.AssertEquals(t, d.retryIn, time.Millisecond*25)
+
+			// Wait 25 milliseconds and try again.
+			clk.Add(d.retryIn)
+
+			// We should be allowed to spend 1 more request.
+			d, err = l.Spend(testCtx, overriddenTxn1)
+			test.AssertNotError(t, err, "should not error")
+			test.Assert(t, d.allowed, "should be allowed")
+			test.AssertEquals(t, d.remaining, int64(0))
+			test.AssertEquals(t, d.resetIn, time.Second)
+
+			// Wait 1 second for a full bucket reset.
+			clk.Add(d.resetIn)
+
+			// Quickly spend 40 requests in a row.
+			for i := range 40 {
+				d, err = l.Spend(testCtx, overriddenTxn1)
+				test.AssertNotError(t, err, "should not error")
+				test.Assert(t, d.allowed, "should be allowed")
+				test.AssertEquals(t, d.remaining, int64(39-i))
+			}
+
+			// Attempting to spend 1 more, this should fail.
+			d, err = l.Spend(testCtx, overriddenTxn1)
+			test.AssertNotError(t, err, "should not error")
+			test.Assert(t, !d.allowed, "should not be allowed")
+			test.AssertEquals(t, d.remaining, int64(0))
+			test.AssertEquals(t, d.resetIn, time.Second)
+
+			// Wait 1 second for a full bucket reset.
+			clk.Add(d.resetIn)
+
+			normalBucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
+			normalLimit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, normalBucketKey)
+			test.AssertNotError(t, err, "should not error")
+
+			// Spend the same bucket but in a batch with a bucket subject to
+			// default limits. This should succeed, but the decision should
+			// reflect that of the default bucket.
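+			// The default bucket is the stricter of the two here: its burst is
+			// 20 versus the override's 40.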
+ defaultTxn1, err := newTransaction(normalLimit, normalBucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultTxn1}) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(19)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Millisecond*50) + + // Refund quota to both buckets. This should succeed, but the + // decision should reflect that of the default bucket. + d, err = l.BatchRefund(testCtx, []Transaction{overriddenTxn1, defaultTxn1}) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(20)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Duration(0)) + + // Once more. + d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultTxn1}) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(19)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Millisecond*50) + + // Reset between tests. + resetBucket(t, l, testCtx, overriddenLimit, overriddenBucketKey) + resetBucket(t, l, testCtx, normalLimit, normalBucketKey) + + // Spend the same bucket but in a batch with a Transaction that is + // check-only. This should succeed, but the decision should reflect + // that of the default bucket. + defaultCheckOnlyTxn1, err := newCheckOnlyTransaction(normalLimit, normalBucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultCheckOnlyTxn1}) + test.AssertNotError(t, err, "should not error") + test.AssertEquals(t, d.remaining, int64(19)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Millisecond*50) + + // Check the remaining quota of the overridden bucket. + overriddenCheckOnlyTxn0, err := newCheckOnlyTransaction(overriddenLimit, overriddenBucketKey, 0) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Check(testCtx, overriddenCheckOnlyTxn0) + test.AssertNotError(t, err, "should not error") + test.AssertEquals(t, d.remaining, int64(39)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Millisecond*25) + + // Check the remaining quota of the default bucket. + defaultTxn0, err := newTransaction(normalLimit, normalBucketKey, 0) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Check(testCtx, defaultTxn0) + test.AssertNotError(t, err, "should not error") + test.AssertEquals(t, d.remaining, int64(20)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + test.AssertEquals(t, d.resetIn, time.Duration(0)) + + // Spend the same bucket but in a batch with a Transaction that is + // spend-only. This should succeed, but the decision should reflect + // that of the overridden bucket. 
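+			// Spend-only Transactions are best-effort and do not contribute
+			// to the batch decision.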
+			defaultSpendOnlyTxn1, err := newSpendOnlyTransaction(normalLimit, normalBucketKey, 1)
+			test.AssertNotError(t, err, "txn should be valid")
+			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultSpendOnlyTxn1})
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(38))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
+
+			// Check the remaining quota of the overridden bucket.
+			d, err = l.Check(testCtx, overriddenCheckOnlyTxn0)
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(38))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
+
+			// Check the remaining quota of the default bucket.
+			d, err = l.Check(testCtx, defaultTxn0)
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(19))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
+
+			// Once more, but now the spend-only Transaction will attempt to
+			// spend 20 requests. The spend-only Transaction should fail, but
+			// the decision should reflect that of the overridden bucket.
+			defaultSpendOnlyTxn20, err := newSpendOnlyTransaction(normalLimit, normalBucketKey, 20)
+			test.AssertNotError(t, err, "txn should be valid")
+			d, err = l.BatchSpend(testCtx, []Transaction{overriddenTxn1, defaultSpendOnlyTxn20})
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(37))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*75)
+
+			// Check the remaining quota of the overridden bucket.
+			d, err = l.Check(testCtx, overriddenCheckOnlyTxn0)
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(37))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*75)
+
+			// Check the remaining quota of the default bucket.
+			d, err = l.Check(testCtx, defaultTxn0)
+			test.AssertNotError(t, err, "should not error")
+			test.AssertEquals(t, d.remaining, int64(19))
+			test.AssertEquals(t, d.retryIn, time.Duration(0))
+			test.AssertEquals(t, d.resetIn, time.Millisecond*50)
+
+			// Reset between tests.
+			resetBucket(t, l, testCtx, overriddenLimit, overriddenBucketKey)
+		})
+	}
+}
+
+func TestLimiter_InitializationViaCheckAndSpend(t *testing.T) {
+	t.Parallel()
+	testCtx, limiters, txnBuilder, _, testIP := setup(t)
+	for name, l := range limiters {
+		t.Run(name, func(t *testing.T) {
+			bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP))
+			limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey)
+			test.AssertNotError(t, err, "should not error")
+
+			// Check on an empty bucket should return the theoretical next state
+			// of that bucket if the cost were spent.
+			txn1, err := newTransaction(limit, bucketKey, 1)
+			test.AssertNotError(t, err, "txn should be valid")
+			d, err := l.Check(testCtx, txn1)
+			test.AssertNotError(t, err, "should not error")
+			test.Assert(t, d.allowed, "should be allowed")
+			test.AssertEquals(t, d.remaining, int64(19))
+			// Verify our ResetIn timing is correct. 1 second == 1000
+			// milliseconds and 1000/20 = 50 milliseconds per request.
+ test.AssertEquals(t, d.resetIn, time.Millisecond*50) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + + // However, that cost should not be spent yet; a 0 cost check should + // tell us that we actually have 20 remaining. + txn0, err := newTransaction(limit, bucketKey, 0) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Check(testCtx, txn0) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(20)) + test.AssertEquals(t, d.resetIn, time.Duration(0)) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + + // Reset our bucket. + resetBucket(t, l, testCtx, limit, bucketKey) + + // Similar to above, but we'll use Spend() to actually initialize + // the bucket. Spend should return the same result as Check. + d, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(19)) + // Verify our ResetIn timing is correct. 1 second == 1000 + // milliseconds and 1000/20 = 50 milliseconds per request. + test.AssertEquals(t, d.resetIn, time.Millisecond*50) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + + // And that cost should have been spent; a 0 cost check should still + // tell us that we have 19 remaining. + d, err = l.Check(testCtx, txn0) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(19)) + // Verify our ResetIn is correct. 1 second == 1000 milliseconds and + // 1000/20 = 50 milliseconds per request. + test.AssertEquals(t, d.resetIn, time.Millisecond*50) + test.AssertEquals(t, d.retryIn, time.Duration(0)) + }) + } +} + +func TestLimiter_DefaultLimits(t *testing.T) { + t.Parallel() + testCtx, limiters, txnBuilder, clk, testIP := setup(t) + for name, l := range limiters { + t.Run(name, func(t *testing.T) { + bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP)) + limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey) + test.AssertNotError(t, err, "should not error") + + // Attempt to spend all 20 requests; this should succeed. + txn20, err := newTransaction(limit, bucketKey, 20) + test.AssertNotError(t, err, "txn should be valid") + d, err := l.Spend(testCtx, txn20) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Attempting to spend 1 more should fail. + txn1, err := newTransaction(limit, bucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, !d.allowed, "should not be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Verify our RetryIn is correct. 1 second == 1000 milliseconds and + // 1000/20 = 50 milliseconds per request. + test.AssertEquals(t, d.retryIn, time.Millisecond*50) + + // Wait 50 milliseconds and try again. + clk.Add(d.retryIn) + + // We should be allowed to spend 1 more request. + d, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Wait 1 second for a full bucket reset.
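+ // With remaining at 0, d.resetIn is the full period: 20 requests at 50 milliseconds of TAT each is 1 second, so advancing the fake clock by d.resetIn refills the bucket completely.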
+ clk.Add(d.resetIn) + + // Quickly spend 20 requests in a row. + for i := range 20 { + d, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(19-i)) + } + + // Attempting to spend 1 more should fail. + d, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, !d.allowed, "should not be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + }) + } +} + +func TestLimiter_RefundAndReset(t *testing.T) { + t.Parallel() + testCtx, limiters, txnBuilder, clk, testIP := setup(t) + for name, l := range limiters { + t.Run(name, func(t *testing.T) { + bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr(testIP)) + limit, err := txnBuilder.getLimit(NewRegistrationsPerIPAddress, bucketKey) + test.AssertNotError(t, err, "should not error") + + // Attempt to spend all 20 requests; this should succeed. + txn20, err := newTransaction(limit, bucketKey, 20) + test.AssertNotError(t, err, "txn should be valid") + d, err := l.Spend(testCtx, txn20) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Refund 10 requests. + txn10, err := newTransaction(limit, bucketKey, 10) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Refund(testCtx, txn10) + test.AssertNotError(t, err, "should not error") + test.AssertEquals(t, d.remaining, int64(10)) + + // Spend 10 requests; this should succeed. + d, err = l.Spend(testCtx, txn10) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + resetBucket(t, l, testCtx, limit, bucketKey) + + // Attempt to spend 20 more requests; this should succeed. + d, err = l.Spend(testCtx, txn20) + test.AssertNotError(t, err, "should not error") + test.Assert(t, d.allowed, "should be allowed") + test.AssertEquals(t, d.remaining, int64(0)) + test.AssertEquals(t, d.resetIn, time.Second) + + // Reset to full. + clk.Add(d.resetIn) + + // Refund 1 request above our limit; this should fail. + txn1, err := newTransaction(limit, bucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + d, err = l.Refund(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + test.Assert(t, !d.allowed, "should not be allowed") + test.AssertEquals(t, d.remaining, int64(20)) + + // Spend so we can refund. + _, err = l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + + // Refund a spendOnly Transaction, which should succeed. + spendOnlyTxn1, err := newSpendOnlyTransaction(limit, bucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + _, err = l.Refund(testCtx, spendOnlyTxn1) + test.AssertNotError(t, err, "should not error") + + // Spend so we can refund. + expectedDecision, err := l.Spend(testCtx, txn1) + test.AssertNotError(t, err, "should not error") + + // Refund a checkOnly Transaction, which shouldn't error but should + // return the same TAT as the previous spend.
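+ // (A refund normally moves the bucket's stored TAT earlier by the refunded cost; a check-only Transaction never wrote a TAT, so there should be nothing to move.)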
+ checkOnlyTxn1, err := newCheckOnlyTransaction(limit, bucketKey, 1) + test.AssertNotError(t, err, "txn should be valid") + newDecision, err := l.Refund(testCtx, checkOnlyTxn1) + test.AssertNotError(t, err, "should not error") + test.AssertEquals(t, newDecision.newTAT, expectedDecision.newTAT) + }) + } +} + +func TestRateLimitError(t *testing.T) { + t.Parallel() + now := clock.NewFake().Now() + + testCases := []struct { + name string + decision *Decision + expectedErr string + expectedErrType berrors.ErrorType + }{ + { + name: "Allowed decision", + decision: &Decision{ + allowed: true, + }, + }, + { + name: "RegistrationsPerIP limit reached", + decision: &Decision{ + allowed: false, + retryIn: 5 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: NewRegistrationsPerIPAddress, + Burst: 10, + Period: config.Duration{Duration: time.Hour}, + }, + }, + }, + expectedErr: "too many new registrations (10) from this IP address in the last 1h0m0s, retry after 1970-01-01 00:00:05 UTC: see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ip-address", + expectedErrType: berrors.RateLimit, + }, + { + name: "RegistrationsPerIPv6Range limit reached", + decision: &Decision{ + allowed: false, + retryIn: 10 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: NewRegistrationsPerIPv6Range, + Burst: 5, + Period: config.Duration{Duration: time.Hour}, + }, + }, + }, + expectedErr: "too many new registrations (5) from this /48 subnet of IPv6 addresses in the last 1h0m0s, retry after 1970-01-01 00:00:10 UTC: see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ipv6-range", + expectedErrType: berrors.RateLimit, + }, + { + name: "NewOrdersPerAccount limit reached", + decision: &Decision{ + allowed: false, + retryIn: 10 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: NewOrdersPerAccount, + Burst: 2, + Period: config.Duration{Duration: time.Hour}, + }, + }, + }, + expectedErr: "too many new orders (2) from this account in the last 1h0m0s, retry after 1970-01-01 00:00:10 UTC: see https://letsencrypt.org/docs/rate-limits/#new-orders-per-account", + expectedErrType: berrors.RateLimit, + }, + { + name: "FailedAuthorizationsPerDomainPerAccount limit reached", + decision: &Decision{ + allowed: false, + retryIn: 15 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: FailedAuthorizationsPerDomainPerAccount, + Burst: 7, + Period: config.Duration{Duration: time.Hour}, + }, + bucketKey: "4:12345:example.com", + }, + }, + expectedErr: "too many failed authorizations (7) for \"example.com\" in the last 1h0m0s, retry after 1970-01-01 00:00:15 UTC: see https://letsencrypt.org/docs/rate-limits/#authorization-failures-per-identifier-per-account", + expectedErrType: berrors.RateLimit, + }, + { + name: "CertificatesPerDomain limit reached", + decision: &Decision{ + allowed: false, + retryIn: 20 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: CertificatesPerDomain, + Burst: 3, + Period: config.Duration{Duration: time.Hour}, + }, + bucketKey: "5:example.org", + }, + }, + expectedErr: "too many certificates (3) already issued for \"example.org\" in the last 1h0m0s, retry after 1970-01-01 00:00:20 UTC: see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain", + expectedErrType: berrors.RateLimit, + }, + { + name: "CertificatesPerDomainPerAccount limit reached", + decision: &Decision{ + allowed: false, + retryIn: 20 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: 
CertificatesPerDomainPerAccount, + Burst: 3, + Period: config.Duration{Duration: time.Hour}, + }, + bucketKey: "6:12345678:example.net", + }, + }, + expectedErr: "too many certificates (3) already issued for \"example.net\" in the last 1h0m0s, retry after 1970-01-01 00:00:20 UTC: see https://letsencrypt.org/docs/rate-limits/#new-certificates-per-registered-domain", + expectedErrType: berrors.RateLimit, + }, + { + name: "LimitOverrideRequestsPerIPAddress limit reached", + decision: &Decision{ + allowed: false, + retryIn: 20 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: LimitOverrideRequestsPerIPAddress, + Burst: 3, + Period: config.Duration{Duration: time.Hour}, + }, + }, + }, + expectedErr: "too many override request form submissions (3) from this IP address in the last 1h0m0s, retry after 1970-01-01 00:00:20 UTC: see https://letsencrypt.org/docs/rate-limits/#new-registrations-per-ip-address", + expectedErrType: berrors.RateLimit, + }, + { + name: "Unknown rate limit name", + decision: &Decision{ + allowed: false, + retryIn: 30 * time.Second, + transaction: Transaction{ + limit: &Limit{ + Name: 9999999, + }, + }, + }, + expectedErr: "cannot generate error for unknown rate limit", + expectedErrType: berrors.InternalServer, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + err := tc.decision.Result(now) + if tc.expectedErr == "" { + test.AssertNotError(t, err, "expected no error") + } else { + test.AssertError(t, err, "expected an error") + test.AssertEquals(t, err.Error(), tc.expectedErr) + test.AssertErrorIs(t, err, tc.expectedErrType) + } + }) + } +} diff --git a/ratelimits/names.go b/ratelimits/names.go new file mode 100644 index 00000000000..c31dcf94898 --- /dev/null +++ b/ratelimits/names.go @@ -0,0 +1,442 @@ +package ratelimits + +import ( + "fmt" + "net/netip" + "strconv" + "strings" + + "github.com/letsencrypt/boulder/iana" + "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/policy" +) + +// Name is an enumeration of all rate limit names. It is used to intern rate +// limit names as strings and to provide a type-safe way to refer to rate +// limits. +// +// IMPORTANT: If you add or remove a limit Name, you MUST update: +// - the string representation of the Name in nameToString, +// - the validators for that name in validateIdForName(), +// - the transaction constructors for that name in transaction.go +// - the Subscriber facing error message in Decision.Result(), and +// - the case in BuildBucketKey() for that name. +type Name int + +const ( + // Unknown is the zero value of Name and is used to indicate an unknown + // limit name. + Unknown Name = iota + + // NewRegistrationsPerIPAddress uses bucket key 'enum:ipAddress'. + NewRegistrationsPerIPAddress + + // NewRegistrationsPerIPv6Range uses bucket key 'enum:ipv6rangeCIDR'. The + // address range must be a /48. RFC 3177, which was published in 2001, + // advised operators to allocate a /48 block of IPv6 addresses for most end + // sites. RFC 6177, which was published in 2011 and obsoletes RFC 3177, + // advises allocating a smaller /56 block. We've chosen to use the larger + // /48 block for our IPv6 rate limiting. See: + // 1. https://tools.ietf.org/html/rfc3177#section-3 + // 2. https://datatracker.ietf.org/doc/html/rfc6177#section-2 + NewRegistrationsPerIPv6Range + + // NewOrdersPerAccount uses bucket key 'enum:regId'. 
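+ // For example (illustrative values): registration ID 12345 would yield the bucket key "3:12345", since NewOrdersPerAccount has enum value 3 and bucket keys render the enum via EnumString().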
+ NewOrdersPerAccount + + // FailedAuthorizationsPerDomainPerAccount uses two different bucket keys + // depending on the context: + // - When referenced in an overrides file: uses bucket key 'enum:regId', + // where regId is the ACME registration Id of the account. + // - When referenced in a transaction: uses bucket key + // 'enum:regId:identValue', where regId is the ACME registration Id of + // the account and identValue is the value of an identifier in the + // certificate. + FailedAuthorizationsPerDomainPerAccount + + // CertificatesPerDomain uses bucket key 'enum:domainOrCIDR', where + // domainOrCIDR is a domain name or IP address in the certificate. It uses + // two different IP address formats depending on the context: + // - When referenced in an overrides file: uses a single IP address. + // - When referenced in a transaction: uses an IP address prefix in CIDR + // notation. IPv4 prefixes must be /32, and IPv6 prefixes must be /64. + // In both cases, IPv6 addresses must be the lowest address in their /64; + // i.e. their last 64 bits must be zero. + CertificatesPerDomain + + // CertificatesPerDomainPerAccount is only used for per-account overrides to + // the CertificatesPerDomain rate limit. If this limit is referenced in the + // default limits file, it will be ignored. It uses two different bucket + // keys depending on the context: + // - When referenced in an overrides file: uses bucket key 'enum:regId', + // where regId is the ACME registration Id of the account. + // - When referenced in a transaction: uses bucket key + // 'enum:regId:domainOrCIDR', where regId is the ACME registration Id of + // the account and domainOrCIDR is either a domain name in the + // certificate or an IP prefix in CIDR notation. + // - IP address formats vary by context, as for CertificatesPerDomain. + // + // When overrides to the CertificatesPerDomainPerAccount are configured for a + // subscriber, the cost: + // - MUST be consumed from each CertificatesPerDomainPerAccount bucket and + // - SHOULD be consumed from each CertificatesPerDomain bucket, if possible. + CertificatesPerDomainPerAccount + + // CertificatesPerFQDNSet uses bucket key 'enum:fqdnSet', where fqdnSet is a + // hashed set of unique identifier values in the certificate. + // + // Note: When this is referenced in an overrides file, the fqdnSet MUST be + // passed as a comma-separated list of identifier values. + CertificatesPerFQDNSet + + // FailedAuthorizationsForPausingPerDomainPerAccount is similar to + // FailedAuthorizationsPerDomainPerAccount in that it uses two different + // bucket keys depending on the context: + // - When referenced in an overrides file: uses bucket key 'enum:regId', + // where regId is the ACME registration Id of the account. + // - When referenced in a transaction: uses bucket key + // 'enum:regId:identValue', where regId is the ACME registration Id of + // the account and identValue is the value of an identifier in the + // certificate. + FailedAuthorizationsForPausingPerDomainPerAccount + + // LimitOverrideRequestsPerIPAddress is used to limit the number of requests + // to the rate limit override request endpoint per IP address. It uses + // bucket key 'enum:ipAddress'. + LimitOverrideRequestsPerIPAddress +) + +// nameToString is a map of Name values to string names. 
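+// Note: isValid() below derives the valid range of Names from len(nameToString), so every Name in the const block above needs an entry here; a missing entry would silently shrink the range of Names considered valid.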
+var nameToString = map[Name]string{ + Unknown: "Unknown", + NewRegistrationsPerIPAddress: "NewRegistrationsPerIPAddress", + NewRegistrationsPerIPv6Range: "NewRegistrationsPerIPv6Range", + NewOrdersPerAccount: "NewOrdersPerAccount", + FailedAuthorizationsPerDomainPerAccount: "FailedAuthorizationsPerDomainPerAccount", + CertificatesPerDomain: "CertificatesPerDomain", + CertificatesPerDomainPerAccount: "CertificatesPerDomainPerAccount", + CertificatesPerFQDNSet: "CertificatesPerFQDNSet", + FailedAuthorizationsForPausingPerDomainPerAccount: "FailedAuthorizationsForPausingPerDomainPerAccount", + LimitOverrideRequestsPerIPAddress: "LimitOverrideRequestsPerIPAddress", +} + +// isValid returns true if the Name is a valid rate limit name. +func (n Name) isValid() bool { + return n > Unknown && n < Name(len(nameToString)) +} + +// String returns the string representation of the Name. It allows Name to +// satisfy the fmt.Stringer interface. +func (n Name) String() string { + if !n.isValid() { + return nameToString[Unknown] + } + return nameToString[n] +} + +// EnumString returns the string representation of the Name enumeration. +func (n Name) EnumString() string { + if !n.isValid() { + return nameToString[Unknown] + } + return strconv.Itoa(int(n)) +} + +// validIPAddress validates that the provided string is a valid IP address. +func validIPAddress(id string) error { + ip, err := netip.ParseAddr(id) + if err != nil { + return fmt.Errorf("invalid IP address, %q must be an IP address", id) + } + canon := ip.String() + if canon != id { + return fmt.Errorf( + "invalid IP address, %q must be in canonical form (%q)", id, canon) + } + return iana.IsReservedAddr(ip) +} + +// validIPv6RangeCIDR validates that the provided string is formatted as an IPv6 +// prefix in CIDR notation, with a /48 mask. +func validIPv6RangeCIDR(id string) error { + prefix, err := netip.ParsePrefix(id) + if err != nil { + return fmt.Errorf( + "invalid CIDR, %q must be an IPv6 CIDR range", id) + } + if prefix.Bits() != 48 { + // This also catches the case where the range is an IPv4 CIDR, since an + // IPv4 CIDR can't have a /48 subnet mask - the maximum is /32. + return fmt.Errorf( + "invalid CIDR, %q must be /48", id) + } + canon := prefix.Masked().String() + if canon != id { + return fmt.Errorf( + "invalid CIDR, %q must be in canonical form (%q)", id, canon) + } + return iana.IsReservedPrefix(prefix) +} + +// validateRegId validates that the provided string is a valid ACME regId. +func validateRegId(id string) error { + _, err := strconv.ParseUint(id, 10, 64) + if err != nil { + return fmt.Errorf("invalid regId, %q must be an ACME registration Id", id) + } + return nil +} + +// validateRegIdIdentValue validates that the provided string is formatted +// 'regId:identValue', where regId is an ACME registration Id and identValue is +// a valid identifier value. 
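+// For example (values borrowed from this package's tests): "12345:example.com" is valid, while "12ea5:example.com" (malformed regId) and "12345" (missing identValue) are not.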
+func validateRegIdIdentValue(id string) error { + regIdIdentValue := strings.Split(id, ":") + if len(regIdIdentValue) != 2 { + return fmt.Errorf( + "invalid regId:identValue, %q must be formatted 'regId:identValue'", id) + } + err := validateRegId(regIdIdentValue[0]) + if err != nil { + return fmt.Errorf( + "invalid regId, %q must be formatted 'regId:identValue'", id) + } + domainErr := policy.ValidDomain(regIdIdentValue[1]) + if domainErr != nil { + ipErr := policy.ValidIP(regIdIdentValue[1]) + if ipErr != nil { + return fmt.Errorf("invalid identValue, %q must be formatted 'regId:identValue': %w as domain, %w as IP", id, domainErr, ipErr) + } + } + return nil +} + +// validateDomainOrCIDR validates that the provided string is either a domain +// name or an IP address. IPv6 addresses must be the lowest address in their +// /64, i.e. their last 64 bits must be zero. +func validateDomainOrCIDR(limit Name, id string) error { + domainErr := policy.ValidDomain(id) + if domainErr == nil { + // This is a valid domain. + return nil + } + + ip, ipErr := netip.ParseAddr(id) + if ipErr != nil { + return fmt.Errorf("%q is neither a domain (%w) nor an IP address (%w)", id, domainErr, ipErr) + } + + if ip.String() != id { + return fmt.Errorf("invalid IP address %q, must be in canonical form (%q)", id, ip.String()) + } + + prefix, prefixErr := coveringIPPrefix(limit, ip) + if prefixErr != nil { + return fmt.Errorf("invalid IP address %q, couldn't determine prefix: %w", id, prefixErr) + } + if prefix.Addr() != ip { + return fmt.Errorf("invalid IP address %q, must be the lowest address in its prefix (%q)", id, prefix.Addr().String()) + } + return iana.IsReservedPrefix(prefix) +} + +// validateRegIdDomainOrCIDR validates that the provided string is formatted +// 'regId:domainOrCIDR', where domainOrCIDR is either a domain name or an IP +// address. IPv6 addresses must be the lowest address in their /64, i.e. their +// last 64 bits must be zero. +func validateRegIdDomainOrCIDR(limit Name, id string) error { + regIdDomainOrCIDR := strings.Split(id, ":") + if len(regIdDomainOrCIDR) != 2 { + return fmt.Errorf( + "invalid regId:domainOrCIDR, %q must be formatted 'regId:domainOrCIDR'", id) + } + err := validateRegId(regIdDomainOrCIDR[0]) + if err != nil { + return fmt.Errorf( + "invalid regId, %q must be formatted 'regId:domainOrCIDR'", id) + } + err = validateDomainOrCIDR(limit, regIdDomainOrCIDR[1]) + if err != nil { + return fmt.Errorf("invalid domainOrCIDR, %q must be formatted 'regId:domainOrCIDR': %w", id, err) + } + return nil +} + +// validateFQDNSet validates that the provided string is formatted 'fqdnSet', +// where fqdnSet is a comma-separated list of identifier values. 
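+// For example (values borrowed from this package's tests): "example.com", "example.com,example.org", and "2602:80a:6000::1,64.112.117.1,example.com" are all valid; each comma-separated member must parse as a domain name or an IP address.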
+func validateFQDNSet(id string) error { + values := strings.Split(id, ",") + if len(values) == 0 { + return fmt.Errorf( + "invalid fqdnSet, %q must be formatted 'fqdnSet'", id) + } + for _, value := range values { + domainErr := policy.ValidDomain(value) + if domainErr != nil { + ipErr := policy.ValidIP(value) + if ipErr != nil { + return fmt.Errorf("invalid fqdnSet member %q: %w as domain, %w as IP", id, domainErr, ipErr) + } + } + } + return nil +} + +func validateIdForName(name Name, id string) error { + switch name { + case NewRegistrationsPerIPAddress, LimitOverrideRequestsPerIPAddress: + // 'enum:ipaddress' + return validIPAddress(id) + + case NewRegistrationsPerIPv6Range: + // 'enum:ipv6rangeCIDR' + return validIPv6RangeCIDR(id) + + case NewOrdersPerAccount: + // 'enum:regId' + return validateRegId(id) + + case FailedAuthorizationsPerDomainPerAccount: + if strings.Contains(id, ":") { + // 'enum:regId:identValue' for transaction + return validateRegIdIdentValue(id) + } else { + // 'enum:regId' for overrides + return validateRegId(id) + } + + case CertificatesPerDomainPerAccount: + if strings.Contains(id, ":") { + // 'enum:regId:domainOrCIDR' for transaction + return validateRegIdDomainOrCIDR(name, id) + } else { + // 'enum:regId' for overrides + return validateRegId(id) + } + + case CertificatesPerDomain: + // 'enum:domainOrCIDR' + return validateDomainOrCIDR(name, id) + + case CertificatesPerFQDNSet: + // 'enum:fqdnSet' + return validateFQDNSet(id) + + case FailedAuthorizationsForPausingPerDomainPerAccount: + if strings.Contains(id, ":") { + // 'enum:regId:identValue' for transaction + return validateRegIdIdentValue(id) + } else { + // 'enum:regId' for overrides + return validateRegId(id) + } + + case Unknown: + fallthrough + + default: + // This should never happen. + return fmt.Errorf("unknown limit enum %q", name) + } +} + +// StringToName is a map of string names to Name values. +var StringToName = func() map[string]Name { + m := make(map[string]Name, len(nameToString)) + for k, v := range nameToString { + m[v] = k + } + return m +}() + +// LimitNames is a slice of all rate limit names. +var LimitNames = func() []string { + names := make([]string, 0, len(nameToString)) + for _, v := range nameToString { + names = append(names, v) + } + return names +}() + +// BuildBucketKey builds a bucketKey for the given rate limit name from the +// provided components. It returns an error if the name is not valid or if the +// components are not valid for the given name. 
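+// +// A minimal usage sketch (values illustrative; the zero values stand in for parameters unused by this limit): +// +// key, err := BuildBucketKey(NewOrdersPerAccount, 1337, identifier.ACMEIdentifier{}, nil, netip.Addr{}) +// // on success, key == "3:1337"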
+func BuildBucketKey(name Name, regId int64, singleIdent identifier.ACMEIdentifier, setOfIdents identifier.ACMEIdentifiers, subscriberIP netip.Addr) (string, error) { + makeMissingErr := func(field string) error { + return fmt.Errorf("%s is required for limit %s (enum: %s)", field, name, name.EnumString()) + } + + switch name { + case NewRegistrationsPerIPAddress, LimitOverrideRequestsPerIPAddress: + if !subscriberIP.IsValid() { + return "", makeMissingErr("subscriberIP") + } + return newIPAddressBucketKey(name, subscriberIP), nil + + case NewRegistrationsPerIPv6Range: + if !subscriberIP.IsValid() { + return "", makeMissingErr("subscriberIP") + } + prefix, err := coveringIPPrefix(name, subscriberIP) + if err != nil { + return "", err + } + return newIPv6RangeCIDRBucketKey(name, prefix), nil + + case NewOrdersPerAccount: + if regId == 0 { + return "", makeMissingErr("regId") + } + return newRegIdBucketKey(name, regId), nil + + case CertificatesPerDomain: + if singleIdent.Value == "" { + return "", makeMissingErr("singleIdent") + } + coveringIdent, err := coveringIdentifier(name, singleIdent) + if err != nil { + return "", err + } + return newDomainOrCIDRBucketKey(name, coveringIdent), nil + + case CertificatesPerDomainPerAccount: + if singleIdent.Value != "" { + if regId == 0 { + return "", makeMissingErr("regId") + } + // Default: use 'enum:regId:identValue' bucket key format. + coveringIdent, err := coveringIdentifier(name, singleIdent) + if err != nil { + return "", err + } + return newRegIdIdentValueBucketKey(name, regId, coveringIdent), nil + } + if regId == 0 { + return "", makeMissingErr("regId") + } + // Override: use 'enum:regId' bucket key format. + return newRegIdBucketKey(name, regId), nil + + case CertificatesPerFQDNSet: + if len(setOfIdents) == 0 { + return "", makeMissingErr("setOfIdents") + } + return newFQDNSetBucketKey(name, setOfIdents), nil + + case FailedAuthorizationsPerDomainPerAccount, FailedAuthorizationsForPausingPerDomainPerAccount: + if singleIdent.Value != "" { + if regId == 0 { + return "", makeMissingErr("regId") + } + // Default: use 'enum:regId:identValue' bucket key format. + return newRegIdIdentValueBucketKey(name, regId, singleIdent.Value), nil + } + if regId == 0 { + return "", makeMissingErr("regId") + } + // Override: use 'enum:regId' bucket key format. 
+ return newRegIdBucketKey(name, regId), nil + } + + return "", fmt.Errorf("unknown limit enum %s", name.EnumString()) +} diff --git a/ratelimits/names_test.go b/ratelimits/names_test.go new file mode 100644 index 00000000000..1c65c936ab7 --- /dev/null +++ b/ratelimits/names_test.go @@ -0,0 +1,497 @@ +package ratelimits + +import ( + "fmt" + "net/netip" + "strings" + "testing" + + "github.com/letsencrypt/boulder/identifier" + "github.com/letsencrypt/boulder/test" +) + +func TestNameIsValid(t *testing.T) { + t.Parallel() + type args struct { + name Name + } + tests := []struct { + name string + args args + want bool + }{ + {name: "Unknown", args: args{name: Unknown}, want: false}, + {name: "9001", args: args{name: 9001}, want: false}, + {name: "NewRegistrationsPerIPAddress", args: args{name: NewRegistrationsPerIPAddress}, want: true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.args.name.isValid() + test.AssertEquals(t, tt.want, got) + }) + } +} + +func TestValidateIdForName(t *testing.T) { + t.Parallel() + + testCases := []struct { + limit Name + desc string + id string + err string + }{ + { + limit: NewRegistrationsPerIPAddress, + desc: "valid IPv4 address", + id: "64.112.117.1", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "reserved IPv4 address", + id: "10.0.0.1", + err: "in a reserved address block", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "valid IPv6 address", + id: "2602:80a:6000::42:42", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "IPv6 address in non-canonical form", + id: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + err: "must be in canonical form", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "empty string", + id: "", + err: "must be an IP address", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "one space", + id: " ", + err: "must be an IP address", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "invalid IPv4 address", + id: "10.0.0.9000", + err: "must be an IP address", + }, + { + limit: NewRegistrationsPerIPAddress, + desc: "invalid IPv6 address", + id: "2001:0db8:85a3:0000:0000:8a2e:0370:7334:9000", + err: "must be an IP address", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "valid IPv6 address range", + id: "2602:80a:6000::/48", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "IPv6 address range in non-canonical form", + id: "2602:080a:6000::/48", + err: "must be in canonical form", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "IPv6 address range with low bits set", + id: "2602:080a:6000::1/48", + err: "must be in canonical form", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "invalid IPv6 CIDR range", + id: "2001:0db8:0000::/128", + err: "must be /48", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "invalid IPv6 CIDR", + id: "2001:0db8:0000::/48/48", + err: "must be an IPv6 CIDR range", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "IPv4 CIDR when we expect IPv6 CIDR range", + id: "10.0.0.0/16", + err: "must be /48", + }, + { + limit: NewRegistrationsPerIPv6Range, + desc: "IPv4 CIDR with invalid long mask", + id: "10.0.0.0/48", + err: "must be an IPv6 CIDR range", + }, + { + limit: NewOrdersPerAccount, + desc: "valid regId", + id: "1234567890", + }, + { + limit: NewOrdersPerAccount, + desc: "invalid regId", + id: "lol", + err: "must be an ACME registration Id", + }, + { + limit: FailedAuthorizationsPerDomainPerAccount, + desc: "transaction: valid regId and domain", + id: "12345:example.com", + }, + { + limit: 
FailedAuthorizationsPerDomainPerAccount, + desc: "transaction: invalid regId", + id: "12ea5:example.com", + err: "invalid regId", + }, + { + limit: FailedAuthorizationsPerDomainPerAccount, + desc: "transaction: invalid domain", + id: "12345:examplecom", + err: "name needs at least one dot", + }, + { + limit: FailedAuthorizationsPerDomainPerAccount, + desc: "override: valid regId", + id: "12345", + }, + { + limit: FailedAuthorizationsPerDomainPerAccount, + desc: "override: invalid regId", + id: "12ea5", + err: "invalid regId", + }, + { + limit: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "transaction: valid regId and domain", + id: "12345:example.com", + }, + { + limit: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "transaction: invalid regId", + id: "12ea5:example.com", + err: "invalid regId", + }, + { + limit: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "transaction: invalid domain", + id: "12345:examplecom", + err: "name needs at least one dot", + }, + { + limit: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "override: valid regId", + id: "12345", + }, + { + limit: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "override: invalid regId", + id: "12ea5", + err: "invalid regId", + }, + { + limit: CertificatesPerDomainPerAccount, + desc: "transaction: valid regId and domain", + id: "12345:example.com", + }, + { + limit: CertificatesPerDomainPerAccount, + desc: "transaction: invalid regId", + id: "12ea5:example.com", + err: "invalid regId", + }, + { + limit: CertificatesPerDomainPerAccount, + desc: "transaction: invalid domain", + id: "12345:examplecom", + err: "name needs at least one dot", + }, + { + limit: CertificatesPerDomainPerAccount, + desc: "override: valid regId", + id: "12345", + }, + { + limit: CertificatesPerDomainPerAccount, + desc: "override: invalid regId", + id: "12ea5", + err: "invalid regId", + }, + { + limit: CertificatesPerDomain, + desc: "valid domain", + id: "example.com", + }, + { + limit: CertificatesPerDomain, + desc: "valid IPv4 address", + id: "64.112.117.1", + }, + { + limit: CertificatesPerDomain, + desc: "valid IPv6 address", + id: "2602:80a:6000::", + }, + { + limit: CertificatesPerDomain, + desc: "IPv6 address with subnet", + id: "2602:80a:6000::/64", + err: "nor an IP address", + }, + { + limit: CertificatesPerDomain, + desc: "malformed domain", + id: "example:.com", + err: "name contains an invalid character", + }, + { + limit: CertificatesPerDomain, + desc: "empty domain", + id: "", + err: "Identifier value (name) is empty", + }, + { + limit: CertificatesPerFQDNSet, + desc: "valid fqdnSet containing a single domain", + id: "example.com", + }, + { + limit: CertificatesPerFQDNSet, + desc: "valid fqdnSet containing a single IPv4 address", + id: "64.112.117.1", + }, + { + limit: CertificatesPerFQDNSet, + desc: "valid fqdnSet containing a single IPv6 address", + id: "2602:80a:6000::1", + }, + { + limit: CertificatesPerFQDNSet, + desc: "valid fqdnSet containing multiple domains", + id: "example.com,example.org", + }, + { + limit: CertificatesPerFQDNSet, + desc: "valid fqdnSet containing multiple domains and IPs", + id: "2602:80a:6000::1,64.112.117.1,example.com,example.org", + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s/%s", tc.limit, tc.desc), func(t *testing.T) { + t.Parallel() + err := validateIdForName(tc.limit, tc.id) + if tc.err != "" { + test.AssertError(t, err, "should have failed") + test.AssertContains(t, err.Error(), tc.err) + } else { + test.AssertNotError(t, 
err, "should have succeeded") + } + }) + } +} + +func TestBuildBucketKey(t *testing.T) { + t.Parallel() + + tests := []struct { + name Name + desc string + regId int64 + singleIdent identifier.ACMEIdentifier + setOfIdents identifier.ACMEIdentifiers + subscriberIP netip.Addr + expectErrContains string + outputTest func(t *testing.T, key string) + }{ + // NewRegistrationsPerIPAddress + { + name: NewRegistrationsPerIPAddress, + desc: "valid subscriber IPv4 address", + subscriberIP: netip.MustParseAddr("1.2.3.4"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1.2.3.4", NewRegistrationsPerIPAddress), key) + }, + }, + { + name: NewRegistrationsPerIPAddress, + desc: "valid subscriber IPv6 address", + subscriberIP: netip.MustParseAddr("2001:db8::1"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:2001:db8::1", NewRegistrationsPerIPAddress), key) + }, + }, + // NewRegistrationsPerIPv6Range + { + name: NewRegistrationsPerIPv6Range, + desc: "valid subscriber IPv6 address", + subscriberIP: netip.MustParseAddr("2001:db8:abcd:12::1"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:2001:db8:abcd::/48", NewRegistrationsPerIPv6Range), key) + }, + }, + { + name: NewRegistrationsPerIPv6Range, + desc: "subscriber IPv4 given for subscriber IPv6 range limit", + subscriberIP: netip.MustParseAddr("1.2.3.4"), + expectErrContains: "requires an IPv6 address", + }, + + // NewOrdersPerAccount + { + name: NewOrdersPerAccount, + desc: "valid registration ID", + regId: 1337, + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337", NewOrdersPerAccount), key) + }, + }, + { + name: NewOrdersPerAccount, + desc: "registration ID missing", + expectErrContains: "regId is required", + }, + + // CertificatesPerDomain + { + name: CertificatesPerDomain, + desc: "DNS identifier to eTLD+1", + singleIdent: identifier.NewDNS("www.example.com"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:example.com", CertificatesPerDomain), key) + }, + }, + { + name: CertificatesPerDomain, + desc: "valid IPv4 address used as identifier", + singleIdent: identifier.NewIP(netip.MustParseAddr("5.6.7.8")), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:5.6.7.8/32", CertificatesPerDomain), key) + }, + }, + { + name: CertificatesPerDomain, + desc: "valid IPv6 address used as identifier", + singleIdent: identifier.NewIP(netip.MustParseAddr("2001:db8::1")), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:2001:db8::/64", CertificatesPerDomain), key) + }, + }, + { + name: CertificatesPerDomain, + desc: "identifier missing", + expectErrContains: "singleIdent is required", + }, + + // CertificatesPerFQDNSet + { + name: CertificatesPerFQDNSet, + desc: "multiple valid DNS identifiers", + setOfIdents: identifier.NewDNSSlice([]string{"example.com", "example.org"}), + outputTest: func(t *testing.T, key string) { + if !strings.HasPrefix(key, fmt.Sprintf("%d:", CertificatesPerFQDNSet)) { + t.Errorf("expected key to start with %d: got %s", CertificatesPerFQDNSet, key) + } + }, + }, + { + name: CertificatesPerFQDNSet, + desc: "multiple valid DNS and IP identifiers", + setOfIdents: identifier.ACMEIdentifiers{identifier.NewDNS("example.net"), identifier.NewIP(netip.MustParseAddr("5.6.7.8")), identifier.NewIP(netip.MustParseAddr("2001:db8::1"))}, + outputTest: func(t *testing.T, key string) { + if 
!strings.HasPrefix(key, fmt.Sprintf("%d:", CertificatesPerFQDNSet)) { + t.Errorf("expected key to start with %d: got %s", CertificatesPerFQDNSet, key) + } + }, + }, + { + name: CertificatesPerFQDNSet, + desc: "identifiers missing", + expectErrContains: "setOfIdents is required", + }, + + // CertificatesPerDomainPerAccount + { + name: CertificatesPerDomainPerAccount, + desc: "only registration ID", + regId: 1337, + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337", CertificatesPerDomainPerAccount), key) + }, + }, + { + name: CertificatesPerDomainPerAccount, + desc: "registration ID and single DNS identifier provided", + regId: 1337, + singleIdent: identifier.NewDNS("example.com"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337:example.com", CertificatesPerDomainPerAccount), key) + }, + }, + { + name: CertificatesPerDomainPerAccount, + desc: "single DNS identifier provided without registration ID", + singleIdent: identifier.NewDNS("example.com"), + expectErrContains: "regId is required", + }, + + // FailedAuthorizationsPerDomainPerAccount + { + name: FailedAuthorizationsPerDomainPerAccount, + desc: "registration ID and single DNS identifier", + regId: 1337, + singleIdent: identifier.NewDNS("example.com"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337:example.com", FailedAuthorizationsPerDomainPerAccount), key) + }, + }, + { + name: FailedAuthorizationsPerDomainPerAccount, + desc: "only registration ID", + regId: 1337, + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337", FailedAuthorizationsPerDomainPerAccount), key) + }, + }, + + // FailedAuthorizationsForPausingPerDomainPerAccount + { + name: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "registration ID and single DNS identifier", + regId: 1337, + singleIdent: identifier.NewDNS("example.com"), + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337:example.com", FailedAuthorizationsForPausingPerDomainPerAccount), key) + }, + }, + { + name: FailedAuthorizationsForPausingPerDomainPerAccount, + desc: "only registration ID", + regId: 1337, + outputTest: func(t *testing.T, key string) { + test.AssertEquals(t, fmt.Sprintf("%d:1337", FailedAuthorizationsForPausingPerDomainPerAccount), key) + }, + }, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("%s/%s", tc.name, tc.desc), func(t *testing.T) { + t.Parallel() + + key, err := BuildBucketKey(tc.name, tc.regId, tc.singleIdent, tc.setOfIdents, tc.subscriberIP) + if tc.expectErrContains != "" { + test.AssertError(t, err, "expected error") + test.AssertContains(t, err.Error(), tc.expectErrContains) + return + } + test.AssertNotError(t, err, "unexpected error") + tc.outputTest(t, key) + }) + } +} diff --git a/ratelimits/source.go b/ratelimits/source.go new file mode 100644 index 00000000000..7e070948218 --- /dev/null +++ b/ratelimits/source.go @@ -0,0 +1,141 @@ +package ratelimits + +import ( + "context" + "fmt" + "maps" + "sync" + "time" +) + +// ErrBucketNotFound indicates that the bucket was not found. +var ErrBucketNotFound = fmt.Errorf("bucket not found") + +// Source is an interface for creating and modifying TATs. +type Source interface { + // BatchSet stores the TATs at the specified bucketKeys (formatted as + // 'name:id'). 
Implementations MUST ensure non-blocking operations by + // either: + // a) applying a deadline or timeout to the context WITHIN the method, or + // b) guaranteeing the operation will not block indefinitely (e.g. via + // the underlying storage client implementation). + BatchSet(ctx context.Context, bucketKeys map[string]time.Time) error + + // BatchSetNotExisting attempts to set TATs for the specified bucketKeys if + // they do not already exist. Returns a map indicating which keys already + // exist. + BatchSetNotExisting(ctx context.Context, buckets map[string]time.Time) (map[string]bool, error) + + // BatchIncrement updates the TATs for the specified bucketKeys, similar to + // BatchSet. Implementations MUST ensure non-blocking operations by either: + // a) applying a deadline or timeout to the context WITHIN the method, or + // b) guaranteeing the operation will not block indefinitely (e.g. via + // the underlying storage client implementation). + BatchIncrement(ctx context.Context, buckets map[string]increment) error + + // Get retrieves the TAT associated with the specified bucketKey (formatted + // as 'name:id'). Implementations MUST ensure non-blocking operations by + // either: + // a) applying a deadline or timeout to the context WITHIN the method, or + // b) guaranteeing the operation will not block indefinitely (e.g. via + // the underlying storage client implementation). + Get(ctx context.Context, bucketKey string) (time.Time, error) + + // BatchGet retrieves the TATs associated with the specified bucketKeys + // (formatted as 'name:id'). Implementations MUST ensure non-blocking + // operations by either: + // a) applying a deadline or timeout to the context WITHIN the method, or + // b) guaranteeing the operation will not block indefinitely (e.g. via + // the underlying storage client implementation). + BatchGet(ctx context.Context, bucketKeys []string) (map[string]time.Time, error) + + // BatchDelete removes the TATs associated with the specified bucketKeys + // (formatted as 'name:id'). Implementations MUST ensure non-blocking + // operations by either: + // a) applying a deadline or timeout to the context WITHIN the method, or + // b) guaranteeing the operation will not block indefinitely (e.g. via + // the underlying storage client implementation). + BatchDelete(ctx context.Context, bucketKeys []string) error +} + +type increment struct { + cost time.Duration + ttl time.Duration +} + +// inmem is an in-memory implementation of the source interface used for +// testing. 
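+// Note that, unlike the Redis-backed source, inmem never expires entries: BatchIncrement ignores the supplied TTL and BatchSet keeps keys until they are explicitly deleted. That is fine for tests, which drive time with a fake clock.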
+type inmem struct { + sync.RWMutex + m map[string]time.Time +} + +var _ Source = (*inmem)(nil) + +func NewInmemSource() *inmem { + return &inmem{m: make(map[string]time.Time)} +} + +func (in *inmem) BatchSet(_ context.Context, bucketKeys map[string]time.Time) error { + in.Lock() + defer in.Unlock() + maps.Copy(in.m, bucketKeys) + return nil +} + +func (in *inmem) BatchSetNotExisting(_ context.Context, bucketKeys map[string]time.Time) (map[string]bool, error) { + in.Lock() + defer in.Unlock() + alreadyExists := make(map[string]bool, len(bucketKeys)) + for k, v := range bucketKeys { + _, ok := in.m[k] + if ok { + alreadyExists[k] = true + } else { + in.m[k] = v + } + } + return alreadyExists, nil +} + +func (in *inmem) BatchIncrement(_ context.Context, bucketKeys map[string]increment) error { + in.Lock() + defer in.Unlock() + for k, v := range bucketKeys { + in.m[k] = in.m[k].Add(v.cost) + } + return nil +} + +func (in *inmem) Get(_ context.Context, bucketKey string) (time.Time, error) { + in.RLock() + defer in.RUnlock() + tat, ok := in.m[bucketKey] + if !ok { + return time.Time{}, ErrBucketNotFound + } + return tat, nil +} + +func (in *inmem) BatchGet(_ context.Context, bucketKeys []string) (map[string]time.Time, error) { + in.RLock() + defer in.RUnlock() + tats := make(map[string]time.Time, len(bucketKeys)) + for _, k := range bucketKeys { + tat, ok := in.m[k] + if !ok { + continue + } + tats[k] = tat + } + return tats, nil +} + +func (in *inmem) BatchDelete(_ context.Context, bucketKeys []string) error { + in.Lock() + defer in.Unlock() + for _, bucketKey := range bucketKeys { + delete(in.m, bucketKey) + } + return nil +} diff --git a/ratelimits/source_redis.go b/ratelimits/source_redis.go new file mode 100644 index 00000000000..05562bb8869 --- /dev/null +++ b/ratelimits/source_redis.go @@ -0,0 +1,260 @@ +package ratelimits + +import ( + "context" + "errors" + "net" + "time" + + "github.com/jmhodges/clock" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/redis/go-redis/v9" +) + +// Compile-time check that RedisSource implements the source interface. +var _ Source = (*RedisSource)(nil) + +// RedisSource is a ratelimits source backed by sharded Redis. +type RedisSource struct { + client *redis.Ring + clk clock.Clock + latency *prometheus.HistogramVec +} + +// NewRedisSource returns a new Redis backed source using the provided +// *redis.Ring client. +func NewRedisSource(client *redis.Ring, clk clock.Clock, stats prometheus.Registerer) *RedisSource { + latency := promauto.With(stats).NewHistogramVec(prometheus.HistogramOpts{ + Name: "ratelimits_latency", + Help: "Histogram of Redis call latencies labeled by call=[set|get|delete|ping] and result=[success|error]", + // Exponential buckets ranging from 0.0005s to 3s. + Buckets: prometheus.ExponentialBucketsRange(0.0005, 3, 8), + }, []string{"call", "result"}) + + return &RedisSource{ + client: client, + clk: clk, + latency: latency, + } +} + +var errMixedSuccess = errors.New("some keys not found") + +// resultForError returns a string representing the result of the operation +// based on the provided error. +func resultForError(err error) string { + if errors.Is(err, errMixedSuccess) { + // Indicates that some of the keys in a batch operation were not found. + return "mixedSuccess" + } else if errors.Is(err, redis.Nil) { + // Bucket key does not exist.
+ return "notFound" + } else if errors.Is(err, context.DeadlineExceeded) { + // Client read or write deadline exceeded. + return "deadlineExceeded" + } else if errors.Is(err, context.Canceled) { + // Caller canceled the operation. + return "canceled" + } + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + // Dialer timed out connecting to Redis. + return "timeout" + } + var redisErr redis.Error + if errors.As(err, &redisErr) { + // An internal error was returned by the Redis server. + return "redisError" + } + return "failed" +} + +func (r *RedisSource) observeLatency(call string, latency time.Duration, err error) { + result := "success" + if err != nil { + result = resultForError(err) + } + r.latency.With(prometheus.Labels{"call": call, "result": result}).Observe(latency.Seconds()) +} + +// BatchSet stores TATs at the specified bucketKeys using a pipelined Redis +// Transaction in order to reduce the number of round-trips to each Redis shard. +func (r *RedisSource) BatchSet(ctx context.Context, buckets map[string]time.Time) error { + start := r.clk.Now() + + pipeline := r.client.Pipeline() + for bucketKey, tat := range buckets { + // Set a TTL of TAT + 10 minutes to account for clock skew. + ttl := tat.UTC().Sub(r.clk.Now()) + 10*time.Minute + pipeline.Set(ctx, bucketKey, tat.UTC().UnixNano(), ttl) + } + _, err := pipeline.Exec(ctx) + if err != nil { + r.observeLatency("batchset", r.clk.Since(start), err) + return err + } + + totalLatency := r.clk.Since(start) + + r.observeLatency("batchset", totalLatency, nil) + return nil +} + +// BatchSetNotExisting attempts to set TATs for the specified bucketKeys if they +// do not already exist. Returns a map indicating which keys already existed. +func (r *RedisSource) BatchSetNotExisting(ctx context.Context, buckets map[string]time.Time) (map[string]bool, error) { + start := r.clk.Now() + + pipeline := r.client.Pipeline() + cmds := make(map[string]*redis.BoolCmd, len(buckets)) + for bucketKey, tat := range buckets { + // Set a TTL of TAT + 10 minutes to account for clock skew. + ttl := tat.UTC().Sub(r.clk.Now()) + 10*time.Minute + cmds[bucketKey] = pipeline.SetNX(ctx, bucketKey, tat.UTC().UnixNano(), ttl) + } + _, err := pipeline.Exec(ctx) + if err != nil { + r.observeLatency("batchsetnotexisting", r.clk.Since(start), err) + return nil, err + } + + alreadyExists := make(map[string]bool, len(buckets)) + totalLatency := r.clk.Since(start) + for bucketKey, cmd := range cmds { + success, err := cmd.Result() + if err != nil { + return nil, err + } + if !success { + alreadyExists[bucketKey] = true + } + } + + r.observeLatency("batchsetnotexisting", totalLatency, nil) + return alreadyExists, nil +} + +// BatchIncrement updates TATs for the specified bucketKeys using a pipelined +// Redis Transaction in order to reduce the number of round-trips to each Redis +// shard. +func (r *RedisSource) BatchIncrement(ctx context.Context, buckets map[string]increment) error { + start := r.clk.Now() + + pipeline := r.client.Pipeline() + for bucketKey, incr := range buckets { + pipeline.IncrBy(ctx, bucketKey, incr.cost.Nanoseconds()) + pipeline.Expire(ctx, bucketKey, incr.ttl) + } + _, err := pipeline.Exec(ctx) + if err != nil { + r.observeLatency("batchincrby", r.clk.Since(start), err) + return err + } + + totalLatency := r.clk.Since(start) + r.observeLatency("batchincrby", totalLatency, nil) + return nil +} + +// Get retrieves the TAT at the specified bucketKey. If the bucketKey does not +// exist, ErrBucketNotFound is returned.
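+// +// TATs are stored in Redis as int64 UnixNano values (see BatchSet and BatchIncrement above), so reads parse that integer back into a UTC time.Time.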
+func (r *RedisSource) Get(ctx context.Context, bucketKey string) (time.Time, error) { + start := r.clk.Now() + + tatNano, err := r.client.Get(ctx, bucketKey).Int64() + if err != nil { + if errors.Is(err, redis.Nil) { + // Bucket key does not exist. + r.observeLatency("get", r.clk.Since(start), err) + return time.Time{}, ErrBucketNotFound + } + // An error occurred while retrieving the TAT. + r.observeLatency("get", r.clk.Since(start), err) + return time.Time{}, err + } + + r.observeLatency("get", r.clk.Since(start), nil) + return time.Unix(0, tatNano).UTC(), nil +} + +// BatchGet retrieves the TATs at the specified bucketKeys using a pipelined +// Redis Transaction in order to reduce the number of round-trips to each Redis +// shard. If a bucketKey does not exist, it WILL NOT be included in the returned +// map. +func (r *RedisSource) BatchGet(ctx context.Context, bucketKeys []string) (map[string]time.Time, error) { + start := r.clk.Now() + + pipeline := r.client.Pipeline() + for _, bucketKey := range bucketKeys { + pipeline.Get(ctx, bucketKey) + } + results, err := pipeline.Exec(ctx) + if err != nil && !errors.Is(err, redis.Nil) { + r.observeLatency("batchget", r.clk.Since(start), err) + return nil, err + } + + totalLatency := r.clk.Since(start) + + tats := make(map[string]time.Time, len(bucketKeys)) + notFoundCount := 0 + for i, result := range results { + tatNano, err := result.(*redis.StringCmd).Int64() + if err != nil { + if !errors.Is(err, redis.Nil) { + // This should never happen as any errors should have been + // caught after the pipeline.Exec() call. + r.observeLatency("batchget", r.clk.Since(start), err) + return nil, err + } + notFoundCount++ + continue + } + tats[bucketKeys[i]] = time.Unix(0, tatNano).UTC() + } + + var batchErr error + if notFoundCount > 0 && notFoundCount < len(results) { + // Some, but not all, keys were not found. + batchErr = errMixedSuccess + } else if notFoundCount == len(results) { + // All keys were not found. + batchErr = redis.Nil + } + + r.observeLatency("batchget", totalLatency, batchErr) + return tats, nil +} + +// BatchDelete deletes the TATs at the specified bucketKeys ('name:id'). A nil +// return value does not indicate whether the bucketKeys existed. +func (r *RedisSource) BatchDelete(ctx context.Context, bucketKeys []string) error { + start := r.clk.Now() + + err := r.client.Del(ctx, bucketKeys...).Err() + if err != nil { + r.observeLatency("delete", r.clk.Since(start), err) + return err + } + + r.observeLatency("delete", r.clk.Since(start), nil) + return nil +} + +// Ping checks that each shard of the *redis.Ring is reachable using the PING +// command.
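+// An error from any single shard fails the whole call, which makes Ping usable as a readiness check for the entire ring.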
+func (r *RedisSource) Ping(ctx context.Context) error { + start := r.clk.Now() + + err := r.client.ForEachShard(ctx, func(ctx context.Context, shard *redis.Client) error { + return shard.Ping(ctx).Err() + }) + if err != nil { + r.observeLatency("ping", r.clk.Since(start), err) + return err + } + + r.observeLatency("ping", r.clk.Since(start), nil) + return nil +} diff --git a/ratelimits/source_redis_test.go b/ratelimits/source_redis_test.go new file mode 100644 index 00000000000..56b7deacd6e --- /dev/null +++ b/ratelimits/source_redis_test.go @@ -0,0 +1,116 @@ +package ratelimits + +import ( + "context" + "testing" + "time" + + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/test" + + "github.com/jmhodges/clock" + "github.com/redis/go-redis/v9" +) + +func newTestRedisSource(clk clock.FakeClock, addrs map[string]string) *RedisSource { + CACertFile := "../test/certs/ipki/minica.pem" + CertFile := "../test/certs/ipki/localhost/cert.pem" + KeyFile := "../test/certs/ipki/localhost/key.pem" + tlsConfig := cmd.TLSConfig{ + CACertFile: CACertFile, + CertFile: CertFile, + KeyFile: KeyFile, + } + tlsConfig2, err := tlsConfig.Load(metrics.NoopRegisterer) + if err != nil { + panic(err) + } + + client := redis.NewRing(&redis.RingOptions{ + Addrs: addrs, + Username: "boulder", + Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d", + TLSConfig: tlsConfig2, + }) + return NewRedisSource(client, clk, metrics.NoopRegisterer) +} + +func newRedisTestLimiter(t *testing.T, clk clock.FakeClock) *Limiter { + return newTestLimiter(t, newTestRedisSource(clk, map[string]string{ + "shard1": "10.77.77.4:4218", + "shard2": "10.77.77.5:4218", + }), clk) +} + +func TestRedisSource_Ping(t *testing.T) { + clk := clock.NewFake() + workingSource := newTestRedisSource(clk, map[string]string{ + "shard1": "10.77.77.4:4218", + "shard2": "10.77.77.5:4218", + }) + + err := workingSource.Ping(context.Background()) + test.AssertNotError(t, err, "Ping should not error") + + missingFirstShardSource := newTestRedisSource(clk, map[string]string{ + "shard1": "10.77.77.4:1337", + "shard2": "10.77.77.5:4218", + }) + + err = missingFirstShardSource.Ping(context.Background()) + test.AssertError(t, err, "Ping should error when the first shard is unreachable") + + missingSecondShardSource := newTestRedisSource(clk, map[string]string{ + "shard1": "10.77.77.4:4218", + "shard2": "10.77.77.5:1337", + }) + + err = missingSecondShardSource.Ping(context.Background()) + test.AssertError(t, err, "Ping should error when the second shard is unreachable") +} + +func TestRedisSource_BatchSetAndGet(t *testing.T) { + clk := clock.NewFake() + s := newTestRedisSource(clk, map[string]string{ + "shard1": "10.77.77.4:4218", + "shard2": "10.77.77.5:4218", + }) + + set := map[string]time.Time{ + "test1": clk.Now().Add(time.Second), + "test2": clk.Now().Add(time.Second * 2), + "test3": clk.Now().Add(time.Second * 3), + } + + incr := map[string]increment{ + "test1": {time.Second, time.Minute}, + "test2": {time.Second * 2, time.Minute}, + "test3": {time.Second * 3, time.Minute}, + } + + err := s.BatchSet(context.Background(), set) + test.AssertNotError(t, err, "BatchSet() should not error") + + got, err := s.BatchGet(context.Background(), []string{"test1", "test2", "test3"}) + test.AssertNotError(t, err, "BatchGet() should not error") + + for k, v := range set { + test.AssertEquals(t, got[k], v) + } + + err = s.BatchIncrement(context.Background(), incr) + test.AssertNotError(t, err, "BatchIncrement() should not error") + + got, err = s.BatchGet(context.Background(),
[]string{"test1", "test2", "test3"}) + test.AssertNotError(t, err, "BatchGet() should not error") + + for k := range set { + test.AssertEquals(t, got[k], set[k].Add(incr[k].cost)) + } + + // Test that BatchGet() returns a zero time for a key that does not exist. + got, err = s.BatchGet(context.Background(), []string{"test1", "test4", "test3"}) + test.AssertNotError(t, err, "BatchGet() should not error when a key isn't found") + test.Assert(t, got["test4"].IsZero(), "BatchGet() should return a zero time for a key that does not exist") +} diff --git a/ratelimits/source_test.go b/ratelimits/source_test.go new file mode 100644 index 00000000000..a2347c8bc21 --- /dev/null +++ b/ratelimits/source_test.go @@ -0,0 +1,11 @@ +package ratelimits + +import ( + "testing" + + "github.com/jmhodges/clock" +) + +func newInmemTestLimiter(t *testing.T, clk clock.FakeClock) *Limiter { + return newTestLimiter(t, NewInmemSource(), clk) +} diff --git a/ratelimits/testdata/busted_default_burst_0.yml b/ratelimits/testdata/busted_default_burst_0.yml new file mode 100644 index 00000000000..26a2466ad02 --- /dev/null +++ b/ratelimits/testdata/busted_default_burst_0.yml @@ -0,0 +1,4 @@ +NewRegistrationsPerIPAddress: + burst: 0 + count: 20 + period: 1s diff --git a/ratelimits/testdata/busted_default_empty_name.yml b/ratelimits/testdata/busted_default_empty_name.yml new file mode 100644 index 00000000000..981c58536f0 --- /dev/null +++ b/ratelimits/testdata/busted_default_empty_name.yml @@ -0,0 +1,4 @@ +"": + burst: 20 + count: 20 + period: 1s diff --git a/ratelimits/testdata/busted_default_invalid_name.yml b/ratelimits/testdata/busted_default_invalid_name.yml new file mode 100644 index 00000000000..bf41b326d7e --- /dev/null +++ b/ratelimits/testdata/busted_default_invalid_name.yml @@ -0,0 +1,4 @@ +UsageRequestsPerIPv10Address: + burst: 20 + count: 20 + period: 1s diff --git a/ratelimits/testdata/busted_defaults_second_entry_bad_name.yml b/ratelimits/testdata/busted_defaults_second_entry_bad_name.yml new file mode 100644 index 00000000000..cc276a869b9 --- /dev/null +++ b/ratelimits/testdata/busted_defaults_second_entry_bad_name.yml @@ -0,0 +1,8 @@ +NewRegistrationsPerIPAddress: + burst: 20 + count: 20 + period: 1s +UsageRequestsPerIPv10Address: + burst: 20 + count: 20 + period: 1s diff --git a/ratelimits/testdata/busted_override_burst_0.yml b/ratelimits/testdata/busted_override_burst_0.yml new file mode 100644 index 00000000000..9110fc1aaa9 --- /dev/null +++ b/ratelimits/testdata/busted_override_burst_0.yml @@ -0,0 +1,7 @@ +- NewRegistrationsPerIPAddress: + burst: 0 + count: 40 + period: 1s + ids: + - id: 55.66.77.88 + comment: Foo diff --git a/ratelimits/testdata/busted_override_empty_id.yml b/ratelimits/testdata/busted_override_empty_id.yml new file mode 100644 index 00000000000..2db8c8de587 --- /dev/null +++ b/ratelimits/testdata/busted_override_empty_id.yml @@ -0,0 +1,5 @@ +- UsageRequestsPerIPv10Address: + burst: 40 + count: 40 + period: 1s + ids: [] diff --git a/ratelimits/testdata/busted_override_empty_name.yml b/ratelimits/testdata/busted_override_empty_name.yml new file mode 100644 index 00000000000..27825eee5db --- /dev/null +++ b/ratelimits/testdata/busted_override_empty_name.yml @@ -0,0 +1,7 @@ +- "": + burst: 40 + count: 40 + period: 1s + ids: + - id: 10.0.0.2 + comment: Foo diff --git a/ratelimits/testdata/busted_override_invalid_name.yml b/ratelimits/testdata/busted_override_invalid_name.yml new file mode 100644 index 00000000000..6160de758f1 --- /dev/null +++ 
b/ratelimits/testdata/busted_override_invalid_name.yml @@ -0,0 +1,7 @@ +- UsageRequestsPerIPv10Address: + burst: 40 + count: 40 + period: 1s + ids: + - id: 10.0.0.2 + comment: Foo diff --git a/ratelimits/testdata/busted_overrides_second_entry_bad_name.yml b/ratelimits/testdata/busted_overrides_second_entry_bad_name.yml new file mode 100644 index 00000000000..147ab5b1a9e --- /dev/null +++ b/ratelimits/testdata/busted_overrides_second_entry_bad_name.yml @@ -0,0 +1,14 @@ +- NewRegistrationsPerIPAddress: + burst: 40 + count: 40 + period: 1s + ids: + - id: 10.0.0.2 + comment: Foo +- UsageRequestsPerIPv10Address: + burst: 40 + count: 40 + period: 1s + ids: + - id: 10.0.0.5 + comment: Bar diff --git a/ratelimits/testdata/busted_overrides_third_entry_bad_id.yml b/ratelimits/testdata/busted_overrides_third_entry_bad_id.yml new file mode 100644 index 00000000000..e46b8d690ba --- /dev/null +++ b/ratelimits/testdata/busted_overrides_third_entry_bad_id.yml @@ -0,0 +1,11 @@ +- NewRegistrationsPerIPAddress: + burst: 40 + count: 40 + period: 1s + ids: + - id: 10.0.0.5 + comment: Foo + - id: 10.0.0.2 + comment: Bar + - id: lol + comment: Baz diff --git a/ratelimits/testdata/working_default.yml b/ratelimits/testdata/working_default.yml new file mode 100644 index 00000000000..1c0c63bce5e --- /dev/null +++ b/ratelimits/testdata/working_default.yml @@ -0,0 +1,4 @@ +NewRegistrationsPerIPAddress: + burst: 20 + count: 20 + period: 1s diff --git a/ratelimits/testdata/working_defaults.yml b/ratelimits/testdata/working_defaults.yml new file mode 100644 index 00000000000..be5988b7a2c --- /dev/null +++ b/ratelimits/testdata/working_defaults.yml @@ -0,0 +1,8 @@ +NewRegistrationsPerIPAddress: + burst: 20 + count: 20 + period: 1s +NewRegistrationsPerIPv6Range: + burst: 30 + count: 30 + period: 2s diff --git a/ratelimits/testdata/working_override.yml b/ratelimits/testdata/working_override.yml new file mode 100644 index 00000000000..447658d9a65 --- /dev/null +++ b/ratelimits/testdata/working_override.yml @@ -0,0 +1,7 @@ +- NewRegistrationsPerIPAddress: + burst: 40 + count: 40 + period: 1s + ids: + - id: 64.112.117.1 + comment: Foo diff --git a/ratelimits/testdata/working_override_13371338.yml b/ratelimits/testdata/working_override_13371338.yml new file mode 100644 index 00000000000..97327e510d6 --- /dev/null +++ b/ratelimits/testdata/working_override_13371338.yml @@ -0,0 +1,21 @@ +- CertificatesPerDomainPerAccount: + burst: 1337 + count: 1337 + period: 2160h + ids: + - id: 13371338 + comment: Used to test the TransactionBuilder +- FailedAuthorizationsPerDomainPerAccount: + burst: 1337 + count: 1337 + period: 5m + ids: + - id: 13371338 + comment: Used to test the TransactionBuilder +- FailedAuthorizationsForPausingPerDomainPerAccount: + burst: 1337 + count: 1 + period: 24h + ids: + - id: 13371338 + comment: Used to test the TransactionBuilder diff --git a/ratelimits/testdata/working_override_regid_domainorcidr.yml b/ratelimits/testdata/working_override_regid_domainorcidr.yml new file mode 100644 index 00000000000..81ac3a56147 --- /dev/null +++ b/ratelimits/testdata/working_override_regid_domainorcidr.yml @@ -0,0 +1,7 @@ +- CertificatesPerDomain: + burst: 40 + count: 40 + period: 1s + ids: + - id: example.com + comment: Foo diff --git a/ratelimits/testdata/working_overrides.yml b/ratelimits/testdata/working_overrides.yml new file mode 100644 index 00000000000..be1479f12d5 --- /dev/null +++ b/ratelimits/testdata/working_overrides.yml @@ -0,0 +1,33 @@ +- NewRegistrationsPerIPAddress: + burst: 40 + count: 40 + period: 1s + ids: 
+ - id: 64.112.117.1 + comment: Foo +- NewRegistrationsPerIPv6Range: + burst: 50 + count: 50 + period: 2s + ids: + - id: 2602:80a:6000::/48 + comment: Foo +- FailedAuthorizationsPerDomainPerAccount: + burst: 60 + count: 60 + period: 3s + ids: + - id: 1234 + comment: Foo + - id: 5678 + comment: Foo + +- FailedAuthorizationsForPausingPerDomainPerAccount: + burst: 60 + count: 60 + period: 3s + ids: + - id: 1234 + comment: Foo + - id: 5678 + comment: Foo diff --git a/ratelimits/testdata/working_overrides_regid_fqdnset.yml b/ratelimits/testdata/working_overrides_regid_fqdnset.yml new file mode 100644 index 00000000000..ef98663fb78 --- /dev/null +++ b/ratelimits/testdata/working_overrides_regid_fqdnset.yml @@ -0,0 +1,28 @@ +- CertificatesPerFQDNSet: + burst: 40 + count: 40 + period: 1s + ids: + - id: example.com + comment: Foo +- CertificatesPerFQDNSet: + burst: 50 + count: 50 + period: 2s + ids: + - id: "example.com,example.net" + comment: Foo +- CertificatesPerFQDNSet: + burst: 60 + count: 60 + period: 3s + ids: + - id: "example.com,example.net,example.org" + comment: Foo +- CertificatesPerFQDNSet: + burst: 60 + count: 60 + period: 4s + ids: + - id: "2602:80a:6000::1,9.9.9.9,example.com" + comment: Foo diff --git a/ratelimits/transaction.go b/ratelimits/transaction.go new file mode 100644 index 00000000000..877198ad3bc --- /dev/null +++ b/ratelimits/transaction.go @@ -0,0 +1,758 @@ +package ratelimits + +import ( + "context" + "errors" + "fmt" + "io" + "net/netip" + "strconv" + "time" + + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/identifier" + blog "github.com/letsencrypt/boulder/log" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +// ErrInvalidCost indicates that the cost specified was < 0. +var ErrInvalidCost = fmt.Errorf("invalid cost, must be >= 0") + +// ErrInvalidCostOverLimit indicates that the cost specified was > limit.Burst. +var ErrInvalidCostOverLimit = fmt.Errorf("invalid cost, must be <= limit.Burst") + +// newIPAddressBucketKey returns a bucketKey for limits that use +// the 'enum:ipAddress' bucket key format. +func newIPAddressBucketKey(name Name, ip netip.Addr) string { + return joinWithColon(name.EnumString(), ip.String()) +} + +// newIPv6RangeCIDRBucketKey returns a bucketKey for limits that +// use the 'enum:ipv6RangeCIDR' bucket key format. +func newIPv6RangeCIDRBucketKey(name Name, prefix netip.Prefix) string { + return joinWithColon(name.EnumString(), prefix.String()) +} + +// newRegIdBucketKey returns a bucketKey for limits that use the +// 'enum:regId' bucket key format. +func newRegIdBucketKey(name Name, regId int64) string { + return joinWithColon(name.EnumString(), strconv.FormatInt(regId, 10)) +} + +// newDomainOrCIDRBucketKey returns a bucketKey for limits that use +// the 'enum:domainOrCIDR' bucket key formats. +func newDomainOrCIDRBucketKey(name Name, domainOrCIDR string) string { + return joinWithColon(name.EnumString(), domainOrCIDR) +} + +// newRegIdIdentValueBucketKey returns a bucketKey for limits that use the +// 'enum:regId:identValue' bucket key format. 
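Including newRegIdIdentValueBucketKey just below, these helpers all reduce to joinWithColon over the limit's enum string and one or more identifying values. A minimal sketch of the keys produced, assuming the enum strings ("1", "4") that the bucket keys asserted in transaction_test.go later in this diff imply:

// Sketch only, not part of this patch. The enum strings are assumptions
// consistent with the bucket keys asserted in transaction_test.go.
ipKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, netip.MustParseAddr("10.0.0.1"))
// ipKey == "1:10.0.0.1"
regIdKey := newRegIdBucketKey(FailedAuthorizationsPerDomainPerAccount, 123456789)
// regIdKey == "4:123456789"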
+func newRegIdIdentValueBucketKey(name Name, regId int64, orderIdent string) string {
+	return joinWithColon(name.EnumString(), strconv.FormatInt(regId, 10), orderIdent)
+}
+
+// newFQDNSetBucketKey returns a bucketKey for limits that use the
+// 'enum:fqdnSet' bucket key format.
+func newFQDNSetBucketKey(name Name, orderIdents identifier.ACMEIdentifiers) string {
+	return joinWithColon(name.EnumString(), fmt.Sprintf("%x", core.HashIdentifiers(orderIdents)))
+}
+
+// Transaction represents a single rate limit operation. It includes a
+// bucketKey, which combines the specific rate limit enum with a unique
+// identifier to form the key where the state of the "bucket" can be referenced
+// or stored by the Limiter; the rate limit being enforced; a cost, which MUST
+// be >= 0; and check/spend/reset fields, which indicate how the Transaction
+// should be processed. The following are the acceptable combinations of those
+// fields:
+//   - check-and-spend: when check and spend are both true, the cost will be
+//     checked against the bucket's capacity and spent/refunded, when possible.
+//   - check-only: when only check is true, the cost will be checked against the
+//     bucket's capacity, but will never be spent/refunded.
+//   - spend-only: when only spend is true, spending is best-effort. Regardless
+//     of the bucket's capacity, the transaction will be considered "allowed".
+//   - reset-only: when only reset is true, the bucket will be reset to full
+//     capacity.
+//   - allow-only: when none of check, spend, or reset are true, the
+//     transaction will be considered "allowed" regardless of the bucket's
+//     capacity. This is useful for limits that are disabled.
+//
+// The zero value of Transaction is an allow-only transaction and is valid even
+// if it would fail validateTransaction (for instance because cost and burst
+// are zero).
+type Transaction struct {
+	bucketKey string
+	limit     *Limit
+	cost      int64
+	check     bool
+	spend     bool
+	reset     bool
+}
+
+func (txn Transaction) checkOnly() bool {
+	return txn.check && !txn.spend && !txn.reset
+}
+
+func (txn Transaction) spendOnly() bool {
+	return txn.spend && !txn.check && !txn.reset
+}
+
+func (txn Transaction) allowOnly() bool {
+	return !txn.check && !txn.spend && !txn.reset
+}
+
+func (txn Transaction) resetOnly() bool {
+	return txn.reset && !txn.check && !txn.spend
+}
+
+func validateTransaction(txn Transaction) (Transaction, error) {
+	if txn.limit == nil {
+		return Transaction{}, fmt.Errorf("invalid limit, must not be nil")
+	}
+	if txn.reset {
+		if txn.check || txn.spend {
+			return Transaction{}, fmt.Errorf("invalid reset transaction, check and spend must be false")
+		}
+		if txn.limit.Burst == 0 {
+			return Transaction{}, fmt.Errorf("invalid limit, burst must be > 0")
+		}
+		return txn, nil
+	}
+	if txn.cost < 0 {
+		return Transaction{}, ErrInvalidCost
+	}
+	if txn.limit.Burst == 0 {
+		// This should never happen. If the limit was loaded from a file,
+		// Burst was validated then. If this is a zero-valued Transaction
+		// (that is, an allow-only transaction), then validateTransaction
+		// shouldn't be called, because zero-valued transactions are
+		// automatically valid.
+ return Transaction{}, fmt.Errorf("invalid limit, burst must be > 0") + } + if txn.cost > txn.limit.Burst { + return Transaction{}, ErrInvalidCostOverLimit + } + return txn, nil +} + +func newTransaction(limit *Limit, bucketKey string, cost int64) (Transaction, error) { + return validateTransaction(Transaction{ + bucketKey: bucketKey, + limit: limit, + cost: cost, + check: true, + spend: true, + }) +} + +func newCheckOnlyTransaction(limit *Limit, bucketKey string, cost int64) (Transaction, error) { + return validateTransaction(Transaction{ + bucketKey: bucketKey, + limit: limit, + cost: cost, + check: true, + }) +} + +func newSpendOnlyTransaction(limit *Limit, bucketKey string, cost int64) (Transaction, error) { + return validateTransaction(Transaction{ + bucketKey: bucketKey, + limit: limit, + cost: cost, + spend: true, + }) +} + +func newResetTransaction(limit *Limit, bucketKey string) (Transaction, error) { + return validateTransaction(Transaction{ + bucketKey: bucketKey, + limit: limit, + reset: true, + }) +} + +func newAllowOnlyTransaction() Transaction { + // Zero values are sufficient. + return Transaction{} +} + +// TransactionBuilder is used to build Transactions for various rate limits. +// Each rate limit has a corresponding method that returns a Transaction for +// that limit. Call NewTransactionBuilder to create a new *TransactionBuilder. +type TransactionBuilder struct { + *limitRegistry +} + +func (builder *TransactionBuilder) Ready() bool { + return builder.limitRegistry.overridesLoaded +} + +// GetOverridesFunc is used to pass in the sa.GetEnabledRateLimitOverrides +// method to NewTransactionBuilderFromDatabase, rather than storing a full +// sa.SQLStorageAuthority. This makes testing significantly simpler. +type GetOverridesFunc func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) + +// NewTransactionBuilderFromDatabase returns a new *TransactionBuilder. The +// provided defaults path is expected to be a path to a YAML file that contains +// the default limits. The provided overrides function is expected to be an SA's +// GetEnabledRateLimitOverrides. Both are required. 
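Before NewTransactionBuilderFromDatabase itself, a quick sketch of how the constructors above map onto Transaction's predicates; limit here stands for any valid *Limit with Burst > 0, and the bucket key is illustrative:

// Sketch only, not part of this patch; limit is assumed to be a valid
// *Limit with Burst > 0.
txn, _ := newTransaction(limit, "4:123456789:example.com", 1)
// txn.check && txn.spend == true (check-and-spend)
txn, _ = newCheckOnlyTransaction(limit, "4:123456789:example.com", 1)
// txn.checkOnly() == true
txn, _ = newSpendOnlyTransaction(limit, "4:123456789:example.com", 1)
// txn.spendOnly() == true
txn, _ = newResetTransaction(limit, "4:123456789:example.com")
// txn.resetOnly() == true
// The zero value, returned by newAllowOnlyTransaction(), is allow-only.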
+func NewTransactionBuilderFromDatabase(defaults string, overrides GetOverridesFunc, stats prometheus.Registerer, logger blog.Logger) (*TransactionBuilder, error) { + defaultsData, err := loadDefaultsFromFile(defaults) + if err != nil { + return nil, err + } + + refresher := func(ctx context.Context, errorGauge prometheus.Gauge, logger blog.Logger) (Limits, error) { + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + stream, err := overrides(ctx, &emptypb.Empty{}) + if err != nil { + return nil, fmt.Errorf("fetching enabled overrides: %w", err) + } + + overrides := make(Limits) + var errorCount float64 + for { + resp, err := stream.Recv() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("reading overrides stream: %w", err) + } + + override := &Limit{ + Burst: resp.Override.Burst, + Count: resp.Override.Count, + Period: config.Duration{Duration: resp.Override.Period.AsDuration()}, + Name: Name(resp.Override.LimitEnum), + isOverride: true, + } + + err = ValidateLimit(override) + if err != nil { + logger.Errf("hydrating %s override with key %q: %s", override.Name.String(), resp.Override.BucketKey, err) + errorCount++ + continue + } + + overrides[resp.Override.BucketKey] = override + } + errorGauge.Set(errorCount) + return overrides, nil + } + + return NewTransactionBuilder(defaultsData, refresher, stats, logger) +} + +// NewTransactionBuilderFromFiles returns a new *TransactionBuilder. The +// provided defaults and overrides paths are expected to be paths to YAML files +// that contain the default and override limits, respectively. Overrides is +// optional, defaults is required. +func NewTransactionBuilderFromFiles(defaults string, overrides string, stats prometheus.Registerer, logger blog.Logger) (*TransactionBuilder, error) { + defaultsData, err := loadDefaultsFromFile(defaults) + if err != nil { + return nil, err + } + + if overrides == "" { + return NewTransactionBuilder(defaultsData, nil, stats, logger) + } + + refresher := func(ctx context.Context, _ prometheus.Gauge, _ blog.Logger) (Limits, error) { + overridesData, err := loadOverridesFromFile(overrides) + if err != nil { + return nil, err + } + return parseOverrideLimits(overridesData) + } + + return NewTransactionBuilder(defaultsData, refresher, stats, logger) +} + +// NewTransactionBuilder returns a new *TransactionBuilder. A defaults map is +// required. 
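Before the base constructor, a caller-side sketch of the file-based path above, mirroring how the tests in transaction_test.go wire it up; the paths here are hypothetical:

// Sketch only, not part of this patch; paths are hypothetical.
tb, err := NewTransactionBuilderFromFiles("defaults.yml", "overrides.yml", metrics.NoopRegisterer, blog.NewMock())
if err != nil {
	// handle error
}
// When an overrides source is configured, overrides are hydrated by the
// refresher; the tests below trigger this explicitly:
err = tb.loadOverrides(context.Background())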
+func NewTransactionBuilder(defaultConfigs LimitConfigs, refresher OverridesRefresher, stats prometheus.Registerer, logger blog.Logger) (*TransactionBuilder, error) { + defaults, err := parseDefaultLimits(defaultConfigs) + if err != nil { + return nil, err + } + + if refresher == nil { + refresher = func(context.Context, prometheus.Gauge, blog.Logger) (Limits, error) { + return nil, nil + } + } + + overridesTimestamp := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Namespace: "ratelimits", + Subsystem: "overrides", + Name: "timestamp_seconds", + Help: "A gauge with the last timestamp when overrides were successfully loaded", + }) + + overridesErrors := promauto.With(stats).NewGauge(prometheus.GaugeOpts{ + Namespace: "ratelimits", + Subsystem: "overrides", + Name: "errors", + Help: "A gauge with the number of errors while last trying to load overrides", + }) + + overridesPerLimit := promauto.With(stats).NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "ratelimits", + Subsystem: "overrides", + Name: "active", + Help: "A gauge with the number of overrides, partitioned by rate limit", + }, []string{"limit"}) + + registry := &limitRegistry{ + defaults: defaults, + refreshOverrides: refresher, + logger: logger, + + overridesTimestamp: overridesTimestamp, + overridesErrors: overridesErrors, + overridesPerLimit: *overridesPerLimit, + } + + return &TransactionBuilder{registry}, nil +} + +// registrationsPerIPAddressTransaction returns a Transaction for the +// NewRegistrationsPerIPAddress limit for the provided IP address. +func (builder *TransactionBuilder) registrationsPerIPAddressTransaction(ip netip.Addr) (Transaction, error) { + bucketKey := newIPAddressBucketKey(NewRegistrationsPerIPAddress, ip) + limit, err := builder.getLimit(NewRegistrationsPerIPAddress, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newTransaction(limit, bucketKey, 1) +} + +// registrationsPerIPv6RangeTransaction returns a Transaction for the +// NewRegistrationsPerIPv6Range limit for the /48 IPv6 range which contains the +// provided IPv6 address. +func (builder *TransactionBuilder) registrationsPerIPv6RangeTransaction(ip netip.Addr) (Transaction, error) { + prefix, err := coveringIPPrefix(NewRegistrationsPerIPv6Range, ip) + if err != nil { + return Transaction{}, fmt.Errorf("computing covering prefix for %q: %w", ip, err) + } + bucketKey := newIPv6RangeCIDRBucketKey(NewRegistrationsPerIPv6Range, prefix) + + limit, err := builder.getLimit(NewRegistrationsPerIPv6Range, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newTransaction(limit, bucketKey, 1) +} + +// ordersPerAccountTransaction returns a Transaction for the NewOrdersPerAccount +// limit for the provided ACME registration Id. +func (builder *TransactionBuilder) ordersPerAccountTransaction(regId int64) (Transaction, error) { + bucketKey := newRegIdBucketKey(NewOrdersPerAccount, regId) + limit, err := builder.getLimit(NewOrdersPerAccount, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newTransaction(limit, bucketKey, 1) +} + +// FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions returns a slice +// of Transactions for the provided order identifiers. An error is returned if +// any of the order identifiers' values are invalid. 
This method should be used +// for checking capacity, before allowing more authorizations to be created. +// +// Precondition: len(orderIdents) < maxNames. +func (builder *TransactionBuilder) FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) { + // FailedAuthorizationsPerDomainPerAccount limit uses the 'enum:regId' + // bucket key format for overrides. + perAccountBucketKey := newRegIdBucketKey(FailedAuthorizationsPerDomainPerAccount, regId) + limit, err := builder.getLimit(FailedAuthorizationsPerDomainPerAccount, perAccountBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return []Transaction{newAllowOnlyTransaction()}, nil + } + return nil, err + } + + var txns []Transaction + for _, ident := range orderIdents { + // FailedAuthorizationsPerDomainPerAccount limit uses the + // 'enum:regId:identValue' bucket key format for transactions. + perIdentValuePerAccountBucketKey := newRegIdIdentValueBucketKey(FailedAuthorizationsPerDomainPerAccount, regId, ident.Value) + + // Add a check-only transaction for each per identValue per account + // bucket. + txn, err := newCheckOnlyTransaction(limit, perIdentValuePerAccountBucketKey, 1) + if err != nil { + return nil, err + } + txns = append(txns, txn) + } + return txns, nil +} + +// FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction returns a spend- +// only Transaction for the provided order identifier. An error is returned if +// the order identifier's value is invalid. This method should be used for +// spending capacity, as a result of a failed authorization. +func (builder *TransactionBuilder) FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(regId int64, orderIdent identifier.ACMEIdentifier) (Transaction, error) { + // FailedAuthorizationsPerDomainPerAccount limit uses the 'enum:regId' + // bucket key format for overrides. + perAccountBucketKey := newRegIdBucketKey(FailedAuthorizationsPerDomainPerAccount, regId) + limit, err := builder.getLimit(FailedAuthorizationsPerDomainPerAccount, perAccountBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + + // FailedAuthorizationsPerDomainPerAccount limit uses the + // 'enum:regId:identValue' bucket key format for transactions. + perIdentValuePerAccountBucketKey := newRegIdIdentValueBucketKey(FailedAuthorizationsPerDomainPerAccount, regId, orderIdent.Value) + txn, err := newSpendOnlyTransaction(limit, perIdentValuePerAccountBucketKey, 1) + if err != nil { + return Transaction{}, err + } + + return txn, nil +} + +// FailedAuthorizationsForPausingPerDomainPerAccountTransaction returns a +// Transaction for the provided order identifier. An error is returned if the +// order identifier's value is invalid. This method should be used for spending +// capacity, as a result of a failed authorization. +func (builder *TransactionBuilder) FailedAuthorizationsForPausingPerDomainPerAccountTransaction(regId int64, orderIdent identifier.ACMEIdentifier) (Transaction, error) { + // FailedAuthorizationsForPausingPerDomainPerAccount limit uses the 'enum:regId' + // bucket key format for overrides. 
+	perAccountBucketKey := newRegIdBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId)
+	limit, err := builder.getLimit(FailedAuthorizationsForPausingPerDomainPerAccount, perAccountBucketKey)
+	if err != nil {
+		if errors.Is(err, errLimitDisabled) {
+			return newAllowOnlyTransaction(), nil
+		}
+		return Transaction{}, err
+	}
+
+	// FailedAuthorizationsForPausingPerDomainPerAccount limit uses the
+	// 'enum:regId:identValue' bucket key format for transactions.
+	perIdentValuePerAccountBucketKey := newRegIdIdentValueBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId, orderIdent.Value)
+	txn, err := newTransaction(limit, perIdentValuePerAccountBucketKey, 1)
+	if err != nil {
+		return Transaction{}, err
+	}
+
+	return txn, nil
+}
+
+// certificatesPerDomainCheckOnlyTransactions returns a slice of Transactions
+// for the provided order identifiers. It returns an error if any of the order
+// identifiers' values are invalid. If a CertificatesPerDomainPerAccount
+// override is active, a check-only Transaction is created for each per account
+// per domainOrCIDR bucket. Otherwise, a check-only Transaction is generated
+// for each global per domainOrCIDR bucket. This method should be used for
+// checking capacity, before allowing more orders to be created.
+//
+// Precondition: All orderIdents must comply with policy.WellFormedIdentifiers.
+func (builder *TransactionBuilder) certificatesPerDomainCheckOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) {
+	if len(orderIdents) > 100 {
+		return nil, fmt.Errorf("unwilling to process more than 100 rate limit transactions, got %d", len(orderIdents))
+	}
+
+	perAccountLimitBucketKey := newRegIdBucketKey(CertificatesPerDomainPerAccount, regId)
+	accountOverride := true
+	perAccountLimit, err := builder.getLimit(CertificatesPerDomainPerAccount, perAccountLimitBucketKey)
+	if err != nil {
+		// The CertificatesPerDomainPerAccount limit never has a default. If there is an override for it,
+		// the above call will return the override. But if there is none, it will return errLimitDisabled.
+		// In that case we want to continue, but make sure we don't reference `perAccountLimit` because it
+		// is not a valid limit.
+		if errors.Is(err, errLimitDisabled) {
+			accountOverride = false
+		} else {
+			return nil, err
+		}
+	}
+
+	coveringIdents, err := coveringIdentifiers(orderIdents)
+	if err != nil {
+		return nil, err
+	}
+
+	var txns []Transaction
+	for _, ident := range coveringIdents {
+		perDomainOrCIDRBucketKey := newDomainOrCIDRBucketKey(CertificatesPerDomain, ident)
+		if accountOverride {
+			if !perAccountLimit.isOverride {
+				return nil, fmt.Errorf("shouldn't happen: CertificatesPerDomainPerAccount limit is not an override")
+			}
+			perAccountPerDomainOrCIDRBucketKey := newRegIdIdentValueBucketKey(CertificatesPerDomainPerAccount, regId, ident)
+			// Add a check-only transaction for each per account per identValue
+			// bucket.
+			txn, err := newCheckOnlyTransaction(perAccountLimit, perAccountPerDomainOrCIDRBucketKey, 1)
+			if err != nil {
+				if errors.Is(err, errLimitDisabled) {
+					continue
+				}
+				return nil, err
+			}
+			txns = append(txns, txn)
+		} else {
+			// Use the per domainOrCIDR bucket key when no per account per
+			// domainOrCIDR override is configured.
+ perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + continue + } + return nil, err + } + // Add a check-only transaction for each per domainOrCIDR bucket. + txn, err := newCheckOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1) + if err != nil { + return nil, err + } + txns = append(txns, txn) + } + } + return txns, nil +} + +// CertificatesPerDomainSpendOnlyTransactions returns a slice of Transactions +// for the provided order identifiers. It returns an error if any of the order +// identifiers' values are invalid. If a CertificatesPerDomainPerAccount +// override is configured, it generates two types of Transactions: +// - A spend-only Transaction for each per-account, per-domainOrCIDR bucket, +// which enforces the limit on certificates issued per domainOrCIDR for +// each account. +// - A spend-only Transaction for each per-domainOrCIDR bucket, which +// enforces the global limit on certificates issued per domainOrCIDR. +// +// If no CertificatesPerDomainPerAccount override is present, it returns a +// spend-only Transaction for each global per-domainOrCIDR bucket. This method +// should be used for spending capacity, when a certificate is issued. +// +// Precondition: orderIdents must all pass policy.WellFormedIdentifiers. +func (builder *TransactionBuilder) CertificatesPerDomainSpendOnlyTransactions(regId int64, orderIdents identifier.ACMEIdentifiers) ([]Transaction, error) { + if len(orderIdents) > 100 { + return nil, fmt.Errorf("unwilling to process more than 100 rate limit transactions, got %d", len(orderIdents)) + } + + perAccountLimitBucketKey := newRegIdBucketKey(CertificatesPerDomainPerAccount, regId) + accountOverride := true + perAccountLimit, err := builder.getLimit(CertificatesPerDomainPerAccount, perAccountLimitBucketKey) + if err != nil { + // The CertificatesPerDomainPerAccount limit never has a default. If there is an override for it, + // the above call will return the override. But if there is none, it will return errLimitDisabled. + // In that case we want to continue, but make sure we don't reference `perAccountLimit` because it + // is not a valid limit. + if errors.Is(err, errLimitDisabled) { + accountOverride = false + } else { + return nil, err + } + } + + coveringIdents, err := coveringIdentifiers(orderIdents) + if err != nil { + return nil, err + } + + var txns []Transaction + for _, ident := range coveringIdents { + perDomainOrCIDRBucketKey := newDomainOrCIDRBucketKey(CertificatesPerDomain, ident) + if accountOverride { + if !perAccountLimit.isOverride { + return nil, fmt.Errorf("shouldn't happen: CertificatesPerDomainPerAccount limit is not an override") + } + perAccountPerDomainOrCIDRBucketKey := newRegIdIdentValueBucketKey(CertificatesPerDomainPerAccount, regId, ident) + // Add a spend-only transaction for each per account per + // domainOrCIDR bucket. + txn, err := newSpendOnlyTransaction(perAccountLimit, perAccountPerDomainOrCIDRBucketKey, 1) + if err != nil { + return nil, err + } + txns = append(txns, txn) + + perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + continue + } + return nil, err + } + + // Add a spend-only transaction for each per domainOrCIDR bucket. 
+ txn, err = newSpendOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1) + if err != nil { + return nil, err + } + txns = append(txns, txn) + } else { + // Use the per domainOrCIDR bucket key when no per account per + // domainOrCIDR override is configured. + perDomainOrCIDRLimit, err := builder.getLimit(CertificatesPerDomain, perDomainOrCIDRBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + continue + } + return nil, err + } + // Add a spend-only transaction for each per domainOrCIDR bucket. + txn, err := newSpendOnlyTransaction(perDomainOrCIDRLimit, perDomainOrCIDRBucketKey, 1) + if err != nil { + return nil, err + } + txns = append(txns, txn) + } + } + return txns, nil +} + +// certificatesPerFQDNSetCheckOnlyTransaction returns a check-only Transaction +// for the provided order identifiers. This method should only be used for +// checking capacity, before allowing more orders to be created. +func (builder *TransactionBuilder) certificatesPerFQDNSetCheckOnlyTransaction(orderIdents identifier.ACMEIdentifiers) (Transaction, error) { + bucketKey := newFQDNSetBucketKey(CertificatesPerFQDNSet, orderIdents) + limit, err := builder.getLimit(CertificatesPerFQDNSet, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newCheckOnlyTransaction(limit, bucketKey, 1) +} + +// CertificatesPerFQDNSetSpendOnlyTransaction returns a spend-only Transaction +// for the provided order identifiers. This method should only be used for +// spending capacity, when a certificate is issued. +func (builder *TransactionBuilder) CertificatesPerFQDNSetSpendOnlyTransaction(orderIdents identifier.ACMEIdentifiers) (Transaction, error) { + bucketKey := newFQDNSetBucketKey(CertificatesPerFQDNSet, orderIdents) + limit, err := builder.getLimit(CertificatesPerFQDNSet, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newSpendOnlyTransaction(limit, bucketKey, 1) +} + +// NewOrderLimitTransactions takes in values from a new-order request and +// returns the set of rate limit transactions that should be evaluated before +// allowing the request to proceed. +// +// Precondition: idents must be a list of identifiers that all pass +// policy.WellFormedIdentifiers. +func (builder *TransactionBuilder) NewOrderLimitTransactions(regId int64, idents identifier.ACMEIdentifiers, isRenewal bool) ([]Transaction, error) { + makeTxnError := func(err error, limit Name) error { + return fmt.Errorf("error constructing rate limit transaction for %s rate limit: %w", limit, err) + } + + var transactions []Transaction + if !isRenewal { + txn, err := builder.ordersPerAccountTransaction(regId) + if err != nil { + return nil, makeTxnError(err, NewOrdersPerAccount) + } + transactions = append(transactions, txn) + } + + txns, err := builder.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(regId, idents) + if err != nil { + return nil, makeTxnError(err, FailedAuthorizationsPerDomainPerAccount) + } + transactions = append(transactions, txns...) + + if !isRenewal { + txns, err := builder.certificatesPerDomainCheckOnlyTransactions(regId, idents) + if err != nil { + return nil, makeTxnError(err, CertificatesPerDomain) + } + transactions = append(transactions, txns...) 
+ } + + txn, err := builder.certificatesPerFQDNSetCheckOnlyTransaction(idents) + if err != nil { + return nil, makeTxnError(err, CertificatesPerFQDNSet) + } + return append(transactions, txn), nil +} + +// NewAccountLimitTransactions takes in an IP address from a new-account request +// and returns the set of rate limit transactions that should be evaluated +// before allowing the request to proceed. +func (builder *TransactionBuilder) NewAccountLimitTransactions(ip netip.Addr) ([]Transaction, error) { + makeTxnError := func(err error, limit Name) error { + return fmt.Errorf("error constructing rate limit transaction for %s rate limit: %w", limit, err) + } + + var transactions []Transaction + txn, err := builder.registrationsPerIPAddressTransaction(ip) + if err != nil { + return nil, makeTxnError(err, NewRegistrationsPerIPAddress) + } + transactions = append(transactions, txn) + + if ip.Is4() { + // This request was made from an IPv4 address. + return transactions, nil + } + + txn, err = builder.registrationsPerIPv6RangeTransaction(ip) + if err != nil { + return nil, makeTxnError(err, NewRegistrationsPerIPv6Range) + } + return append(transactions, txn), nil +} + +func (builder *TransactionBuilder) NewPausingResetTransactions(regId int64, orderIdent identifier.ACMEIdentifier) ([]Transaction, error) { + perAccountBucketKey := newRegIdBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId) + limit, err := builder.getLimit(FailedAuthorizationsForPausingPerDomainPerAccount, perAccountBucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return []Transaction{newAllowOnlyTransaction()}, nil + } + return nil, err + } + + perIdentValuePerAccountBucketKey := newRegIdIdentValueBucketKey(FailedAuthorizationsForPausingPerDomainPerAccount, regId, orderIdent.Value) + txn, err := newResetTransaction(limit, perIdentValuePerAccountBucketKey) + if err != nil { + return nil, err + } + + return []Transaction{txn}, nil +} + +// LimitOverrideRequestsPerIPAddressTransaction returns a Transaction for the +// LimitOverrideRequestsPerIPAddress limit for the provided IP address. This +// limit is used to rate limit requests to the SFE override request endpoint. 
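Before the final per-IP method, a caller-side sketch of the two batch constructors above; tb, regID, and the identifiers are hypothetical:

// Sketch only, not part of this patch; values are hypothetical.
idents := identifier.NewDNSSlice([]string{"example.com", "www.example.com"})
orderTxns, err := tb.NewOrderLimitTransactions(regID, idents, false)
// orderTxns now holds the new-order, failed-authorization, per-domain,
// and FQDN-set transactions assembled above.
acctTxns, err := tb.NewAccountLimitTransactions(netip.MustParseAddr("2001:db8::1"))
// For an IPv6 address this yields two transactions: one for the address
// itself and one for its covering /48 range.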
+func (builder *TransactionBuilder) LimitOverrideRequestsPerIPAddressTransaction(ip netip.Addr) (Transaction, error) { + bucketKey := newIPAddressBucketKey(LimitOverrideRequestsPerIPAddress, ip) + limit, err := builder.getLimit(LimitOverrideRequestsPerIPAddress, bucketKey) + if err != nil { + if errors.Is(err, errLimitDisabled) { + return newAllowOnlyTransaction(), nil + } + return Transaction{}, err + } + return newTransaction(limit, bucketKey, 1) +} diff --git a/ratelimits/transaction_test.go b/ratelimits/transaction_test.go new file mode 100644 index 00000000000..76d1dad71ae --- /dev/null +++ b/ratelimits/transaction_test.go @@ -0,0 +1,344 @@ +package ratelimits + +import ( + "context" + "errors" + "fmt" + "net/netip" + "sort" + "testing" + "time" + + io_prometheus_client "github.com/prometheus/client_model/go" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/config" + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/identifier" + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/mocks" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test" +) + +func TestNewTransactionBuilderFromFiles_WithBadLimitsPath(t *testing.T) { + t.Parallel() + _, err := NewTransactionBuilderFromFiles("testdata/does-not-exist.yml", "", metrics.NoopRegisterer, blog.NewMock()) + test.AssertError(t, err, "should error") + + _, err = NewTransactionBuilderFromFiles("testdata/defaults.yml", "testdata/does-not-exist.yml", metrics.NoopRegisterer, blog.NewMock()) + test.AssertError(t, err, "should error") +} + +func sortTransactions(txns []Transaction) []Transaction { + sort.Slice(txns, func(i, j int) bool { + return txns[i].bucketKey < txns[j].bucketKey + }) + return txns +} + +func TestNewRegistrationsPerIPAddressTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + + // A check-and-spend transaction for the global limit. + txn, err := tb.registrationsPerIPAddressTransaction(netip.MustParseAddr("1.2.3.4")) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "1:1.2.3.4") + test.Assert(t, txn.check && txn.spend, "should be check-and-spend") +} + +func TestNewRegistrationsPerIPv6AddressTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + + // A check-and-spend transaction for the global limit. + txn, err := tb.registrationsPerIPv6RangeTransaction(netip.MustParseAddr("2001:db8::1")) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "2:2001:db8::/48") + test.Assert(t, txn.check && txn.spend, "should be check-and-spend") +} + +func TestNewOrdersPerAccountTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + + // A check-and-spend transaction for the global limit. 
+ txn, err := tb.ordersPerAccountTransaction(123456789) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "3:123456789") + test.Assert(t, txn.check && txn.spend, "should be check-and-spend") +} + +func TestFailedAuthorizationsPerDomainPerAccountTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "testdata/working_override_13371338.yml", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + err = tb.loadOverrides(context.Background()) + test.AssertNotError(t, err, "loading overrides") + + // A check-only transaction for the default per-account limit. + txns, err := tb.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "4:123456789:so.many.labels.here.example.com") + test.Assert(t, txns[0].checkOnly(), "should be check-only") + test.Assert(t, !txns[0].limit.isOverride, "should not be an override") + + // A spend-only transaction for the default per-account limit. + txn, err := tb.FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(123456789, identifier.NewDNS("so.many.labels.here.example.com")) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "4:123456789:so.many.labels.here.example.com") + test.Assert(t, txn.spendOnly(), "should be spend-only") + test.Assert(t, !txn.limit.isOverride, "should not be an override") + + // A check-only transaction for the per-account limit override. + txns, err = tb.FailedAuthorizationsPerDomainPerAccountCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "4:13371338:so.many.labels.here.example.com") + test.Assert(t, txns[0].checkOnly(), "should be check-only") + test.Assert(t, txns[0].limit.isOverride, "should be an override") + + // A spend-only transaction for the per-account limit override. + txn, err = tb.FailedAuthorizationsPerDomainPerAccountSpendOnlyTransaction(13371338, identifier.NewDNS("so.many.labels.here.example.com")) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "4:13371338:so.many.labels.here.example.com") + test.Assert(t, txn.spendOnly(), "should be spend-only") + test.Assert(t, txn.limit.isOverride, "should be an override") +} + +func TestFailedAuthorizationsForPausingPerDomainPerAccountTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "testdata/working_override_13371338.yml", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + err = tb.loadOverrides(context.Background()) + test.AssertNotError(t, err, "loading overrides") + + // A transaction for the per-account limit override. 
+ txn, err := tb.FailedAuthorizationsForPausingPerDomainPerAccountTransaction(13371338, identifier.NewDNS("so.many.labels.here.example.com")) + test.AssertNotError(t, err, "creating transaction") + test.AssertEquals(t, txn.bucketKey, "8:13371338:so.many.labels.here.example.com") + test.Assert(t, txn.check && txn.spend, "should be check and spend") + test.Assert(t, txn.limit.isOverride, "should be an override") +} + +func TestCertificatesPerDomainTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + + // One check-only transaction for the global limit. + txns, err := tb.certificatesPerDomainCheckOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "5:example.com") + test.Assert(t, txns[0].checkOnly(), "should be check-only") + + // One spend-only transaction for the global limit. + txns, err = tb.CertificatesPerDomainSpendOnlyTransactions(123456789, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "5:example.com") + test.Assert(t, txns[0].spendOnly(), "should be spend-only") +} + +func TestCertificatesPerDomainPerAccountTransactions(t *testing.T) { + t.Parallel() + + tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "testdata/working_override_13371338.yml", metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + err = tb.loadOverrides(context.Background()) + test.AssertNotError(t, err, "loading overrides") + + // We only expect a single check-only transaction for the per-account limit + // override. We can safely ignore the global limit when an override is + // present. + txns, err := tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com") + test.Assert(t, txns[0].checkOnly(), "should be check-only") + test.Assert(t, txns[0].limit.isOverride, "should be an override") + + // Same as above, but with multiple example.com domains. + txns, err = tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com", "z.example.com"})) + test.AssertNotError(t, err, "creating transactions") + test.AssertEquals(t, len(txns), 1) + test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com") + test.Assert(t, txns[0].checkOnly(), "should be check-only") + test.Assert(t, txns[0].limit.isOverride, "should be an override") + + // Same as above, but with different domains. 
+	txns, err = tb.certificatesPerDomainCheckOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com", "z.example.net"}))
+	test.AssertNotError(t, err, "creating transactions")
+	txns = sortTransactions(txns)
+	test.AssertEquals(t, len(txns), 2)
+	test.AssertEquals(t, txns[0].bucketKey, "6:13371338:example.com")
+	test.Assert(t, txns[0].checkOnly(), "should be check-only")
+	test.Assert(t, txns[0].limit.isOverride, "should be an override")
+	test.AssertEquals(t, txns[1].bucketKey, "6:13371338:example.net")
+	test.Assert(t, txns[1].checkOnly(), "should be check-only")
+	test.Assert(t, txns[1].limit.isOverride, "should be an override")
+
+	// Two spend-only transactions, one for the global limit and one for the
+	// per-account limit override.
+	txns, err = tb.CertificatesPerDomainSpendOnlyTransactions(13371338, identifier.NewDNSSlice([]string{"so.many.labels.here.example.com"}))
+	test.AssertNotError(t, err, "creating transactions")
+	test.AssertEquals(t, len(txns), 2)
+	txns = sortTransactions(txns)
+	test.AssertEquals(t, txns[0].bucketKey, "5:example.com")
+	test.Assert(t, txns[0].spendOnly(), "should be spend-only")
+	test.Assert(t, !txns[0].limit.isOverride, "should not be an override")
+
+	test.AssertEquals(t, txns[1].bucketKey, "6:13371338:example.com")
+	test.Assert(t, txns[1].spendOnly(), "should be spend-only")
+	test.Assert(t, txns[1].limit.isOverride, "should be an override")
+}
+
+func TestCertificatesPerFQDNSetTransactions(t *testing.T) {
+	t.Parallel()
+
+	tb, err := NewTransactionBuilderFromFiles("../test/config-next/ratelimit-defaults.yml", "", metrics.NoopRegisterer, blog.NewMock())
+	test.AssertNotError(t, err, "creating TransactionBuilder")
+
+	// A single check-only transaction for the global limit.
+	txn, err := tb.certificatesPerFQDNSetCheckOnlyTransaction(identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"}))
+	test.AssertNotError(t, err, "creating transaction")
+	namesHash := fmt.Sprintf("%x", core.HashIdentifiers(identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"})))
+	test.AssertEquals(t, txn.bucketKey, "7:"+namesHash)
+	test.Assert(t, txn.checkOnly(), "should be check-only")
+	test.Assert(t, !txn.limit.isOverride, "should not be an override")
+}
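Spelled out, the bucket key construction that the FQDN-set test above asserts:

// Sketch only, not part of this patch; mirrors the assertion above.
idents := identifier.NewDNSSlice([]string{"example.com", "example.net", "example.org"})
key := newFQDNSetBucketKey(CertificatesPerFQDNSet, idents)
// key == "7:" + fmt.Sprintf("%x", core.HashIdentifiers(idents))

+// NewTransactionBuilder's metrics are tested in TestLoadOverrides.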
+func TestNewTransactionBuilder(t *testing.T) { + t.Parallel() + + expectedBurst := int64(10000) + expectedCount := int64(10000) + expectedPeriod := config.Duration{Duration: time.Hour * 168} + + tb, err := NewTransactionBuilder(LimitConfigs{ + NewRegistrationsPerIPAddress.String(): &LimitConfig{ + Burst: expectedBurst, + Count: expectedCount, + Period: expectedPeriod}, + }, nil, metrics.NoopRegisterer, blog.NewMock()) + test.AssertNotError(t, err, "creating TransactionBuilder") + + newRegDefault, ok := tb.limitRegistry.defaults[NewRegistrationsPerIPAddress.EnumString()] + test.Assert(t, ok, "NewRegistrationsPerIPAddress was not populated in registry") + test.AssertEquals(t, newRegDefault.Burst, expectedBurst) + test.AssertEquals(t, newRegDefault.Count, expectedCount) + test.AssertEquals(t, newRegDefault.Period, expectedPeriod) +} + +func TestNewTransactionBuilderFromDatabase(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + overrides GetOverridesFunc + expectOverrides map[string]Limit + expectError string + expectLog string + expectOverrideErrors float64 + }{ + { + name: "error fetching enabled overrides", + overrides: func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) { + return nil, errors.New("lol no") + }, + expectError: "fetching enabled overrides: lol no", + }, + { + name: "empty results", + overrides: func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) { + return &mocks.ServerStreamClient[sapb.RateLimitOverrideResponse]{Results: []*sapb.RateLimitOverrideResponse{}}, nil + }, + }, + { + name: "gRPC error", + overrides: func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) { + return &mocks.ServerStreamClient[sapb.RateLimitOverrideResponse]{Err: errors.New("i ate ur toast m8")}, nil + }, + expectError: "reading overrides stream: i ate ur toast m8", + }, + { + name: "2 valid overrides", + overrides: func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) { + return &mocks.ServerStreamClient[sapb.RateLimitOverrideResponse]{Results: []*sapb.RateLimitOverrideResponse{ + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "example.com"), Period: &durationpb.Duration{Seconds: 1}, Count: 1, Burst: 1}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "example.net"), Period: &durationpb.Duration{Seconds: 1}, Count: 1, Burst: 1}}, + }}, nil + }, + expectOverrides: map[string]Limit{ + joinWithColon(CertificatesPerDomain.EnumString(), "example.com"): {Burst: 1, Count: 1, Period: config.Duration{Duration: time.Second}, Name: CertificatesPerDomain, emissionInterval: 1000000000, burstOffset: 1000000000, isOverride: true}, + joinWithColon(CertificatesPerDomain.EnumString(), "example.net"): {Burst: 1, Count: 1, Period: config.Duration{Duration: time.Second}, Name: CertificatesPerDomain, emissionInterval: 1000000000, burstOffset: 1000000000, isOverride: true}, + }, + }, + { + name: "2 valid & 4 incomplete overrides", + overrides: func(context.Context, *emptypb.Empty, ...grpc.CallOption) (grpc.ServerStreamingClient[sapb.RateLimitOverrideResponse], error) { + return 
&mocks.ServerStreamClient[sapb.RateLimitOverrideResponse]{Results: []*sapb.RateLimitOverrideResponse{ + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "example.com"), Period: &durationpb.Duration{Seconds: 1}, Count: 1, Burst: 1}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "example.net"), Period: &durationpb.Duration{Seconds: 1}, Count: 1, Burst: 1}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "bad-example.com")}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "bad-example.net")}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "worse-example.com")}}, + {Override: &sapb.RateLimitOverride{LimitEnum: int64(StringToName["CertificatesPerDomain"]), BucketKey: joinWithColon(CertificatesPerDomain.EnumString(), "even-worse-example.xyz")}}, + }}, nil + }, + expectOverrides: map[string]Limit{ + joinWithColon(CertificatesPerDomain.EnumString(), "example.com"): {Burst: 1, Count: 1, Period: config.Duration{Duration: time.Second}, Name: CertificatesPerDomain, emissionInterval: 1000000000, burstOffset: 1000000000, isOverride: true}, + joinWithColon(CertificatesPerDomain.EnumString(), "example.net"): {Burst: 1, Count: 1, Period: config.Duration{Duration: time.Second}, Name: CertificatesPerDomain, emissionInterval: 1000000000, burstOffset: 1000000000, isOverride: true}, + }, + expectLog: fmt.Sprintf("ERR: hydrating CertificatesPerDomain override with key %q: invalid burst '0', must be > 0", joinWithColon(CertificatesPerDomain.EnumString(), "bad-example.com")), + expectOverrideErrors: 4, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockLog := blog.NewMock() + tb, err := NewTransactionBuilderFromDatabase("../test/config-next/ratelimit-defaults.yml", tc.overrides, metrics.NoopRegisterer, mockLog) + test.AssertNotError(t, err, "creating TransactionBuilder") + err = tb.limitRegistry.loadOverrides(context.Background()) + if tc.expectError != "" { + if err == nil { + t.Errorf("expected error for test %q but got none", tc.name) + } + test.AssertContains(t, err.Error(), tc.expectError) + } else { + test.AssertNotError(t, err, tc.name) + + if tc.expectLog != "" { + test.AssertSliceContains(t, mockLog.GetAll(), tc.expectLog) + } + + for bucketKey, limit := range tc.expectOverrides { + test.AssertDeepEquals(t, tb.overrides[bucketKey], &limit) + } + test.AssertEquals(t, len(tb.overrides), len(tc.expectOverrides)) + + var iom io_prometheus_client.Metric + err = tb.limitRegistry.overridesErrors.Write(&iom) + test.AssertNotError(t, err, "encoding overridesErrors metric") + test.AssertEquals(t, iom.Gauge.GetValue(), tc.expectOverrideErrors) + } + }) + } +} diff --git a/ratelimits/utilities.go b/ratelimits/utilities.go new file mode 100644 index 00000000000..17921c5ad06 --- /dev/null +++ b/ratelimits/utilities.go @@ -0,0 +1,121 @@ +package ratelimits + +import ( + "fmt" + "net/netip" + "strings" + + "github.com/weppos/publicsuffix-go/publicsuffix" + + "github.com/letsencrypt/boulder/core" + "github.com/letsencrypt/boulder/identifier" +) + +// 
joinWithColon joins the provided args with a colon. +func joinWithColon(args ...string) string { + return strings.Join(args, ":") +} + +// coveringIdentifiers returns the set of "covering" identifiers used to enforce +// the CertificatesPerDomain rate limit. For DNS names, this is the eTLD+1 as +// determined by the Public Suffix List; exact public suffix matches are +// preserved. For IP addresses, the covering prefix is /32 for IPv4 and /64 for +// IPv6. This groups requests by registered domain or address block to match the +// scope of the limit. The result is deduplicated and lowercased. If the +// identifier type is unsupported, an error is returned. +func coveringIdentifiers(idents identifier.ACMEIdentifiers) ([]string, error) { + var covers []string + for _, ident := range idents { + cover, err := coveringIdentifier(CertificatesPerDomain, ident) + if err != nil { + return nil, err + } + covers = append(covers, cover) + } + return core.UniqueLowerNames(covers), nil +} + +// coveringIdentifier returns the "covering" identifier used to enforce the +// CertificatesPerDomain, CertificatesPerDomainPerAccount, and +// NewRegistrationsPerIPv6Range rate limits. For DNS names, this is the eTLD+1 +// as determined by the Public Suffix List; exact public suffix matches are +// preserved. For IP addresses, the covering prefix depends on the limit: +// +// - CertificatesPerDomain and CertificatesPerDomainPerAccount: +// - /32 for IPv4 +// - /64 for IPv6 +// +// - NewRegistrationsPerIPv6Range: +// - /48 for IPv6 only +// +// This groups requests by registered domain or address block to match the scope +// of each limit. The result is deduplicated and lowercased. If the identifier +// type or limit is unsupported, an error is returned. +func coveringIdentifier(limit Name, ident identifier.ACMEIdentifier) (string, error) { + switch ident.Type { + case identifier.TypeDNS: + domain, err := publicsuffix.Domain(ident.Value) + if err != nil { + if err.Error() == fmt.Sprintf("%s is a suffix", ident.Value) { + // If the public suffix is the domain itself, that's fine. + // Include the original name in the result. + return ident.Value, nil + } + return "", err + } + return domain, nil + case identifier.TypeIP: + ip, err := netip.ParseAddr(ident.Value) + if err != nil { + return "", err + } + prefix, err := coveringIPPrefix(limit, ip) + if err != nil { + return "", err + } + return prefix.String(), nil + } + return "", fmt.Errorf("unsupported identifier type: %s", ident.Type) +} + +// coveringIPPrefix returns the "covering" IP prefix used to enforce the +// CertificatesPerDomain, CertificatesPerDomainPerAccount, and +// NewRegistrationsPerIPv6Range rate limits. The prefix length depends on the +// limit and IP version: +// +// - CertificatesPerDomain and CertificatesPerDomainPerAccount: +// - /32 for IPv4 +// - /64 for IPv6 +// +// - NewRegistrationsPerIPv6Range: +// - /48 for IPv6 only +// +// This groups requests by address block to match the scope of each limit. If +// the limit does not require a covering prefix, an error is returned. 
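Before coveringIPPrefix itself, the covering rules above spelled out concretely; the expected values mirror TestCoveringIdentifiers below:

// Sketch only, not part of this patch; expected values mirror the tests.
d, _ := coveringIdentifier(CertificatesPerDomain, identifier.NewDNS("www.example.co.uk"))
// d == "example.co.uk" (the eTLD+1)
v4, _ := coveringIdentifier(CertificatesPerDomain, identifier.NewIP(netip.MustParseAddr("127.0.0.1")))
// v4 == "127.0.0.1/32"
v6, _ := coveringIdentifier(CertificatesPerDomain, identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")))
// v6 == "3fff:aaa:aaaa:aaaa::/64"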
+func coveringIPPrefix(limit Name, addr netip.Addr) (netip.Prefix, error) { + switch limit { + case CertificatesPerDomain, CertificatesPerDomainPerAccount: + var bits int + if addr.Is4() { + bits = 32 + } else { + bits = 64 + } + prefix, err := addr.Prefix(bits) + if err != nil { + return netip.Prefix{}, fmt.Errorf("building covering prefix for %s: %w", addr, err) + } + return prefix, nil + + case NewRegistrationsPerIPv6Range: + if !addr.Is6() { + return netip.Prefix{}, fmt.Errorf("limit %s requires an IPv6 address, got %s", limit, addr) + } + prefix, err := addr.Prefix(48) + if err != nil { + return netip.Prefix{}, fmt.Errorf("building covering prefix for %s: %w", addr, err) + } + return prefix, nil + } + return netip.Prefix{}, fmt.Errorf("limit %s does not require a covering prefix", limit) +} diff --git a/ratelimits/utilities_test.go b/ratelimits/utilities_test.go new file mode 100644 index 00000000000..28c6f037a53 --- /dev/null +++ b/ratelimits/utilities_test.go @@ -0,0 +1,93 @@ +package ratelimits + +import ( + "net/netip" + "slices" + "testing" + + "github.com/letsencrypt/boulder/identifier" +) + +func TestCoveringIdentifiers(t *testing.T) { + cases := []struct { + name string + idents identifier.ACMEIdentifiers + wantErr string + want []string + }{ + { + name: "empty string", + idents: identifier.ACMEIdentifiers{ + identifier.NewDNS(""), + }, + wantErr: "name is blank", + want: nil, + }, + { + name: "two subdomains of same domain", + idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com"}), + want: []string{"example.com"}, + }, + { + name: "three subdomains across two domains", + idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com", "www.example.co.uk"}), + want: []string{"example.co.uk", "example.com"}, + }, + { + name: "three subdomains across two domains, plus a bare TLD", + idents: identifier.NewDNSSlice([]string{"www.example.com", "example.com", "www.example.co.uk", "co.uk"}), + want: []string{"co.uk", "example.co.uk", "example.com"}, + }, + { + name: "two subdomains of same domain, one of them long", + idents: identifier.NewDNSSlice([]string{"foo.bar.baz.www.example.com", "baz.example.com"}), + want: []string{"example.com"}, + }, + { + name: "a domain and two of its subdomains", + idents: identifier.NewDNSSlice([]string{"github.io", "foo.github.io", "bar.github.io"}), + want: []string{"bar.github.io", "foo.github.io", "github.io"}, + }, + { + name: "a domain and an IPv4 address", + idents: identifier.ACMEIdentifiers{ + identifier.NewDNS("example.com"), + identifier.NewIP(netip.MustParseAddr("127.0.0.1")), + }, + want: []string{"127.0.0.1/32", "example.com"}, + }, + { + name: "an IPv6 address", + idents: identifier.ACMEIdentifiers{ + identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")), + }, + want: []string{"3fff:aaa:aaaa:aaaa::/64"}, + }, + { + name: "four IP addresses in three prefixes", + idents: identifier.ACMEIdentifiers{ + identifier.NewIP(netip.MustParseAddr("127.0.0.1")), + identifier.NewIP(netip.MustParseAddr("127.0.0.254")), + identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")), + identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:ffff:abad:0ff1:cec0:ffee")), + }, + want: []string{"127.0.0.1/32", "127.0.0.254/32", "3fff:aaa:aaaa:aaaa::/64", "3fff:aaa:aaaa:ffff::/64"}, + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + got, err := coveringIdentifiers(tc.idents) + if err != nil && err.Error() != tc.wantErr { + t.Errorf("Got unwanted 
error %#v", err.Error())
+			}
+			if err == nil && tc.wantErr != "" {
+				t.Errorf("Got no error, wanted %#v", tc.wantErr)
+			}
+			if !slices.Equal(got, tc.want) {
+				t.Errorf("Got %#v, but want %#v", got, tc.want)
+			}
+		})
+	}
+}
diff --git a/redis/config.go b/redis/config.go
new file mode 100644
index 00000000000..c858a4beb1b
--- /dev/null
+++ b/redis/config.go
@@ -0,0 +1,188 @@
+package redis
+
+import (
+	"fmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/redis/go-redis/extra/redisotel/v9"
+	"github.com/redis/go-redis/v9"
+
+	"github.com/letsencrypt/boulder/cmd"
+	"github.com/letsencrypt/boulder/config"
+	blog "github.com/letsencrypt/boulder/log"
+)
+
+// Config contains the configuration needed to act as a Redis client.
+type Config struct {
+	// TLS contains the configuration to speak TLS with Redis.
+	TLS cmd.TLSConfig
+
+	// Username used to authenticate to each Redis instance.
+	Username string `validate:"required"`
+
+	// PasswordFile is the path to a file holding the password used to
+	// authenticate to each Redis instance.
+	cmd.PasswordConfig
+
+	// ShardAddrs is a map of shard names to IP address:port pairs. The go-redis
+	// `Ring` client will shard reads and writes across the provided Redis
+	// servers based on a consistent hashing algorithm.
+	ShardAddrs map[string]string `validate:"omitempty,required_without=Lookups,min=1,dive,hostname_port"`
+
+	// Lookups is a list of entries, each containing the service and domain
+	// name used to construct an SRV DNS query to look up Redis backends. For
+	// example: if the resource record is 'foo.service.consul', then the
+	// 'Service' is 'foo' and the 'Domain' is 'service.consul'. The expected
+	// dNSName to be authenticated in the server certificate would be
+	// 'foo.service.consul'.
+	Lookups []cmd.ServiceDomain `validate:"omitempty,required_without=ShardAddrs,min=1,dive"`
+
+	// LookupFrequency is the frequency of periodic SRV lookups. Defaults to 30
+	// seconds.
+	LookupFrequency config.Duration `validate:"-"`
+
+	// LookupDNSAuthority can only be specified with Lookups. It's a single
+	// <hostname|IPv4|[IPv6]>:<port> of the DNS server to be used for
+	// resolution of Redis backends. If the address contains a hostname it will
+	// be resolved using system DNS. If the address contains a port, the client
+	// will use it directly, otherwise port 53 is used. If this field is left
+	// unspecified the system DNS will be used for resolution.
+	LookupDNSAuthority string `validate:"excluded_without=Lookups,omitempty,ip|hostname|hostname_port"`
+
+	// Enables read-only commands on replicas.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest primary or replica.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random primary or replica.
+	// It automatically enables ReadOnly.
+	RouteRandomly bool
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	// Maximum number of retries before giving up.
+	// Default is to not retry failed commands.
+	MaxRetries int `validate:"min=0"`
+	// Minimum backoff between each retry.
+	// Default is 8 milliseconds; -1 disables backoff.
+	MinRetryBackoff config.Duration `validate:"-"`
+	// Maximum backoff between each retry.
+	// Default is 512 milliseconds; -1 disables backoff.
+	MaxRetryBackoff config.Duration `validate:"-"`
+
+	// Dial timeout for establishing new connections.
+	// Default is 5 seconds.
+	DialTimeout config.Duration `validate:"-"`
+	// Timeout for socket reads. If reached, commands will fail
+	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+	// Default is 3 seconds.
+	ReadTimeout config.Duration `validate:"-"`
+	// Timeout for socket writes. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is ReadTimeout.
+	WriteTimeout config.Duration `validate:"-"`
+
+	// Maximum number of socket connections.
+	// Default is 5 connections per CPU, as reported by runtime.NumCPU.
+	// If this is set to an explicit value, that's not multiplied by NumCPU.
+	// PoolSize applies per cluster node and not for the whole cluster.
+	// https://pkg.go.dev/github.com/go-redis/redis#ClusterOptions
+	PoolSize int `validate:"min=0"`
+	// Minimum number of idle connections, which is useful when establishing
+	// new connections is slow.
+	MinIdleConns int `validate:"min=0"`
+	// Connection age at which the client retires (closes) the connection.
+	// Default is to not close aged connections.
+	MaxConnAge config.Duration `validate:"-"`
+	// Amount of time the client waits for a connection if all connections
+	// are busy before returning an error.
+	// Default is ReadTimeout + 1 second.
+	PoolTimeout config.Duration `validate:"-"`
+	// Amount of time after which the client closes idle connections.
+	// Should be less than the server's timeout.
+	// Default is 5 minutes. -1 disables idle timeout check.
+	IdleTimeout config.Duration `validate:"-"`
+	// Frequency of idle checks made by the idle connections reaper.
+	// Default is 1 minute. -1 disables the idle connections reaper,
+	// but idle connections are still discarded by the client
+	// if IdleTimeout is set.
+	// Deprecated: This field has been deprecated and will be removed.
+	IdleCheckFrequency config.Duration `validate:"-"`
+}
+
+// Ring is a wrapper around the go-redis/v9 Ring client that adds support for
+// (optional) periodic SRV lookups.
+type Ring struct {
+	*redis.Ring
+	lookup *lookup
+}
+
+// NewRingFromConfig returns a new *Ring client. If SRV lookups are configured,
+// a goroutine will be started to perform them periodically. Callers should
+// defer a call to StopLookups() to ensure that this goroutine is gracefully
+// shut down.
+func NewRingFromConfig(c Config, stats prometheus.Registerer, log blog.Logger) (*Ring, error) {
+	password, err := c.Pass()
+	if err != nil {
+		return nil, fmt.Errorf("loading password: %w", err)
+	}
+
+	tlsConfig, err := c.TLS.Load(stats)
+	if err != nil {
+		return nil, fmt.Errorf("loading TLS config: %w", err)
+	}
+
+	inner := redis.NewRing(&redis.RingOptions{
+		Addrs:     c.ShardAddrs,
+		Username:  c.Username,
+		Password:  password,
+		TLSConfig: tlsConfig,
+
+		MaxRetries:      c.MaxRetries,
+		MinRetryBackoff: c.MinRetryBackoff.Duration,
+		MaxRetryBackoff: c.MaxRetryBackoff.Duration,
+		DialTimeout:     c.DialTimeout.Duration,
+		ReadTimeout:     c.ReadTimeout.Duration,
+		WriteTimeout:    c.WriteTimeout.Duration,

+		PoolSize:        c.PoolSize,
+		MinIdleConns:    c.MinIdleConns,
+		ConnMaxLifetime: c.MaxConnAge.Duration,
+		PoolTimeout:     c.PoolTimeout.Duration,
+		ConnMaxIdleTime: c.IdleTimeout.Duration,
+	})
+	if len(c.ShardAddrs) > 0 {
+		// Client was statically configured with a list of shards.
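+		// (When SRV lookups are configured instead, registration happens in
+		// updateNow as shards are resolved.)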
+		MustRegisterClientMetricsCollector(inner, stats, c.ShardAddrs, c.Username)
+	}
+
+	var lookup *lookup
+	if len(c.Lookups) != 0 {
+		lookup, err = newLookup(c.Lookups, c.LookupDNSAuthority, c.LookupFrequency.Duration, inner, log, stats)
+		if err != nil {
+			return nil, err
+		}
+		lookup.start()
+	}
+
+	err = redisotel.InstrumentTracing(inner)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Ring{
+		Ring:   inner,
+		lookup: lookup,
+	}, nil
+}
+
+// StopLookups stops the goroutine responsible for keeping the shards of the
+// inner *redis.Ring up-to-date. It is a no-op if the Ring was not constructed
+// with periodic lookups or if the lookups have already been stopped.
+func (r *Ring) StopLookups() {
+	if r == nil || r.lookup == nil {
+		// No-op.
+		return
+	}
+	r.lookup.stop()
+}
diff --git a/redis/lookup.go b/redis/lookup.go
new file mode 100644
index 00000000000..f66ed7450a3
--- /dev/null
+++ b/redis/lookup.go
@@ -0,0 +1,218 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/letsencrypt/boulder/cmd"
+	blog "github.com/letsencrypt/boulder/log"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/redis/go-redis/v9"
+)
+
+var ErrNoShardsResolved = errors.New("0 shards were resolved")
+
+// lookup wraps a Redis ring client by reference and keeps the Redis ring shards
+// up to date via periodic SRV lookups.
+type lookup struct {
+	// srvLookups is a list of SRV records to be looked up.
+	srvLookups []cmd.ServiceDomain
+
+	// updateFrequency is the frequency of periodic SRV lookups. Defaults to 30
+	// seconds.
+	updateFrequency time.Duration
+
+	// updateTimeout is the timeout for each SRV lookup. Defaults to 90% of the
+	// update frequency.
+	updateTimeout time.Duration
+
+	// dnsAuthority is the single <hostname|IPv4|[IPv6]>:<port> of the DNS
+	// server to be used for SRV lookups. If the address contains a hostname it
+	// will be resolved via the system DNS. If the port is left unspecified it
+	// will default to '53'. If this field is left unspecified the system DNS
+	// will be used for resolution.
+	dnsAuthority string
+
+	// stop is a context.CancelFunc that can be used to stop the goroutine
+	// responsible for performing periodic SRV lookups.
+	stop context.CancelFunc
+
+	resolver *net.Resolver
+	ring     *redis.Ring
+	logger   blog.Logger
+	stats    prometheus.Registerer
+}
+
+// newLookup constructs and returns a new lookup instance. An initial SRV lookup
+// is performed to populate the Redis ring shards. If this lookup fails or
+// otherwise results in an empty set of resolved shards, an error is returned.
+func newLookup(srvLookups []cmd.ServiceDomain, dnsAuthority string, frequency time.Duration, ring *redis.Ring, logger blog.Logger, stats prometheus.Registerer) (*lookup, error) {
+	updateFrequency := frequency
+	if updateFrequency <= 0 {
+		// Set default frequency.
+		updateFrequency = 30 * time.Second
+	}
+	// Set default timeout to 90% of the update frequency.
+	updateTimeout := updateFrequency - updateFrequency/10
+
+	lookup := &lookup{
+		srvLookups:      srvLookups,
+		ring:            ring,
+		logger:          logger,
+		stats:           stats,
+		updateFrequency: updateFrequency,
+		updateTimeout:   updateTimeout,
+		dnsAuthority:    dnsAuthority,
+	}
+
+	if dnsAuthority == "" {
+		// Use the system DNS resolver.
+		lookup.resolver = net.DefaultResolver
+	} else {
+		// Set up a custom DNS resolver.
+		host, port, err := net.SplitHostPort(dnsAuthority)
+		if err != nil {
+			// Assume only a hostname or IPv4 address was specified.
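+			// (net.SplitHostPort fails when the address contains no port,
+			// e.g. a bare "consul.service.consul".)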
+			host = dnsAuthority
+			port = "53"
+		}
+		lookup.dnsAuthority = net.JoinHostPort(host, port)
+		lookup.resolver = &net.Resolver{
+			PreferGo: true,
+			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+				// The custom resolver closes over the lookup.dnsAuthority field
+				// so it can be swapped out in testing.
+				return net.Dial(network, lookup.dnsAuthority)
+			},
+		}
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), updateTimeout)
+	defer cancel()
+	tempErr, nonTempErr := lookup.updateNow(ctx)
+	if tempErr != nil {
+		// Log and discard temporary errors, as they're likely to be transient
+		// (e.g. network connectivity issues).
+		logger.Warningf("resolving ring shards: %s", tempErr)
+	}
+	if nonTempErr != nil && errors.Is(nonTempErr, ErrNoShardsResolved) {
+		// Non-temporary errors are always logged inside of updateNow(), so we
+		// only need to return the error here if it's ErrNoShardsResolved.
+		return nil, nonTempErr
+	}
+
+	return lookup, nil
+}
+
+// updateNow resolves the configured SRV records and updates the Redis ring
+// shards accordingly. If all lookups fail or otherwise result in an empty set
+// of resolved shards, the Redis ring is left unmodified and any errors are
+// returned. If at least one lookup succeeds, the Redis ring is updated, and
+// all errors are discarded. Non-temporary DNS errors are always logged as they
+// occur, as they're likely to be indicative of a misconfiguration.
+func (look *lookup) updateNow(ctx context.Context) (tempError, nonTempError error) {
+	var tempErrs []error
+	handleDNSError := func(err error, srv cmd.ServiceDomain) {
+		var dnsErr *net.DNSError
+		if errors.As(err, &dnsErr) && (dnsErr.IsTimeout || dnsErr.IsTemporary) {
+			tempErrs = append(tempErrs, err)
+			return
+		}
+		// Log non-temporary DNS errors as they occur, as they're likely to be
+		// indicative of misconfiguration.
+		look.logger.Errf("resolving service _%s._tcp.%s: %s", srv.Service, srv.Domain, err)
+	}
+
+	nextAddrs := make(map[string]string)
+	for _, srv := range look.srvLookups {
+		_, targets, err := look.resolver.LookupSRV(ctx, srv.Service, "tcp", srv.Domain)
+		if err != nil {
+			handleDNSError(err, srv)
+			// Skip to the next SRV lookup.
+			continue
+		}
+		if len(targets) <= 0 {
+			tempErrs = append(tempErrs, fmt.Errorf("0 targets resolved for service \"_%s._tcp.%s\"", srv.Service, srv.Domain))
+			// Skip to the next SRV lookup.
+			continue
+		}
+
+		for _, target := range targets {
+			host := strings.TrimRight(target.Target, ".")
+			if look.dnsAuthority != "" {
+				// Look up A/AAAA records for the SRV target using the custom
+				// DNS authority.
+				hostAddrs, err := look.resolver.LookupHost(ctx, host)
+				if err != nil {
+					handleDNSError(err, srv)
+					// Skip to the next A/AAAA lookup.
+					continue
+				}
+				if len(hostAddrs) <= 0 {
+					tempErrs = append(tempErrs, fmt.Errorf("0 addrs resolved for target %q of service \"_%s._tcp.%s\"", host, srv.Service, srv.Domain))
+					// Skip to the next A/AAAA lookup.
+					continue
+				}
+				// Use the first resolved IP address.
+				host = hostAddrs[0]
+			}
+			addr := fmt.Sprintf("%s:%d", host, target.Port)
+			nextAddrs[addr] = addr
+		}
+	}
+
+	// Only return errors if we failed to resolve any shards.
+	if len(nextAddrs) <= 0 {
+		return errors.Join(tempErrs...), ErrNoShardsResolved
+	}
+
+	// Some shards were resolved; update the Redis ring and discard all errors.
+	look.ring.SetAddrs(nextAddrs)
+
+	// Update the Redis client metrics.
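+	// Re-registering with an identical set of addresses and username is a
+	// no-op, so it is safe to do this on every update.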
+ MustRegisterClientMetricsCollector(look.ring, look.stats, nextAddrs, look.ring.Options().Username) + + return nil, nil +} + +// start starts a goroutine that keeps the Redis ring shards up-to-date by +// periodically performing SRV lookups. +func (look *lookup) start() { + var lookupCtx context.Context + lookupCtx, look.stop = context.WithCancel(context.Background()) + go func() { + ticker := time.NewTicker(look.updateFrequency) + defer ticker.Stop() + for { + // Check for context cancellation before we do any work. + if lookupCtx.Err() != nil { + return + } + + timeoutCtx, cancel := context.WithTimeout(lookupCtx, look.updateTimeout) + tempErrs, nonTempErrs := look.updateNow(timeoutCtx) + cancel() + if tempErrs != nil { + look.logger.Warningf("resolving ring shards, temporary errors: %s", tempErrs) + continue + } + if nonTempErrs != nil { + look.logger.Errf("resolving ring shards, non-temporary errors: %s", nonTempErrs) + continue + } + + select { + case <-ticker.C: + continue + + case <-lookupCtx.Done(): + return + } + } + }() +} diff --git a/redis/lookup_test.go b/redis/lookup_test.go new file mode 100644 index 00000000000..d81870c9fe4 --- /dev/null +++ b/redis/lookup_test.go @@ -0,0 +1,247 @@ +package redis + +import ( + "context" + "testing" + "time" + + "github.com/letsencrypt/boulder/cmd" + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/metrics" + "github.com/letsencrypt/boulder/test" + + "github.com/redis/go-redis/v9" +) + +func newTestRedisRing() *redis.Ring { + CACertFile := "../test/certs/ipki/minica.pem" + CertFile := "../test/certs/ipki/localhost/cert.pem" + KeyFile := "../test/certs/ipki/localhost/key.pem" + tlsConfig := cmd.TLSConfig{ + CACertFile: CACertFile, + CertFile: CertFile, + KeyFile: KeyFile, + } + tlsConfig2, err := tlsConfig.Load(metrics.NoopRegisterer) + if err != nil { + panic(err) + } + + client := redis.NewRing(&redis.RingOptions{ + Username: "boulder", + Password: "824968fa490f4ecec1e52d5e34916bdb60d45f8d", + TLSConfig: tlsConfig2, + }) + return client +} + +func TestNewLookup(t *testing.T) { + t.Parallel() + + logger := blog.NewMock() + ring := newTestRedisRing() + + _, err := newLookup([]cmd.ServiceDomain{ + { + Service: "redisratelimits", + Domain: "service.consul", + }, + }, + "consul.service.consul", + 250*time.Millisecond, + ring, + logger, + metrics.NoopRegisterer, + ) + test.AssertNotError(t, err, "Expected newLookup construction to succeed") +} + +func TestStart(t *testing.T) { + t.Parallel() + + logger := blog.NewMock() + ring := newTestRedisRing() + + lookup, err := newLookup([]cmd.ServiceDomain{ + { + Service: "redisratelimits", + Domain: "service.consul", + }, + }, + "consul.service.consul", + 250*time.Millisecond, + ring, + logger, + metrics.NoopRegisterer, + ) + test.AssertNotError(t, err, "Expected newLookup construction to succeed") + + lookup.start() + lookup.stop() +} + +func TestNewLookupWithOneFailingSRV(t *testing.T) { + t.Parallel() + + logger := blog.NewMock() + ring := newTestRedisRing() + + _, err := newLookup([]cmd.ServiceDomain{ + { + Service: "doesnotexist", + Domain: "service.consuls", + }, + { + Service: "redisratelimits", + Domain: "service.consul", + }, + }, + "consul.service.consul", + 250*time.Millisecond, + ring, + logger, + metrics.NoopRegisterer, + ) + test.AssertNotError(t, err, "Expected newLookup construction to succeed") +} + +func TestNewLookupWithAllFailingSRV(t *testing.T) { + t.Parallel() + + logger := blog.NewMock() + ring := newTestRedisRing() + + _, err := 
newLookup([]cmd.ServiceDomain{
+		{
+			Service: "doesnotexist",
+			Domain:  "service.consuls",
+		},
+		{
+			Service: "doesnotexist2",
+			Domain:  "service.consuls",
+		},
+	},
+		"consul.service.consul",
+		250*time.Millisecond,
+		ring,
+		logger,
+		metrics.NoopRegisterer,
+	)
+	test.AssertError(t, err, "Expected newLookup construction to fail")
+}
+
+func TestUpdateNowWithAllFailingSRV(t *testing.T) {
+	t.Parallel()
+
+	logger := blog.NewMock()
+	ring := newTestRedisRing()
+
+	lookup, err := newLookup([]cmd.ServiceDomain{
+		{
+			Service: "redisratelimits",
+			Domain:  "service.consul",
+		},
+	},
+		"consul.service.consul",
+		250*time.Millisecond,
+		ring,
+		logger,
+		metrics.NoopRegisterer,
+	)
+	test.AssertNotError(t, err, "Expected newLookup construction to succeed")
+
+	lookup.srvLookups = []cmd.ServiceDomain{
+		{
+			Service: "doesnotexist1",
+			Domain:  "service.consul",
+		},
+		{
+			Service: "doesnotexist2",
+			Domain:  "service.consul",
+		},
+	}
+
+	tempErr, nonTempErr := lookup.updateNow(t.Context())
+	test.AssertNotError(t, tempErr, "Expected no temporary errors")
+	test.AssertError(t, nonTempErr, "Expected non-temporary errors to have occurred")
+}
+
+func TestUpdateNowWithAllFailingSRVs(t *testing.T) {
+	t.Parallel()
+
+	logger := blog.NewMock()
+	ring := newTestRedisRing()
+
+	lookup, err := newLookup([]cmd.ServiceDomain{
+		{
+			Service: "redisratelimits",
+			Domain:  "service.consul",
+		},
+	},
+		"consul.service.consul",
+		250*time.Millisecond,
+		ring,
+		logger,
+		metrics.NoopRegisterer,
+	)
+	test.AssertNotError(t, err, "Expected newLookup construction to succeed")
+
+	// Replace the dnsAuthority with a non-existent DNS server; this will cause
+	// a timeout error, which is technically a temporary error, but will
+	// eventually result in a non-temporary error when no shards are resolved.
+	lookup.dnsAuthority = "consuls.services.consuls:53"
+
+	tempErr, nonTempErr := lookup.updateNow(t.Context())
+	test.AssertError(t, tempErr, "Expected temporary errors")
+	test.AssertError(t, nonTempErr, "Expected a non-temporary error")
+	test.AssertErrorIs(t, nonTempErr, ErrNoShardsResolved)
+}
+
+func TestUpdateNowWithOneFailingSRV(t *testing.T) {
+	t.Parallel()
+
+	logger := blog.NewMock()
+	ring := newTestRedisRing()
+
+	lookup, err := newLookup([]cmd.ServiceDomain{
+		{
+			Service: "doesnotexist",
+			Domain:  "service.consuls",
+		},
+		{
+			Service: "redisratelimits",
+			Domain:  "service.consul",
+		},
+	},
+		"consul.service.consul",
+		250*time.Millisecond,
+		ring,
+		logger,
+		metrics.NoopRegisterer,
+	)
+	test.AssertNotError(t, err, "Expected newLookup construction to succeed")
+
+	// The Consul service entry for 'redisratelimits' is configured to return
+	// two SRV targets. We should only have two shards in the ring.
+	test.Assert(t, ring.Len() == 2, "Expected 2 shards in the ring")
+
+	testCtx := t.Context()
+
+	// Ensure we can reach both shards using the PING command.
+	err = ring.ForEachShard(testCtx, func(ctx context.Context, shard *redis.Client) error {
+		return shard.Ping(ctx).Err()
+	})
+	test.AssertNotError(t, err, "Expected PING to succeed for both shards")
+
+	// Drop both shards from the ring.
+	ring.SetAddrs(map[string]string{})
+	test.Assert(t, ring.Len() == 0, "Expected 0 shards in the ring")
+
+	// Force a lookup to occur.
+	tempErr, nonTempErr := lookup.updateNow(testCtx)
+	test.AssertNotError(t, tempErr, "Expected no temporary errors")
+	test.AssertNotError(t, nonTempErr, "Expected no non-temporary errors")
+
+	// The ring should now have two shards again.
+ test.Assert(t, ring.Len() == 2, "Expected 2 shards in the ring") +} diff --git a/redis/metrics.go b/redis/metrics.go new file mode 100644 index 00000000000..1a7c0487852 --- /dev/null +++ b/redis/metrics.go @@ -0,0 +1,103 @@ +package redis + +import ( + "errors" + "slices" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/redis/go-redis/v9" +) + +// An interface satisfied by *redis.ClusterClient and also by a mock in our tests. +type poolStatGetter interface { + PoolStats() *redis.PoolStats +} + +var _ poolStatGetter = (*redis.ClusterClient)(nil) + +type metricsCollector struct { + statGetter poolStatGetter + + // Stats accessible from the go-redis connector: + // https://pkg.go.dev/github.com/go-redis/redis@v6.15.9+incompatible/internal/pool#Stats + lookups *prometheus.Desc + totalConns *prometheus.Desc + idleConns *prometheus.Desc + staleConns *prometheus.Desc +} + +// Describe is implemented with DescribeByCollect. That's possible because the +// Collect method will always return the same metrics with the same descriptors. +func (dbc metricsCollector) Describe(ch chan<- *prometheus.Desc) { + prometheus.DescribeByCollect(dbc, ch) +} + +// Collect first triggers the Redis ClusterClient's PoolStats function. +// Then it creates constant metrics for each Stats value on the fly based +// on the returned data. +// +// Note that Collect could be called concurrently, so we depend on PoolStats() +// to be concurrency-safe. +func (dbc metricsCollector) Collect(ch chan<- prometheus.Metric) { + writeGauge := func(stat *prometheus.Desc, val uint32, labelValues ...string) { + ch <- prometheus.MustNewConstMetric(stat, prometheus.GaugeValue, float64(val), labelValues...) + } + + stats := dbc.statGetter.PoolStats() + writeGauge(dbc.lookups, stats.Hits, "hit") + writeGauge(dbc.lookups, stats.Misses, "miss") + writeGauge(dbc.lookups, stats.Timeouts, "timeout") + writeGauge(dbc.totalConns, stats.TotalConns) + writeGauge(dbc.idleConns, stats.IdleConns) + writeGauge(dbc.staleConns, stats.StaleConns) +} + +// newClientMetricsCollector is broken out for testing purposes. +func newClientMetricsCollector(statGetter poolStatGetter, labels prometheus.Labels) metricsCollector { + return metricsCollector{ + statGetter: statGetter, + lookups: prometheus.NewDesc( + "redis_connection_pool_lookups", + "Number of lookups for a connection in the pool, labeled by hit/miss", + []string{"result"}, labels), + totalConns: prometheus.NewDesc( + "redis_connection_pool_total_conns", + "Number of total connections in the pool.", + nil, labels), + idleConns: prometheus.NewDesc( + "redis_connection_pool_idle_conns", + "Number of idle connections in the pool.", + nil, labels), + staleConns: prometheus.NewDesc( + "redis_connection_pool_stale_conns", + "Number of stale connections removed from the pool.", + nil, labels), + } +} + +// MustRegisterClientMetricsCollector registers a metrics collector for the +// given Redis client with the provided prometheus.Registerer. The collector +// will report metrics labelled by the provided addresses and username. If the +// collector is already registered, this function is a no-op. +func MustRegisterClientMetricsCollector(client poolStatGetter, stats prometheus.Registerer, addrs map[string]string, user string) { + var labelAddrs []string + for addr := range addrs { + labelAddrs = append(labelAddrs, addr) + } + // Keep the list of addresses sorted for consistency. 
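+	// (A stable order makes repeated registrations produce identical labels,
+	// so duplicates are detected as AlreadyRegisteredError below.)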
+ slices.Sort(labelAddrs) + labels := prometheus.Labels{ + "addresses": strings.Join(labelAddrs, ", "), + "user": user, + } + err := stats.Register(newClientMetricsCollector(client, labels)) + if err != nil { + are := prometheus.AlreadyRegisteredError{} + if errors.As(err, &are) { + // The collector is already registered using the same labels. + return + } + panic(err) + } +} diff --git a/redis/metrics_test.go b/redis/metrics_test.go new file mode 100644 index 00000000000..b67237ec9e9 --- /dev/null +++ b/redis/metrics_test.go @@ -0,0 +1,77 @@ +package redis + +import ( + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/redis/go-redis/v9" + + "github.com/letsencrypt/boulder/metrics" +) + +type mockPoolStatGetter struct{} + +var _ poolStatGetter = mockPoolStatGetter{} + +func (mockPoolStatGetter) PoolStats() *redis.PoolStats { + return &redis.PoolStats{ + Hits: 13, + Misses: 7, + Timeouts: 4, + TotalConns: 1000, + IdleConns: 500, + StaleConns: 10, + } +} + +func TestMetrics(t *testing.T) { + mets := newClientMetricsCollector(mockPoolStatGetter{}, + prometheus.Labels{ + "foo": "bar", + }) + // Check that it has the correct type to satisfy MustRegister + metrics.NoopRegisterer.MustRegister(mets) + + expectedMetrics := 6 + outChan := make(chan prometheus.Metric, expectedMetrics) + mets.Collect(outChan) + + results := make(map[string]bool) + for range expectedMetrics { + metric := <-outChan + t.Log(metric.Desc().String()) + results[metric.Desc().String()] = true + } + + expected := strings.Split( + `Desc{fqName: "redis_connection_pool_lookups", help: "Number of lookups for a connection in the pool, labeled by hit/miss", constLabels: {foo="bar"}, variableLabels: {result}} +Desc{fqName: "redis_connection_pool_lookups", help: "Number of lookups for a connection in the pool, labeled by hit/miss", constLabels: {foo="bar"}, variableLabels: {result}} +Desc{fqName: "redis_connection_pool_lookups", help: "Number of lookups for a connection in the pool, labeled by hit/miss", constLabels: {foo="bar"}, variableLabels: {result}} +Desc{fqName: "redis_connection_pool_total_conns", help: "Number of total connections in the pool.", constLabels: {foo="bar"}, variableLabels: {}} +Desc{fqName: "redis_connection_pool_idle_conns", help: "Number of idle connections in the pool.", constLabels: {foo="bar"}, variableLabels: {}} +Desc{fqName: "redis_connection_pool_stale_conns", help: "Number of stale connections removed from the pool.", constLabels: {foo="bar"}, variableLabels: {}}`, + "\n") + + for _, e := range expected { + if !results[e] { + t.Errorf("expected metrics to contain %q, but they didn't", e) + } + } + + if len(results) > len(expected) { + t.Errorf("expected metrics to contain %d entries, but they contained %d", + len(expected), len(results)) + } +} + +func TestMustRegisterClientMetricsCollector(t *testing.T) { + client := mockPoolStatGetter{} + stats := prometheus.NewRegistry() + // First registration should succeed. + MustRegisterClientMetricsCollector(client, stats, map[string]string{"foo": "bar"}, "baz") + // Duplicate registration should succeed. + MustRegisterClientMetricsCollector(client, stats, map[string]string{"foo": "bar"}, "baz") + // Registration with different label values should succeed. 
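+	// (The differing labels produce a distinct collector, so no
+	// AlreadyRegisteredError occurs.)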
+ MustRegisterClientMetricsCollector(client, stats, map[string]string{"f00": "b4r"}, "b4z") +} diff --git a/reloader/reloader.go b/reloader/reloader.go deleted file mode 100644 index d885af6302e..00000000000 --- a/reloader/reloader.go +++ /dev/null @@ -1,84 +0,0 @@ -// Package reloader provides a method to load a file whenever it changes. -package reloader - -import ( - "io/ioutil" - "os" - "time" -) - -// Wrap time.Tick so we can override it in tests. -var makeTicker = func() (func(), <-chan time.Time) { - t := time.NewTicker(1 * time.Second) - return t.Stop, t.C -} - -// Reloader represents an ongoing reloader task. -type Reloader struct { - stopChan chan<- struct{} -} - -// Stop stops an active reloader, release its resources. -func (r *Reloader) Stop() { - r.stopChan <- struct{}{} -} - -// A pointer we can override for testing. -var readFile = ioutil.ReadFile - -// New loads the filename provided, and calls the callback. It then spawns a -// goroutine to check for updates to that file, calling the callback again with -// any new contents. The first load, and the first call to callback, are run -// synchronously, so it is easy for the caller to check for errors and fail -// fast. New will return an error if it occurs on the first load. Otherwise all -// errors are sent to the callback. -func New(filename string, dataCallback func([]byte) error, errorCallback func(error)) (*Reloader, error) { - if errorCallback == nil { - errorCallback = func(e error) {} - } - fileInfo, err := os.Stat(filename) - if err != nil { - return nil, err - } - b, err := readFile(filename) - if err != nil { - return nil, err - } - stopChan := make(chan struct{}) - tickerStop, tickChan := makeTicker() - loop := func() { - for { - select { - case <-stopChan: - tickerStop() - return - case <-tickChan: - currentFileInfo, err := os.Stat(filename) - if err != nil { - errorCallback(err) - continue - } - if !currentFileInfo.ModTime().After(fileInfo.ModTime()) { - continue - } - b, err := readFile(filename) - if err != nil { - errorCallback(err) - continue - } - fileInfo = currentFileInfo - err = dataCallback(b) - if err != nil { - errorCallback(err) - } - } - } - } - err = dataCallback(b) - if err != nil { - tickerStop() - return nil, err - } - go loop() - return &Reloader{stopChan}, nil -} diff --git a/reloader/reloader_test.go b/reloader/reloader_test.go deleted file mode 100644 index 21786ae9d6f..00000000000 --- a/reloader/reloader_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package reloader - -import ( - "fmt" - "io/ioutil" - "os" - "reflect" - "testing" - "time" -) - -func noop([]byte) error { - return nil -} - -func testErrCb(t *testing.T) func(error) { - return func(e error) { - t.Error(e) - } -} - -func testFatalCb(t *testing.T) func(error) { - return func(e error) { - t.Fatal(e) - } -} - -func TestNoStat(t *testing.T) { - filename := os.TempDir() + "/doesntexist.123456789" - _, err := New(filename, noop, testErrCb(t)) - if err == nil { - t.Fatalf("Expected New to return error when the file doesn't exist.") - } -} - -func TestNoRead(t *testing.T) { - f, _ := ioutil.TempFile("", "test-no-read.txt") - defer os.Remove(f.Name()) - oldReadFile := readFile - readFile = func(string) ([]byte, error) { - return nil, fmt.Errorf("read failed") - } - _, err := New(f.Name(), noop, testErrCb(t)) - if err == nil { - t.Fatalf("Expected New to return error when permission denied.") - readFile = oldReadFile - } - readFile = oldReadFile -} - -func TestFirstError(t *testing.T) { - f, _ := ioutil.TempFile("", "test-first-error.txt") - 
defer os.Remove(f.Name()) - _, err := New(f.Name(), func([]byte) error { - return fmt.Errorf("i die") - }, testErrCb(t)) - if err == nil { - t.Fatalf("Expected New to return error when the callback returned error the first time.") - } -} - -func TestFirstSuccess(t *testing.T) { - f, _ := ioutil.TempFile("", "test-first-success.txt") - defer os.Remove(f.Name()) - r, err := New(f.Name(), func([]byte) error { - return nil - }, testErrCb(t)) - if err != nil { - t.Errorf("Expected New to succeed, got %s", err) - } - r.Stop() -} - -// Override makeTicker for testing. -// Returns a channel on which to send artificial ticks, and a function to -// restore the default makeTicker. -func makeFakeMakeTicker() (chan<- time.Time, func()) { - origMakeTicker := makeTicker - fakeTickChan := make(chan time.Time) - makeTicker = func() (func(), <-chan time.Time) { - return func() {}, fakeTickChan - } - return fakeTickChan, func() { - makeTicker = origMakeTicker - } -} - -func TestReload(t *testing.T) { - // Mock out makeTicker - fakeTick, restoreMakeTicker := makeFakeMakeTicker() - defer restoreMakeTicker() - - f, _ := ioutil.TempFile("", "test-reload.txt") - filename := f.Name() - defer os.Remove(filename) - - _, _ = f.Write([]byte("first body")) - _ = f.Close() - - var bodies []string - reloads := make(chan []byte, 1) - r, err := New(filename, func(b []byte) error { - bodies = append(bodies, string(b)) - reloads <- b - return nil - }, testFatalCb(t)) - if err != nil { - t.Fatalf("Expected New to succeed, got %s", err) - } - defer r.Stop() - expected := []string{"first body"} - if !reflect.DeepEqual(bodies, expected) { - t.Errorf("Expected bodies = %#v, got %#v", expected, bodies) - } - fakeTick <- time.Now() - <-reloads - if !reflect.DeepEqual(bodies, expected) { - t.Errorf("Expected bodies = %#v, got %#v", expected, bodies) - } - - // Write to the file, expect a reload. Sleep a few milliseconds first so the - // timestamps actually differ. - time.Sleep(1 * time.Second) - err = ioutil.WriteFile(filename, []byte("second body"), 0644) - if err != nil { - t.Fatal(err) - } - fakeTick <- time.Now() - <-reloads - expected = []string{"first body", "second body"} - if !reflect.DeepEqual(bodies, expected) { - t.Errorf("Expected bodies = %#v, got %#v", expected, bodies) - } - - // Send twice on this blocking channel to make sure we go through at least on - // iteration of the reloader's loop. 
- fakeTick <- time.Now() - fakeTick <- time.Now() - if !reflect.DeepEqual(bodies, expected) { - t.Errorf("Expected bodies = %#v, got %#v", expected, bodies) - } -} - -func TestReloadFailure(t *testing.T) { - // Mock out makeTicker - fakeTick, restoreMakeTicker := makeFakeMakeTicker() - - f, _ := ioutil.TempFile("", "test-reload-failure.txt") - filename := f.Name() - defer func() { - restoreMakeTicker() - _ = os.Remove(filename) - }() - - _, _ = f.Write([]byte("first body")) - _ = f.Close() - - type res struct { - b []byte - err error - } - - reloads := make(chan res, 1) - _, err := New(filename, func(b []byte) error { - reloads <- res{b, nil} - return nil - }, func(e error) { - reloads <- res{nil, e} - }) - if err != nil { - t.Fatalf("Expected New to succeed.") - } - <-reloads - os.Remove(filename) - fakeTick <- time.Now() - select { - case r := <-reloads: - if r.err == nil { - t.Errorf("Expected error trying to read missing file.") - } - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for reload") - } - - time.Sleep(1 * time.Second) - // Create a file with no permissions - oldReadFile := readFile - readFile = func(string) ([]byte, error) { - return nil, fmt.Errorf("permission denied") - } - - fakeTick <- time.Now() - select { - case r := <-reloads: - if r.err == nil { - t.Errorf("Expected error trying to read file with no permissions.") - } - case <-time.After(5 * time.Second): - t.Fatalf("timed out waiting for reload") - } - readFile = oldReadFile - - err = ioutil.WriteFile(filename, []byte("third body"), 0644) - if err != nil { - t.Fatal(err) - } - fakeTick <- time.Now() - select { - case r := <-reloads: - if r.err != nil { - t.Errorf("Unexpected error: %s", err) - } - if string(r.b) != "third body" { - t.Errorf("Expected 'third body' reading file after restoring it.") - } - return - case <-time.After(5 * time.Second): - t.Fatalf("timed out waiting for successful reload") - } -} diff --git a/revocation/reasons.go b/revocation/reasons.go index a5b3f0807a9..8c08014d50a 100644 --- a/revocation/reasons.go +++ b/revocation/reasons.go @@ -2,73 +2,93 @@ package revocation import ( "fmt" - "sort" - "strings" - - "golang.org/x/crypto/ocsp" ) // Reason is used to specify a certificate revocation reason -type Reason int +type Reason int64 -// ReasonToString provides a map from reason code to string -var ReasonToString = map[Reason]string{ - ocsp.Unspecified: "unspecified", - ocsp.KeyCompromise: "keyCompromise", - ocsp.CACompromise: "cACompromise", - ocsp.AffiliationChanged: "affiliationChanged", - ocsp.Superseded: "superseded", - ocsp.CessationOfOperation: "cessationOfOperation", - ocsp.CertificateHold: "certificateHold", +// The enumerated reasons for revoking a certificate. See RFC 5280: +// https://datatracker.ietf.org/doc/html/rfc5280#section-5.3.1. 
+const ( + Unspecified Reason = 0 + KeyCompromise Reason = 1 + CACompromise Reason = 2 + AffiliationChanged Reason = 3 + Superseded Reason = 4 + CessationOfOperation Reason = 5 + CertificateHold Reason = 6 // 7 is unused - ocsp.RemoveFromCRL: "removeFromCRL", - ocsp.PrivilegeWithdrawn: "privilegeWithdrawn", - ocsp.AACompromise: "aAcompromise", -} + RemoveFromCRL Reason = 8 + PrivilegeWithdrawn Reason = 9 + AACompromise Reason = 10 +) -// UserAllowedReasons contains the subset of Reasons which users are -// allowed to use -var UserAllowedReasons = map[Reason]struct{}{ - ocsp.Unspecified: {}, - ocsp.KeyCompromise: {}, - ocsp.AffiliationChanged: {}, - ocsp.Superseded: {}, - ocsp.CessationOfOperation: {}, +// reasonToString provides a map from reason code to string. It is unexported +// to make it immutable. +var reasonToString = map[Reason]string{ + Unspecified: "unspecified", + KeyCompromise: "keyCompromise", + CACompromise: "cACompromise", + AffiliationChanged: "affiliationChanged", + Superseded: "superseded", + CessationOfOperation: "cessationOfOperation", + CertificateHold: "certificateHold", + RemoveFromCRL: "removeFromCRL", + PrivilegeWithdrawn: "privilegeWithdrawn", + AACompromise: "aAcompromise", } -// AdminAllowedReasons contains the subset of Reasons which admins are allowed -// to use. Reasons not found here will soon be forbidden from appearing in CRLs -// or OCSP responses by root programs. -var AdminAllowedReasons = map[Reason]struct{}{ - ocsp.Unspecified: {}, - ocsp.KeyCompromise: {}, - ocsp.AffiliationChanged: {}, - ocsp.Superseded: {}, - ocsp.CessationOfOperation: {}, - ocsp.PrivilegeWithdrawn: {}, +// String converts a revocation reason code (such as 0) into its corresponding +// reason string (e.g. "unspecified"). +// +// The receiver *must* be one of the valid reason code constants defined in this +// package: this method will panic if called on an invalid Reason. It is +// expected that this method is only called on const Reasons, or after a call to +// UserAllowedReason or AdminAllowedReason. +func (r Reason) String() string { + res, ok := reasonToString[r] + if !ok { + panic(fmt.Errorf("unrecognized revocation code %d", r)) + } + return res } -// UserAllowedReasonsMessage contains a string describing a list of user allowed -// revocation reasons. This is useful when a revocation is rejected because it -// is not a valid user supplied reason and the allowed values must be -// communicated. This variable is populated during package initialization. -var UserAllowedReasonsMessage = "" +// StringToReason converts a revocation reason string (such as "keyCompromise") +// into the corresponding integer reason code (e.g. 1). +func StringToReason(s string) (Reason, error) { + for code, str := range reasonToString { + if s == str { + return code, nil + } + } + return 0, fmt.Errorf("unrecognized revocation reason %q", s) +} -func init() { - // Build a slice of ints from the allowed reason codes. - // We want a slice because iterating `UserAllowedReasons` will change order - // and make the message unpredictable and cumbersome for unit testing. - // We use []ints instead of []Reason to use `sort.Ints` without fuss. - var allowed []int - for reason := range UserAllowedReasons { - allowed = append(allowed, int(reason)) +// UserAllowedReason returns true if the given Reason is in the subset of +// Reasons which users are allowed to request. 
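+// For example, UserAllowedReason(KeyCompromise) is true, while
+// UserAllowedReason(CACompromise) is false.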
+func UserAllowedReason(r Reason) bool { + switch r { + case Unspecified, + KeyCompromise, + Superseded, + CessationOfOperation: + return true } - sort.Ints(allowed) + return false +} - var reasonStrings []string - for _, reason := range allowed { - reasonStrings = append(reasonStrings, fmt.Sprintf("%s (%d)", - ReasonToString[Reason(reason)], reason)) +// AdminAllowedReason returns true if the given Reason is in the subset of +// Reasons which admins (i.e. people acting in CA Trusted Roles) are allowed +// to request. Reasons which do *not* appear here are those which are defined +// by RFC 5280 but are disallowed by the Baseline Requirements. +func AdminAllowedReason(r Reason) bool { + switch r { + case Unspecified, + KeyCompromise, + Superseded, + CessationOfOperation, + PrivilegeWithdrawn: + return true } - UserAllowedReasonsMessage = strings.Join(reasonStrings, ", ") + return false } diff --git a/rocsp/config/issuers_test.go b/rocsp/config/issuers_test.go deleted file mode 100644 index 3c28e3bf5c5..00000000000 --- a/rocsp/config/issuers_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package rocsp_config - -import ( - "encoding/hex" - "strings" - "testing" - - "github.com/letsencrypt/boulder/test" - "golang.org/x/crypto/ocsp" -) - -func TestLoadIssuers(t *testing.T) { - input := map[string]int{ - "../../test/hierarchy/int-e1.cert.pem": 23, - "../../test/hierarchy/int-r3.cert.pem": 99, - } - output, err := LoadIssuers(input) - if err != nil { - t.Fatal(err) - } - - var e1 *ShortIDIssuer - var r3 *ShortIDIssuer - - for i, v := range output { - if strings.Contains(v.Certificate.Subject.String(), "E1") { - e1 = &output[i] - } - if strings.Contains(v.Certificate.Subject.String(), "R3") { - r3 = &output[i] - } - } - - test.AssertEquals(t, e1.Subject.String(), "CN=(TEST) Elegant Elephant E1,O=Boulder Test,C=XX") - test.AssertEquals(t, r3.Subject.String(), "CN=(TEST) Radical Rhino R3,O=Boulder Test,C=XX") - test.AssertEquals(t, e1.shortID, uint8(23)) - test.AssertEquals(t, r3.shortID, uint8(99)) -} - -func TestFindIssuerByName(t *testing.T) { - input := map[string]int{ - "../../test/hierarchy/int-e1.cert.pem": 23, - "../../test/hierarchy/int-r3.cert.pem": 99, - } - issuers, err := LoadIssuers(input) - if err != nil { - t.Fatal(err) - } - - elephant, err := hex.DecodeString("3049310b300906035504061302585831153013060355040a130c426f756c6465722054657374312330210603550403131a28544553542920456c6567616e7420456c657068616e74204531") - if err != nil { - t.Fatal(err) - } - rhino, err := hex.DecodeString("3046310b300906035504061302585831153013060355040a130c426f756c64657220546573743120301e06035504031317285445535429205261646963616c205268696e6f205233") - if err != nil { - t.Fatal(err) - } - - ocspResp := &ocsp.Response{ - RawResponderName: elephant, - } - - issuer, err := FindIssuerByName(ocspResp, issuers) - if err != nil { - t.Fatalf("couldn't find issuer: %s", err) - } - - test.AssertEquals(t, issuer.shortID, uint8(23)) - - ocspResp = &ocsp.Response{ - RawResponderName: rhino, - } - - issuer, err = FindIssuerByName(ocspResp, issuers) - if err != nil { - t.Fatalf("couldn't find issuer: %s", err) - } - - test.AssertEquals(t, issuer.shortID, uint8(99)) -} - -func TestFindIssuerByID(t *testing.T) { - input := map[string]int{ - "../../test/hierarchy/int-e1.cert.pem": 23, - "../../test/hierarchy/int-r3.cert.pem": 99, - } - issuers, err := LoadIssuers(input) - if err != nil { - t.Fatal(err) - } - - // an IssuerNameID - issuer, err := FindIssuerByID(66283756913588288, issuers) - if err != nil { - t.Fatalf("couldn't find 
issuer: %s", err) - } - test.AssertEquals(t, issuer.shortID, uint8(23)) - - // an IssuerID - issuer, err = FindIssuerByID(2823400738, issuers) - if err != nil { - t.Fatalf("couldn't find issuer: %s", err) - } - test.AssertEquals(t, issuer.shortID, uint8(23)) - - // an IssuerNameID - issuer, err = FindIssuerByID(58923463773186183, issuers) - if err != nil { - t.Fatalf("couldn't find issuer: %s", err) - } - test.AssertEquals(t, issuer.shortID, uint8(99)) - - // an IssuerID - issuer, err = FindIssuerByID(2890189813, issuers) - if err != nil { - t.Fatalf("couldn't find issuer: %s", err) - } - test.AssertEquals(t, issuer.shortID, uint8(99)) -} diff --git a/rocsp/config/rocsp_config.go b/rocsp/config/rocsp_config.go deleted file mode 100644 index 0065d8acd21..00000000000 --- a/rocsp/config/rocsp_config.go +++ /dev/null @@ -1,226 +0,0 @@ -package rocsp_config - -import ( - "bytes" - "crypto/x509/pkix" - "encoding/asn1" - "fmt" - "strings" - - "github.com/go-redis/redis/v8" - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/cmd" - "github.com/letsencrypt/boulder/issuance" - "github.com/letsencrypt/boulder/rocsp" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -// RedisConfig contains the configuration needed to act as a Redis client. -type RedisConfig struct { - // PasswordFile is a file containing the password for the Redis user. - cmd.PasswordConfig - // TLS contains the configuration to speak TLS with Redis. - TLS cmd.TLSConfig - // Username is a Redis username. - Username string - // Addrs is a list of IP address:port pairs. - Addrs []string - // Timeout is a per-request timeout applied to all Redis requests. - Timeout cmd.ConfigDuration - - // Maximum number of retries before giving up. - // Default is to not retry failed commands. - MaxRetries int - // Minimum backoff between each retry. - // Default is 8 milliseconds; -1 disables backoff. - MinRetryBackoff cmd.ConfigDuration - // Maximum backoff between each retry. - // Default is 512 milliseconds; -1 disables backoff. - MaxRetryBackoff cmd.ConfigDuration - - // Dial timeout for establishing new connections. - // Default is 5 seconds. - DialTimeout cmd.ConfigDuration - // Timeout for socket reads. If reached, commands will fail - // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default. - // Default is 3 seconds. - ReadTimeout cmd.ConfigDuration - // Timeout for socket writes. If reached, commands will fail - // with a timeout instead of blocking. - // Default is ReadTimeout. - WriteTimeout cmd.ConfigDuration - - // Maximum number of socket connections. - // Default is 10 connections per every CPU as reported by runtime.NumCPU. - PoolSize int - // Minimum number of idle connections which is useful when establishing - // new connection is slow. - MinIdleConns int - // Connection age at which client retires (closes) the connection. - // Default is to not close aged connections. - MaxConnAge cmd.ConfigDuration - // Amount of time client waits for connection if all connections - // are busy before returning an error. - // Default is ReadTimeout + 1 second. - PoolTimeout cmd.ConfigDuration - // Amount of time after which client closes idle connections. - // Should be less than server's timeout. - // Default is 5 minutes. -1 disables idle timeout check. - IdleTimeout cmd.ConfigDuration - // Frequency of idle checks made by idle connections reaper. - // Default is 1 minute. 
-1 disables idle connections reaper, - // but idle connections are still discarded by the client - // if IdleTimeout is set. - IdleCheckFrequency cmd.ConfigDuration -} - -// MakeClient produces a *rocsp.WritingClient from a config. -func MakeClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.WritingClient, error) { - password, err := c.PasswordConfig.Pass() - if err != nil { - return nil, fmt.Errorf("loading password: %w", err) - } - - tlsConfig, err := c.TLS.Load() - if err != nil { - return nil, fmt.Errorf("loading TLS config: %w", err) - } - - timeout := c.Timeout.Duration - - rdb := redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: c.Addrs, - Username: c.Username, - Password: password, - TLSConfig: tlsConfig, - - MaxRetries: c.MaxRetries, - MinRetryBackoff: c.MinRetryBackoff.Duration, - MaxRetryBackoff: c.MaxRetryBackoff.Duration, - DialTimeout: c.DialTimeout.Duration, - ReadTimeout: c.ReadTimeout.Duration, - WriteTimeout: c.WriteTimeout.Duration, - - PoolSize: c.PoolSize, - MinIdleConns: c.MinIdleConns, - MaxConnAge: c.MaxConnAge.Duration, - PoolTimeout: c.PoolTimeout.Duration, - IdleTimeout: c.IdleTimeout.Duration, - IdleCheckFrequency: c.IdleCheckFrequency.Duration, - }) - return rocsp.NewWritingClient(rdb, timeout, clk, stats), nil -} - -// MakeReadClient produces a *rocsp.Client from a config. -func MakeReadClient(c *RedisConfig, clk clock.Clock, stats prometheus.Registerer) (*rocsp.Client, error) { - password, err := c.PasswordConfig.Pass() - if err != nil { - return nil, fmt.Errorf("loading password: %w", err) - } - - tlsConfig, err := c.TLS.Load() - if err != nil { - return nil, fmt.Errorf("loading TLS config: %w", err) - } - - timeout := c.Timeout.Duration - - rdb := redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: c.Addrs, - Username: c.Username, - Password: password, - TLSConfig: tlsConfig, - - MaxRetries: c.MaxRetries, - MinRetryBackoff: c.MinRetryBackoff.Duration, - MaxRetryBackoff: c.MaxRetryBackoff.Duration, - DialTimeout: c.DialTimeout.Duration, - ReadTimeout: c.ReadTimeout.Duration, - - PoolSize: c.PoolSize, - MinIdleConns: c.MinIdleConns, - MaxConnAge: c.MaxConnAge.Duration, - PoolTimeout: c.PoolTimeout.Duration, - IdleTimeout: c.IdleTimeout.Duration, - IdleCheckFrequency: c.IdleCheckFrequency.Duration, - }) - return rocsp.NewClient(rdb, timeout, clk, stats), nil -} - -// A ShortIDIssuer combines an issuance.Certificate with some fields necessary -// to process OCSP responses: the subject name and the shortID. -type ShortIDIssuer struct { - *issuance.Certificate - subject pkix.RDNSequence - shortID byte -} - -// LoadIssuers takes a map where the keys are filenames and the values are the -// corresponding short issuer ID. It loads issuer certificates from the given -// files and produces a []ShortIDIssuer. 
-func LoadIssuers(input map[string]int) ([]ShortIDIssuer, error) { - var issuers []ShortIDIssuer - for issuerFile, shortID := range input { - if shortID > 255 || shortID < 0 { - return nil, fmt.Errorf("invalid shortID %d (must be byte)", shortID) - } - cert, err := issuance.LoadCertificate(issuerFile) - if err != nil { - return nil, fmt.Errorf("reading issuer: %w", err) - } - var subject pkix.RDNSequence - _, err = asn1.Unmarshal(cert.Certificate.RawSubject, &subject) - if err != nil { - return nil, fmt.Errorf("parsing issuer.RawSubject: %w", err) - } - shortID := byte(shortID) - for _, issuer := range issuers { - if issuer.shortID == shortID { - return nil, fmt.Errorf("duplicate shortID '%d' in (for %q and %q) in config file", shortID, issuer.subject, subject) - } - if !issuer.IsCA { - return nil, fmt.Errorf("certificate for %q is not a CA certificate", subject) - } - } - issuers = append(issuers, ShortIDIssuer{ - Certificate: cert, - subject: subject, - shortID: shortID, - }) - } - return issuers, nil -} - -// ShortID returns the short ID of an issuer. The short ID is a single byte that -// is unique for that issuer. -func (si *ShortIDIssuer) ShortID() byte { - return si.shortID -} - -// FindIssuerByID returns the issuer that matches the given IssuerID or IssuerNameID. -func FindIssuerByID(longID int64, issuers []ShortIDIssuer) (*ShortIDIssuer, error) { - for _, iss := range issuers { - if iss.NameID() == issuance.IssuerNameID(longID) || iss.ID() == issuance.IssuerID(longID) { - return &iss, nil - } - } - return nil, fmt.Errorf("no issuer found for an ID in certificateStatus: %d", longID) -} - -// FindIssuerByName returns the issuer with a Subject matching the *ocsp.Response. -func FindIssuerByName(resp *ocsp.Response, issuers []ShortIDIssuer) (*ShortIDIssuer, error) { - var responder pkix.RDNSequence - _, err := asn1.Unmarshal(resp.RawResponderName, &responder) - if err != nil { - return nil, fmt.Errorf("parsing resp.RawResponderName: %w", err) - } - var responders strings.Builder - for _, issuer := range issuers { - fmt.Fprintf(&responders, "%s\n", issuer.subject) - if bytes.Equal(issuer.RawSubject, resp.RawResponderName) { - return &issuer, nil - } - } - return nil, fmt.Errorf("no issuer found matching OCSP response for %s. Available issuers:\n%s\n", responder, responders.String()) -} diff --git a/rocsp/metrics.go b/rocsp/metrics.go deleted file mode 100644 index e63da78c8ed..00000000000 --- a/rocsp/metrics.go +++ /dev/null @@ -1,48 +0,0 @@ -package rocsp - -import ( - "github.com/go-redis/redis/v8" - "github.com/prometheus/client_golang/prometheus" -) - -type metricsCollector struct { - rdb *redis.ClusterClient - - // Stats accessible from the go-redis connector: - // https://pkg.go.dev/github.com/go-redis/redis@v6.15.9+incompatible/internal/pool#Stats - hits *prometheus.Desc - misses *prometheus.Desc - timeouts *prometheus.Desc - totalConns *prometheus.Desc - idleConns *prometheus.Desc - staleConns *prometheus.Desc -} - -// Describe is implemented with DescribeByCollect. That's possible because the -// Collect method will always return the same metrics with the same descriptors. -func (dbc metricsCollector) Describe(ch chan<- *prometheus.Desc) { - prometheus.DescribeByCollect(dbc, ch) -} - -// Collect first triggers the Redis ClusterClient's PoolStats function. -// Then it creates constant metrics for each Stats value on the fly based -// on the returned data. -// -// Note that Collect could be called concurrently, so we depend on PoolStats() -// to be concurrency-safe. 
-func (dbc metricsCollector) Collect(ch chan<- prometheus.Metric) { - writeStat := func(stat *prometheus.Desc, typ prometheus.ValueType, val float64) { - ch <- prometheus.MustNewConstMetric(stat, typ, val) - } - writeGauge := func(stat *prometheus.Desc, val float64) { - writeStat(stat, prometheus.GaugeValue, val) - } - - stats := dbc.rdb.PoolStats() - writeGauge(dbc.hits, float64(stats.Hits)) - writeGauge(dbc.misses, float64(stats.Misses)) - writeGauge(dbc.timeouts, float64(stats.Timeouts)) - writeGauge(dbc.totalConns, float64(stats.TotalConns)) - writeGauge(dbc.idleConns, float64(stats.IdleConns)) - writeGauge(dbc.staleConns, float64(stats.StaleConns)) -} diff --git a/rocsp/mocks.go b/rocsp/mocks.go deleted file mode 100644 index f0b7991f2d3..00000000000 --- a/rocsp/mocks.go +++ /dev/null @@ -1,30 +0,0 @@ -package rocsp - -import ( - "context" - "fmt" - "time" -) - -// MockWriteClient is a mock -type MockWriteClient struct { - StoreReponseReturnError error -} - -// StoreResponse mocks a rocsp.StoreResponse method and returns nil or an -// error depending on the desired state. -func (r MockWriteClient) StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error { - return r.StoreReponseReturnError -} - -// NewMockWriteSucceedClient returns a mock MockWriteClient with a -// StoreResponse method that will always succeed. -func NewMockWriteSucceedClient() MockWriteClient { - return MockWriteClient{nil} -} - -// NewMockWriteFailClient returns a mock MockWriteClient with a -// StoreResponse method that will always fail. -func NewMockWriteFailClient() MockWriteClient { - return MockWriteClient{StoreReponseReturnError: fmt.Errorf("could not store response")} -} diff --git a/rocsp/rocsp.go b/rocsp/rocsp.go deleted file mode 100644 index e2b90d71c68..00000000000 --- a/rocsp/rocsp.go +++ /dev/null @@ -1,378 +0,0 @@ -package rocsp - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "time" - - "github.com/go-redis/redis/v8" - "github.com/jmhodges/clock" - "github.com/letsencrypt/boulder/core" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" -) - -var ErrRedisNotFound = errors.New("redis key not found") - -// Metadata represents information stored with the 'm' prefix in the Redis DB: -// information required to maintain or serve the response, but not the response -// itself. -type Metadata struct { - ShortIssuerID byte - // ThisUpdate contains the ThisUpdate time of the stored OCSP response. - ThisUpdate time.Time -} - -// String implements pretty-printing of Metadata -func (m Metadata) String() string { - return fmt.Sprintf("shortIssuerID: 0x%x, updated at: %s", m.ShortIssuerID, m.ThisUpdate) -} - -// Marshal turns a metadata into a slice of 9 bytes for writing into Redis. -// Storing these always as 9 bytes gives us some potential to change the -// storage format non-disruptively in the future, so long as we can distinguish -// on the length of the stored value. -func (m Metadata) Marshal() []byte { - var output [9]byte - output[0] = m.ShortIssuerID - epochSeconds := uint64(m.ThisUpdate.Unix()) - binary.LittleEndian.PutUint64(output[1:], epochSeconds) - return output[:] -} - -// UnmarshalMetadata takes data from Redis and turns it into a Metadata object. 
-func UnmarshalMetadata(input []byte) (Metadata, error) { - if len(input) != 9 { - return Metadata{}, fmt.Errorf("invalid metadata length %d", len(input)) - } - var output Metadata - output.ShortIssuerID = input[0] - epochSeconds := binary.LittleEndian.Uint64(input[1:]) - output.ThisUpdate = time.Unix(int64(epochSeconds), 0).UTC() - return output, nil -} - -// MakeResponseKey generates a Redis key string under which a response with the -// given serial should be stored. -func MakeResponseKey(serial string) string { - return fmt.Sprintf("r{%s}", serial) -} - -// MakeMetadataKey generates a Redis key string under which metadata for the -// response with the given serial should be stored. -func MakeMetadataKey(serial string) string { - return fmt.Sprintf("m{%s}", serial) -} - -func SerialFromResponseKey(key string) (string, error) { - if len(key) != 39 || key[0:2] != "r{" || key[38:39] != "}" { - return "", fmt.Errorf("malformed Redis OCSP response key %q", key) - } - return key[2:38], nil -} - -func SerialFromMetadataKey(key string) (string, error) { - if len(key) != 39 || key[0:2] != "m{" || key[38:39] != "}" { - return "", fmt.Errorf("malformed Redis OCSP metadata key %q", key) - } - return key[2:38], nil -} - -// Client represents a read-only Redis client. -type Client struct { - rdb *redis.ClusterClient - timeout time.Duration - clk clock.Clock - rdc metricsCollector - getLatency *prometheus.HistogramVec -} - -// NewClient creates a Client. The timeout applies to all requests, though a shorter timeout can be -// applied on a per-request basis using context.Context. -func NewClient( - rdb *redis.ClusterClient, - timeout time.Duration, - clk clock.Clock, - stats prometheus.Registerer, -) *Client { - dbc := metricsCollector{rdb: rdb} - - labels := prometheus.Labels{"address": rdb.Options().Addrs[0], "user": rdb.Options().Username} - dbc.hits = prometheus.NewDesc( - "redis_hits", - "Number of times free connection was found in the pool.", - nil, labels) - dbc.misses = prometheus.NewDesc( - "redis_misses", - "Number of times free connection was NOT found in the pool.", - nil, labels) - dbc.timeouts = prometheus.NewDesc( - "redis_timeouts", - "Number of times a wait timeout occurred.", - nil, labels) - dbc.totalConns = prometheus.NewDesc( - "redis_total_conns", - "Number of total connections in the pool.", - nil, labels) - dbc.idleConns = prometheus.NewDesc( - "redis_idle_conns", - "Number of idle connections in the pool.", - nil, labels) - dbc.staleConns = prometheus.NewDesc( - "redis_stale_conns", - "Number of stale connections removed from the pool.", - nil, labels) - stats.MustRegister(dbc) - getLatency := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rocsp_get_latency", - Help: "Histogram of latencies of rocsp.GetResponse and rocsp.GetMetadata calls with result and method labels", - }, - []string{"result", "method"}, - ) - stats.MustRegister(getLatency) - - return &Client{ - rdb: rdb, - timeout: timeout, - clk: clk, - rdc: dbc, - getLatency: getLatency, - } -} - -// WritingClient represents a Redis client that can both read and write. -type WritingClient struct { - *Client - storeResponseLatency *prometheus.HistogramVec -} - -// NewWritingClient creates a WritingClient. 
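One detail worth calling out in these key helpers: the braces in `r{serial}` and `m{serial}` are Redis Cluster hash tags, so only the serial between them is hashed. That places the response and metadata keys for one certificate on the same cluster slot, which is what lets StoreResponse cover both keys in a single WATCH transaction. A minimal sketch of the scheme, mirroring (not importing) the helpers above; the WritingClient constructor follows.

```go
package main

import "fmt"

// makeKeys mirrors MakeResponseKey and MakeMetadataKey. The {serial} hash
// tag keeps both keys on one Redis Cluster slot so they can be updated in
// a single transaction.
func makeKeys(serial string) (responseKey, metadataKey string) {
	return fmt.Sprintf("r{%s}", serial), fmt.Sprintf("m{%s}", serial)
}

func main() {
	r, m := makeKeys("ffaa13f9c34be80b8e2532b83afe063b59a6")
	fmt.Println(r) // r{ffaa13f9c34be80b8e2532b83afe063b59a6}
	fmt.Println(m) // m{ffaa13f9c34be80b8e2532b83afe063b59a6}
}
```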
-func NewWritingClient(rdb *redis.ClusterClient, timeout time.Duration, clk clock.Clock, stats prometheus.Registerer) *WritingClient {
-	storeResponseLatency := prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Name: "rocsp_store_response_latency",
-			Help: "Histogram of latencies of rocsp.StoreResponse calls with result labels",
-		},
-		[]string{"result"},
-	)
-	stats.MustRegister(storeResponseLatency)
-	return &WritingClient{NewClient(rdb, timeout, clk, stats), storeResponseLatency}
-}
-
-// StoreResponse parses the given bytes as an OCSP response, and stores it
-// into Redis, updating both the metadata and response keys. ShortIssuerID
-// is an arbitrarily assigned byte that uniquely identifies each issuer. It
-// must be the same across OCSP components. Returns an error if the OCSP
-// response fails to parse.
-func (c *WritingClient) StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error {
-	start := c.clk.Now()
-	ctx, cancel := context.WithTimeout(ctx, c.timeout)
-	defer cancel()
-
-	resp, err := ocsp.ParseResponse(respBytes, nil)
-	if err != nil {
-		return fmt.Errorf("parsing %d-byte response: %w", len(respBytes), err)
-	}
-
-	serial := core.SerialToString(resp.SerialNumber)
-
-	responseKey := MakeResponseKey(serial)
-	metadataKey := MakeMetadataKey(serial)
-
-	metadataStruct := Metadata{
-		ThisUpdate:    resp.ThisUpdate,
-		ShortIssuerID: shortIssuerID,
-	}
-	metadataValue := metadataStruct.Marshal()
-
-	err = c.rdb.Watch(ctx, func(tx *redis.Tx) error {
-		err := tx.Set(ctx, responseKey, respBytes, ttl).Err()
-		if err != nil {
-			return fmt.Errorf("setting response: %w", err)
-		}
-
-		err = tx.Set(ctx, metadataKey, metadataValue, ttl).Err()
-		if err != nil {
-			return fmt.Errorf("setting metadata: %w", err)
-		}
-
-		return nil
-	}, metadataKey, responseKey)
-	if err != nil {
-		state := "failed"
-		if errors.Is(err, context.DeadlineExceeded) {
-			state = "deadlineExceeded"
-		} else if errors.Is(err, context.Canceled) {
-			state = "canceled"
-		}
-		c.storeResponseLatency.With(prometheus.Labels{"result": state}).Observe(time.Since(start).Seconds())
-		return fmt.Errorf("transaction failed: %w", err)
-	}
-
-	c.storeResponseLatency.With(prometheus.Labels{"result": "success"}).Observe(time.Since(start).Seconds())
-	return nil
-}
-
-// GetResponse fetches the stored response bytes for the given serial number.
-// It does not parse them and does not check the metadata entry.
-func (c *Client) GetResponse(ctx context.Context, serial string) ([]byte, error) {
-	start := c.clk.Now()
-	ctx, cancel := context.WithTimeout(ctx, c.timeout)
-	defer cancel()
-
-	responseKey := MakeResponseKey(serial)
-
-	resp, err := c.rdb.Get(ctx, responseKey).Result()
-	if err != nil {
-		// go-redis `Get` returns a redis.Nil error when the key does not
-		// exist. In that case return an `ErrRedisNotFound` error.
-		if errors.Is(err, redis.Nil) {
-			c.getLatency.With(prometheus.Labels{"result": "notFound", "method": "GetResponse"}).Observe(time.Since(start).Seconds())
-			return nil, ErrRedisNotFound
-		}
-
-		state := "failed"
-		if errors.Is(err, context.DeadlineExceeded) {
-			state = "deadlineExceeded"
-		} else if errors.Is(err, context.Canceled) {
-			state = "canceled"
-		}
-		c.getLatency.With(prometheus.Labels{"result": state, "method": "GetResponse"}).Observe(time.Since(start).Seconds())
-		return nil, fmt.Errorf("getting response: %w", err)
-	}
-
-	c.getLatency.With(prometheus.Labels{"result": "success", "method": "GetResponse"}).Observe(time.Since(start).Seconds())
-	return []byte(resp), nil
-}
-
-// GetMetadata fetches the metadata for the given serial number.
-func (c *Client) GetMetadata(ctx context.Context, serial string) (*Metadata, error) {
-	start := c.clk.Now()
-	ctx, cancel := context.WithTimeout(ctx, c.timeout)
-	defer cancel()
-
-	metadataKey := MakeMetadataKey(serial)
-
-	resp, err := c.rdb.Get(ctx, metadataKey).Result()
-	if err != nil {
-		// go-redis `Get` returns a redis.Nil error when the key does not
-		// exist. In that case return an `ErrRedisNotFound` error.
-		if errors.Is(err, redis.Nil) {
-			c.getLatency.With(prometheus.Labels{"result": "notFound", "method": "GetMetadata"}).Observe(time.Since(start).Seconds())
-			return nil, ErrRedisNotFound
-		}
-
-		state := "failed"
-		if errors.Is(err, context.DeadlineExceeded) {
-			state = "deadlineExceeded"
-		} else if errors.Is(err, context.Canceled) {
-			state = "canceled"
-		}
-		c.getLatency.With(prometheus.Labels{"result": state, "method": "GetMetadata"}).Observe(time.Since(start).Seconds())
-		return nil, fmt.Errorf("getting metadata: %w", err)
-	}
-	metadata, err := UnmarshalMetadata([]byte(resp))
-	if err != nil {
-		c.getLatency.With(prometheus.Labels{"result": "failed", "method": "GetMetadata"}).Observe(time.Since(start).Seconds())
-		return nil, fmt.Errorf("unmarshaling metadata: %w", err)
-	}
-
-	c.getLatency.With(prometheus.Labels{"result": "success", "method": "GetMetadata"}).Observe(time.Since(start).Seconds())
-	return &metadata, nil
-}
-
-// ScanResponsesResult represents a single OCSP response entry in redis.
-// `Serial` is the stringified serial number of the response. `Body` is the
-// DER bytes of the response. If this object represents an error, `Err` will
-// be non-nil and the other entries will have their zero values.
-type ScanResponsesResult struct {
-	Serial string
-	Body   []byte
-	Err    error
-}
-
-// ScanResponses scans Redis for all OCSP responses where the serial number matches the provided pattern.
-// It returns immediately and emits results and errors on `<-chan ScanResponsesResult`. It closes the
-// channel when it is done or hits an error.
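The result-label classification repeated across StoreResponse, GetResponse, and GetMetadata could be condensed into a single helper. A sketch of that refactoring, not part of the original package, is below; the ScanResponses implementation follows.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// resultLabel maps an operation error onto the metric label values used
// above; the notFound case stays at the call site because it needs the
// redis.Nil sentinel, while context errors get their own buckets.
func resultLabel(err error) string {
	switch {
	case err == nil:
		return "success"
	case errors.Is(err, context.DeadlineExceeded):
		return "deadlineExceeded"
	case errors.Is(err, context.Canceled):
		return "canceled"
	default:
		return "failed"
	}
}

func main() {
	fmt.Println(resultLabel(nil))                      // success
	fmt.Println(resultLabel(context.DeadlineExceeded)) // deadlineExceeded
}
```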
-func (c *Client) ScanResponses(ctx context.Context, serialPattern string) <-chan ScanResponsesResult {
-	pattern := fmt.Sprintf("r{%s}", serialPattern)
-	results := make(chan ScanResponsesResult)
-	go func() {
-		defer close(results)
-		err := c.rdb.ForEachMaster(ctx, func(ctx context.Context, rdb *redis.Client) error {
-			iter := rdb.Scan(ctx, 0, pattern, 0).Iterator()
-			for iter.Next(ctx) {
-				key := iter.Val()
-				serial, err := SerialFromResponseKey(key)
-				if err != nil {
-					results <- ScanResponsesResult{Err: err}
-					continue
-				}
-				val, err := c.rdb.Get(ctx, key).Result()
-				if err != nil {
-					results <- ScanResponsesResult{Err: fmt.Errorf("getting response: %w", err)}
-					continue
-				}
-				results <- ScanResponsesResult{Serial: serial, Body: []byte(val)}
-			}
-			return iter.Err()
-		})
-		if err != nil {
-			results <- ScanResponsesResult{Err: err}
-			return
-		}
-	}()
-	return results
-}
-
-// ScanMetadataResult represents a single OCSP response entry in redis.
-// `Serial` is the stringified serial number of the response. `Metadata` is the
-// parsed metadata. If this object represents an error, `Err` will
-// be non-nil and the other entries will have their zero values.
-type ScanMetadataResult struct {
-	Serial   string
-	Metadata *Metadata
-	Err      error
-}
-
-// ScanMetadata scans Redis for the metadata of all OCSP responses where the serial number matches
-// the provided pattern. It returns immediately and emits results and errors on
-// `<-chan ScanMetadataResult`. It closes the channel when it is done or hits an error.
-func (c *Client) ScanMetadata(ctx context.Context, serialPattern string) <-chan ScanMetadataResult {
-	pattern := fmt.Sprintf("m{%s}", serialPattern)
-	results := make(chan ScanMetadataResult)
-	go func() {
-		defer close(results)
-		var cursor uint64
-		for {
-			var keys []string
-			var err error
-			keys, cursor, err = c.rdb.Scan(ctx, cursor, pattern, 10).Result()
-			if err != nil {
-				results <- ScanMetadataResult{Err: err}
-				return
-			}
-			for _, key := range keys {
-				serial, err := SerialFromMetadataKey(key)
-				if err != nil {
-					results <- ScanMetadataResult{Err: err}
-					return
-				}
-				m, err := c.GetMetadata(ctx, serial)
-				if err != nil {
-					results <- ScanMetadataResult{Err: err}
-					return
-				}
-				results <- ScanMetadataResult{Serial: serial, Metadata: m}
-			}
-			// Check the cursor only after draining the current batch of
-			// keys, so the final batch is not dropped.
-			if cursor == 0 {
-				return
-			}
-		}
-	}()
-	return results
-}
diff --git a/rocsp/rocsp_test.go b/rocsp/rocsp_test.go
deleted file mode 100644
index 76ef03276e9..00000000000
--- a/rocsp/rocsp_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package rocsp
-
-import (
-	"bytes"
-	"context"
-	"io/ioutil"
-	"testing"
-	"time"
-
-	"github.com/go-redis/redis/v8"
-	"github.com/jmhodges/clock"
-	"github.com/letsencrypt/boulder/cmd"
-	"github.com/letsencrypt/boulder/metrics"
-)
-
-func makeClient() (*WritingClient, clock.Clock) {
-	CACertFile := "../test/redis-tls/minica.pem"
-	CertFile := "../test/redis-tls/boulder/cert.pem"
-	KeyFile := "../test/redis-tls/boulder/key.pem"
-	tlsConfig := cmd.TLSConfig{
-		CACertFile: &CACertFile,
-		CertFile:   &CertFile,
-		KeyFile:    &KeyFile,
-	}
-	tlsConfig2, err := tlsConfig.Load()
-	if err != nil {
-		panic(err)
-	}
-
-	rdb := redis.NewClusterClient(&redis.ClusterOptions{
-		Addrs:     []string{"10.33.33.2:4218"},
-		Username:  "unittest-rw",
-		Password:  "824968fa490f4ecec1e52d5e34916bdb60d45f8d",
-		TLSConfig: tlsConfig2,
-	})
-	clk := clock.NewFake()
-	return NewWritingClient(rdb, 5*time.Second, clk, metrics.NoopRegisterer), clk
-}
-
-func TestSetAndGet(t *testing.T) {
-	client, _ := makeClient()
-
-	response, err :=
ioutil.ReadFile("testdata/ocsp.response") - if err != nil { - t.Fatal(err) - } - var shortIssuerID byte = 99 - err = client.StoreResponse(context.Background(), response, byte(shortIssuerID), time.Hour) - if err != nil { - t.Fatalf("storing response: %s", err) - } - - serial := "ffaa13f9c34be80b8e2532b83afe063b59a6" - resp2, err := client.GetResponse(context.Background(), serial) - if err != nil { - t.Fatalf("getting response: %s", err) - } - if !bytes.Equal(resp2, response) { - t.Errorf("response written and response retrieved were not equal") - } - - metadata, err := client.GetMetadata(context.Background(), serial) - if err != nil { - t.Fatalf("getting metadata: %s", err) - } - if metadata.ShortIssuerID != shortIssuerID { - t.Errorf("expected shortIssuerID %d, got %d", shortIssuerID, metadata.ShortIssuerID) - } - expectedTime, err := time.Parse(time.RFC3339, "2021-10-25T20:00:00Z") - if err != nil { - t.Fatalf("failed to parse time: %s", err) - } - if metadata.ThisUpdate != expectedTime { - t.Errorf("expected ThisUpdate %q, got %q", expectedTime, metadata.ThisUpdate) - } -} diff --git a/rocsp/testdata/ocsp.response b/rocsp/testdata/ocsp.response deleted file mode 100644 index c52cbbc1eb4..00000000000 Binary files a/rocsp/testdata/ocsp.response and /dev/null differ diff --git a/sa/_db-next/dbconf.yml b/sa/_db-next/dbconf.yml deleted file mode 120000 index 557b467ad53..00000000000 --- a/sa/_db-next/dbconf.yml +++ /dev/null @@ -1 +0,0 @@ -../_db/dbconf.yml \ No newline at end of file diff --git a/sa/_db-next/migrations/20210223140000_CombinedSchema.sql b/sa/_db-next/migrations/20210223140000_CombinedSchema.sql deleted file mode 120000 index 918ac6eff69..00000000000 --- a/sa/_db-next/migrations/20210223140000_CombinedSchema.sql +++ /dev/null @@ -1 +0,0 @@ -../../_db/migrations/20210223140000_CombinedSchema.sql \ No newline at end of file diff --git a/sa/_db-next/migrations/20210223140001_DropCertStatusSubscriberApproved.sql b/sa/_db-next/migrations/20210223140001_DropCertStatusSubscriberApproved.sql deleted file mode 100644 index b7a41a9745f..00000000000 --- a/sa/_db-next/migrations/20210223140001_DropCertStatusSubscriberApproved.sql +++ /dev/null @@ -1,10 +0,0 @@ - --- +goose Up --- SQL in section 'Up' is executed when this migration is applied - -ALTER TABLE `certificateStatus` DROP COLUMN `subscriberApproved`; - --- +goose Down --- SQL section 'Down' is executed when this migration is rolled back - -ALTER TABLE `certificateStatus` ADD COLUMN `subscriberApproved` TINYINT(1) DEFAULT 0; diff --git a/sa/_db-next/migrations/20210223140002_DropCertStatusLockCol.sql b/sa/_db-next/migrations/20210223140002_DropCertStatusLockCol.sql deleted file mode 100644 index 3e6459e9607..00000000000 --- a/sa/_db-next/migrations/20210223140002_DropCertStatusLockCol.sql +++ /dev/null @@ -1,10 +0,0 @@ - --- +goose Up --- SQL in section 'Up' is executed when this migration is applied - -ALTER TABLE `certificateStatus` DROP COLUMN `LockCol`; - --- +goose Down --- SQL section 'Down' is executed when this migration is rolled back - -ALTER TABLE `certificateStatus` ADD COLUMN `LockCol` BIGINT(20) DEFAULT 0; diff --git a/sa/_db-next/migrations/20210223140003_IssuedNamesDropIndex.sql b/sa/_db-next/migrations/20210223140003_IssuedNamesDropIndex.sql deleted file mode 100644 index f52d70d8bb2..00000000000 --- a/sa/_db-next/migrations/20210223140003_IssuedNamesDropIndex.sql +++ /dev/null @@ -1,10 +0,0 @@ - --- +goose Up --- SQL in section 'Up' is executed when this migration is applied - -ALTER TABLE issuedNames DROP INDEX 
`reversedName_renewal_notBefore_Idx`; - --- +goose Down --- SQL section 'Down' is executed when this migration is rolled back - -ALTER TABLE issuedNames ADD INDEX `reversedName_renewal_notBefore_Idx` (`reversedName`,`renewal`,`notBefore`); diff --git a/sa/_db-next/migrations/20210308140000_SimplePartitioning.sql b/sa/_db-next/migrations/20210308140000_SimplePartitioning.sql deleted file mode 120000 index 222cdb1594a..00000000000 --- a/sa/_db-next/migrations/20210308140000_SimplePartitioning.sql +++ /dev/null @@ -1 +0,0 @@ -../../_db/migrations/20210308140000_SimplePartitioning.sql \ No newline at end of file diff --git a/sa/_db-next/migrations/20210924100000_OldFQDNSets.sql b/sa/_db-next/migrations/20210924100000_OldFQDNSets.sql deleted file mode 120000 index 0ead4e277ea..00000000000 --- a/sa/_db-next/migrations/20210924100000_OldFQDNSets.sql +++ /dev/null @@ -1 +0,0 @@ -../../_db/migrations/20210924100000_OldFQDNSets.sql \ No newline at end of file diff --git a/sa/_db/dbconf.yml b/sa/_db/dbconf.yml deleted file mode 100644 index 46a87734a1f..00000000000 --- a/sa/_db/dbconf.yml +++ /dev/null @@ -1,10 +0,0 @@ -test: - driver: mysql - open: root@tcp(boulder-mysql:3306)/boulder_sa_test -integration: - driver: mysql - open: root@tcp(boulder-mysql:3306)/boulder_sa_integration -# what goose uses by default, even during migration creation -development: - driver: mysql - open: root@tcp(boulder-mysql:3306)/boulder_sa_integration diff --git a/sa/_db/migrations/20210223140000_CombinedSchema.sql b/sa/_db/migrations/20210223140000_CombinedSchema.sql deleted file mode 100644 index f9705d018f1..00000000000 --- a/sa/_db/migrations/20210223140000_CombinedSchema.sql +++ /dev/null @@ -1,237 +0,0 @@ --- +goose Up --- SQL in section 'Up' is executed when this migration is applied - -CREATE TABLE `authz2` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `identifierType` tinyint(4) NOT NULL, - `identifierValue` varchar(255) NOT NULL, - `registrationID` bigint(20) NOT NULL, - `status` tinyint(4) NOT NULL, - `expires` datetime NOT NULL, - `challenges` tinyint(4) NOT NULL, - `attempted` tinyint(4) DEFAULT NULL, - `attemptedAt` datetime DEFAULT NULL, - `token` binary(32) NOT NULL, - `validationError` mediumblob DEFAULT NULL, - `validationRecord` mediumblob DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `token` (`token`), - KEY `regID_expires_idx` (`registrationID`,`status`,`expires`), - KEY `regID_identifier_status_expires_idx` (`registrationID`,`identifierType`,`identifierValue`,`status`,`expires`), - KEY `expires_idx` (`expires`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `blockedKeys` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `keyHash` binary(32) NOT NULL, - `added` datetime NOT NULL, - `source` tinyint(4) NOT NULL, - `comment` varchar(255) DEFAULT NULL, - `revokedBy` bigint(20) DEFAULT 0, - `extantCertificatesChecked` tinyint(1) DEFAULT 0, - PRIMARY KEY (`id`), - UNIQUE KEY `keyHash` (`keyHash`), - KEY `extantCertificatesChecked_idx` (`extantCertificatesChecked`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `certificateStatus` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `serial` varchar(255) NOT NULL, - `subscriberApproved` tinyint(1) DEFAULT 0, - `status` varchar(255) NOT NULL, - `ocspLastUpdated` datetime NOT NULL, - `revokedDate` datetime NOT NULL, - `revokedReason` int(11) NOT NULL, - `lastExpirationNagSent` datetime NOT NULL, - `LockCol` bigint(20) DEFAULT 0, - `ocspResponse` blob DEFAULT NULL, - `notAfter` datetime DEFAULT NULL, - `isExpired` tinyint(1) DEFAULT 0, - `issuerID` 
bigint(20) DEFAULT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `isExpired_ocspLastUpdated_idx` (`isExpired`,`ocspLastUpdated`), - KEY `notAfter_idx` (`notAfter`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `certificatesPerName` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `eTLDPlusOne` varchar(255) NOT NULL, - `time` datetime NOT NULL, - `count` int(11) NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `eTLDPlusOne_time_idx` (`eTLDPlusOne`,`time`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `crls` ( - `serial` varchar(255) NOT NULL, - `createdAt` datetime NOT NULL, - `crl` varchar(255) NOT NULL, - PRIMARY KEY (`serial`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `fqdnSets` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `setHash` binary(32) NOT NULL, - `serial` varchar(255) NOT NULL, - `issued` datetime NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `setHash_issued_idx` (`setHash`,`issued`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `issuedNames` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `reversedName` varchar(640) CHARACTER SET ascii NOT NULL, - `notBefore` datetime NOT NULL, - `serial` varchar(255) NOT NULL, - `renewal` tinyint(1) NOT NULL DEFAULT 0, - PRIMARY KEY (`id`), - KEY `reversedName_notBefore_Idx` (`reversedName`,`notBefore`), - KEY `reversedName_renewal_notBefore_Idx` (`reversedName`,`renewal`,`notBefore`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `keyHashToSerial` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `keyHash` binary(32) NOT NULL, - `certNotAfter` datetime NOT NULL, - `certSerial` varchar(255) NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `unique_keyHash_certserial` (`keyHash`,`certSerial`), - KEY `keyHash_certNotAfter` (`keyHash`,`certNotAfter`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `newOrdersRL` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `regID` bigint(20) NOT NULL, - `time` datetime NOT NULL, - `count` int(11) NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `regID_time_idx` (`regID`,`time`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `orderToAuthz2` ( - `orderID` bigint(20) NOT NULL, - `authzID` bigint(20) NOT NULL, - PRIMARY KEY (`orderID`,`authzID`), - KEY `authzID` (`authzID`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `orders` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `registrationID` bigint(20) NOT NULL, - `expires` datetime NOT NULL, - `error` mediumblob DEFAULT NULL, - `certificateSerial` varchar(255) DEFAULT NULL, - `beganProcessing` tinyint(1) NOT NULL DEFAULT 0, - `created` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `reg_status_expires` (`registrationID`,`expires`), - KEY `regID_created_idx` (`registrationID`,`created`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `registrations` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `jwk` mediumblob NOT NULL, - `jwk_sha256` varchar(255) NOT NULL, - `contact` varchar(191) CHARACTER SET utf8mb4 NOT NULL, - `agreement` varchar(255) NOT NULL, - `LockCol` bigint(20) NOT NULL, - `initialIP` binary(16) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', - `createdAt` datetime NOT NULL, - `status` varchar(255) NOT NULL DEFAULT 'valid', - PRIMARY KEY (`id`), - UNIQUE KEY `jwk_sha256` (`jwk_sha256`), - KEY `initialIP_createdAt` (`initialIP`,`createdAt`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - --- Tables below have foreign key constraints, so are created after all other tables. 
- -CREATE TABLE `certificates` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `registrationID` bigint(20) NOT NULL, - `serial` varchar(255) NOT NULL, - `digest` varchar(255) NOT NULL, - `der` mediumblob NOT NULL, - `issued` datetime NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `regId_certificates_idx` (`registrationID`) COMMENT 'Common lookup', - KEY `issued_idx` (`issued`), - CONSTRAINT `regId_certificates` FOREIGN KEY (`registrationID`) REFERENCES `registrations` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `orderFqdnSets` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `setHash` binary(32) NOT NULL, - `orderID` bigint(20) NOT NULL, - `registrationID` bigint(20) NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - KEY `setHash_expires_idx` (`setHash`,`expires`), - KEY `orderID_idx` (`orderID`), - KEY `orderFqdnSets_registrationID_registrations` (`registrationID`), - CONSTRAINT `orderFqdnSets_orderID_orders` FOREIGN KEY (`orderID`) REFERENCES `orders` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION, - CONSTRAINT `orderFqdnSets_registrationID_registrations` FOREIGN KEY (`registrationID`) REFERENCES `registrations` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `precertificates` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `registrationID` bigint(20) NOT NULL, - `serial` varchar(255) NOT NULL, - `der` mediumblob NOT NULL, - `issued` datetime NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `regId_precertificates_idx` (`registrationID`), - KEY `issued_precertificates_idx` (`issued`), - CONSTRAINT `regId_precertificates` FOREIGN KEY (`registrationID`) REFERENCES `registrations` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `requestedNames` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `orderID` bigint(20) NOT NULL, - `reversedName` varchar(253) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (`id`), - KEY `orderID_idx` (`orderID`), - KEY `reversedName_idx` (`reversedName`), - CONSTRAINT `orderID_orders` FOREIGN KEY (`orderID`) REFERENCES `orders` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -CREATE TABLE `serials` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `registrationID` bigint(20) NOT NULL, - `serial` varchar(255) NOT NULL, - `created` datetime NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `regId_serials_idx` (`registrationID`), - CONSTRAINT `regId_serials` FOREIGN KEY (`registrationID`) REFERENCES `registrations` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - --- +goose Down --- SQL section 'Down' is executed when this migration is rolled back - --- First set of tables have foreign key constraints, so are dropped first. 
-DROP TABLE `certificates`;
-DROP TABLE `orderFqdnSets`;
-DROP TABLE `precertificates`;
-DROP TABLE `requestedNames`;
-DROP TABLE `serials`;
-
-DROP TABLE `authz2`;
-DROP TABLE `blockedKeys`;
-DROP TABLE `certificateStatus`;
-DROP TABLE `certificatesPerName`;
-DROP TABLE `crls`;
-DROP TABLE `fqdnSets`;
-DROP TABLE `issuedNames`;
-DROP TABLE `keyHashToSerial`;
-DROP TABLE `newOrdersRL`;
-DROP TABLE `orderToAuthz2`;
-DROP TABLE `orders`;
-DROP TABLE `registrations`;
diff --git a/sa/_db/migrations/20210308140000_SimplePartitioning.sql b/sa/_db/migrations/20210308140000_SimplePartitioning.sql
deleted file mode 100644
index 3b1d16aac80..00000000000
--- a/sa/_db/migrations/20210308140000_SimplePartitioning.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-
--- +goose Up
--- SQL in section 'Up' is executed when this migration is applied
-
-ALTER TABLE authz2 DROP INDEX IF EXISTS token;
-ALTER TABLE authz2 PARTITION BY RANGE(id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE certificates DROP FOREIGN KEY IF EXISTS regId_certificates;
-ALTER TABLE certificates DROP INDEX IF EXISTS serial, ADD INDEX serial (serial);
-ALTER TABLE certificates PARTITION BY RANGE(id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE fqdnSets DROP INDEX IF EXISTS serial, ADD INDEX serial (serial);
-ALTER TABLE fqdnSets PARTITION BY RANGE(id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE issuedNames PARTITION BY RANGE(id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE orderFqdnSets DROP FOREIGN KEY IF EXISTS orderFqdnSets_orderID_orders;
-ALTER TABLE orderFqdnSets DROP FOREIGN KEY IF EXISTS orderFqdnSets_registrationID_registrations;
-ALTER TABLE orderFqdnSets PARTITION BY RANGE (id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE orderToAuthz2 PARTITION BY RANGE COLUMNS(orderID, authzID) (
- PARTITION p_start VALUES LESS THAN (MAXVALUE, MAXVALUE));
-
--- Must be before orders, to remove the foreign key before partitioning orders.
-ALTER TABLE requestedNames DROP FOREIGN KEY IF EXISTS orderID_orders;
-ALTER TABLE requestedNames PARTITION BY RANGE (id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE orders PARTITION BY RANGE (id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
-ALTER TABLE precertificates DROP FOREIGN KEY IF EXISTS regId_precertificates;
-ALTER TABLE precertificates DROP INDEX IF EXISTS serial, ADD INDEX serial (serial);
-ALTER TABLE precertificates PARTITION BY RANGE(id) (
- PARTITION p_start VALUES LESS THAN MAXVALUE);
-
--- +goose Down
--- SQL section 'Down' is executed when this migration is rolled back
-
-ALTER TABLE authz2 REMOVE PARTITIONING;
-ALTER TABLE certificates REMOVE PARTITIONING;
-ALTER TABLE fqdnSets REMOVE PARTITIONING;
-ALTER TABLE issuedNames REMOVE PARTITIONING;
-ALTER TABLE orderFqdnSets REMOVE PARTITIONING;
-ALTER TABLE orderToAuthz2 REMOVE PARTITIONING;
-ALTER TABLE orders REMOVE PARTITIONING;
-ALTER TABLE precertificates REMOVE PARTITIONING;
-ALTER TABLE requestedNames REMOVE PARTITIONING;
diff --git a/sa/_db/migrations/20210924100000_OldFQDNSets.sql b/sa/_db/migrations/20210924100000_OldFQDNSets.sql
deleted file mode 100644
index 46180ecf868..00000000000
--- a/sa/_db/migrations/20210924100000_OldFQDNSets.sql
+++ /dev/null
@@ -1,24 +0,0 @@
--- TODO(#5670): Remove this file and the _db-next pointer to it.
- --- +goose Up --- SQL in section 'Up' is executed when this migration is applied - -CREATE TABLE `fqdnSets_old` ( - `id` bigint(20) NOT NULL AUTO_INCREMENT, - `setHash` binary(32) NOT NULL, - `serial` varchar(255) NOT NULL, - `issued` datetime NOT NULL, - `expires` datetime NOT NULL, - PRIMARY KEY (`id`), - UNIQUE KEY `serial` (`serial`), - KEY `setHash_issued_idx` (`setHash`,`issued`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - -ALTER TABLE fqdnSets DROP INDEX IF EXISTS serial, ADD INDEX serial (serial); -ALTER TABLE fqdnSets PARTITION BY RANGE(id) ( - PARTITION p_start VALUES LESS THAN MAXVALUE); - --- +goose Down --- SQL section 'Down' is executed when this migration is rolled back - -DROP TABLE `fqdnSets_old` diff --git a/sa/database.go b/sa/database.go index ccf02a12278..c5b09532831 100644 --- a/sa/database.go +++ b/sa/database.go @@ -5,10 +5,11 @@ import ( "fmt" "time" - "github.com/go-gorp/gorp/v3" "github.com/go-sql-driver/mysql" "github.com/prometheus/client_golang/prometheus" + "github.com/letsencrypt/borp" + "github.com/letsencrypt/boulder/cmd" "github.com/letsencrypt/boulder/core" boulderDB "github.com/letsencrypt/boulder/db" @@ -45,9 +46,9 @@ type DbSettings struct { ConnMaxIdleTime time.Duration } -// InitWrappedDb constructs a wrapped gorp mapping object with the provided -// settings. If scope is non-Nil database metrics will be initialized. If logger -// is non-Nil (gorp) SQL debugging will be enabled. The only required parameter +// InitWrappedDb constructs a wrapped borp mapping object with the provided +// settings. If scope is non-nil, Prometheus metrics will be exported. If logger +// is non-nil, SQL debug-level logging will be enabled. The only required parameter // is config. func InitWrappedDb(config cmd.DBConfig, scope prometheus.Registerer, logger blog.Logger) (*boulderDB.WrappedMap, error) { url, err := config.URL() @@ -62,77 +63,30 @@ func InitWrappedDb(config cmd.DBConfig, scope prometheus.Registerer, logger blog ConnMaxIdleTime: config.ConnMaxIdleTime.Duration, } - dbMap, err := NewDbMap(url, settings) + mysqlConfig, err := mysql.ParseDSN(url) if err != nil { - return nil, fmt.Errorf("while initializing database connection: %s", err) - } - - if logger != nil { - SetSQLDebug(dbMap, logger) - } - - addr, user, err := config.DSNAddressAndUser() - cmd.FailOnError(err, "while parsing DSN") - - if scope != nil { - InitDBMetrics(dbMap.Db, scope, settings, addr, user) - } - return dbMap, nil -} - -// InitSqlDb constructs a *sql.DB object using the provided settings and enables -// 'interpolateParams' and 'parseTime'. If scope is non-Nil database metrics -// will also be initialized. The only required parameter is config. 
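A hypothetical caller of the new InitWrappedDb signature, using the same cmd.DBConfig fields the updated tests below exercise; the file path and pool sizes are illustrative only. The removed InitSqlDb follows.

```go
package main

import (
	"log"
	"time"

	"github.com/letsencrypt/boulder/cmd"
	"github.com/letsencrypt/boulder/config"
	"github.com/letsencrypt/boulder/metrics"
	"github.com/letsencrypt/boulder/sa"
)

func main() {
	// DBConnectFile points at a file containing a MySQL DSN, as in
	// TestDbSettings; all other values here are illustrative.
	dbMap, err := sa.InitWrappedDb(cmd.DBConfig{
		DBConnectFile:   "/path/to/dbconnect",
		MaxOpenConns:    100,
		MaxIdleConns:    100,
		ConnMaxLifetime: config.Duration{Duration: 100 * time.Second},
		ConnMaxIdleTime: config.Duration{Duration: 100 * time.Second},
	}, metrics.NoopRegisterer, nil) // nil logger: SQL debug logging disabled
	if err != nil {
		log.Fatalf("connecting to DB: %s", err)
	}
	_ = dbMap
}
```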
-func InitSqlDb(config cmd.DBConfig, scope prometheus.Registerer) (*sql.DB, error) { - url, err := config.URL() - if err != nil { - return nil, fmt.Errorf("failed to load DBConnect URL: %s", err) - } - - conf, err := mysql.ParseDSN(url) - if err != nil { - return nil, fmt.Errorf("while parsing DSN from 'DBConnectFile': %s", err) - } - - if len(conf.Params) == 0 { - conf.Params = map[string]string{ - "interpolateParams": "true", - "parseTime": "true", - } - } else { - conf.Params["interpolateParams"] = "true" - conf.Params["parseTime"] = "true" + return nil, err } - db, err := sql.Open("mysql", conf.FormatDSN()) + dbMap, err := newDbMapFromMySQLConfig(mysqlConfig, settings, scope, logger) if err != nil { - return nil, fmt.Errorf("couldn't setup database client: %s", err) + return nil, err } - db.SetMaxOpenConns(config.MaxOpenConns) - db.SetMaxIdleConns(config.MaxIdleConns) - db.SetConnMaxLifetime(config.ConnMaxLifetime.Duration) - db.SetConnMaxIdleTime(config.ConnMaxIdleTime.Duration) - - addr, user, err := config.DSNAddressAndUser() - cmd.FailOnError(err, "while parsing DSN") - if scope != nil { - settings := DbSettings{ - MaxOpenConns: config.MaxOpenConns, - MaxIdleConns: config.MaxIdleConns, - ConnMaxLifetime: config.ConnMaxLifetime.Duration, - ConnMaxIdleTime: config.ConnMaxIdleTime.Duration, - } - InitDBMetrics(db, scope, settings, addr, user) - } - return db, nil + return dbMap, nil } -// NewDbMap creates a wrapped root gorp mapping object. Create one of these for +// DBMapForTest creates a wrapped root borp mapping object. Create one of these for // each database schema you wish to map. Each DbMap contains a list of mapped // tables. It automatically maps the tables for the primary parts of Boulder // around the Storage Authority. -func NewDbMap(dbConnect string, settings DbSettings) (*boulderDB.WrappedMap, error) { +func DBMapForTest(dbConnect string) (*boulderDB.WrappedMap, error) { + return DBMapForTestWithLog(dbConnect, nil) +} + +// DBMapForTestWithLog does the same as DBMapForTest but also routes the debug logs +// from the database driver to the given log (usually a `blog.NewMock`). +func DBMapForTestWithLog(dbConnect string, log blog.Logger) (*boulderDB.WrappedMap, error) { var err error var config *mysql.Config @@ -141,7 +95,7 @@ func NewDbMap(dbConnect string, settings DbSettings) (*boulderDB.WrappedMap, err return nil, err } - return NewDbMapFromConfig(config, settings) + return newDbMapFromMySQLConfig(config, DbSettings{}, nil, log) } // sqlOpen is used in the tests to check that the arguments are properly @@ -178,10 +132,22 @@ var setConnMaxIdleTime = func(db *sql.DB, connMaxIdleTime time.Duration) { } } -// NewDbMapFromConfig functions similarly to NewDbMap, but it takes the -// decomposed form of the connection string, a *mysql.Config. -func NewDbMapFromConfig(config *mysql.Config, settings DbSettings) (*boulderDB.WrappedMap, error) { - adjustMySQLConfig(config) +// newDbMapFromMySQLConfig opens a database connection given the provided *mysql.Config, plus some Boulder-specific +// required and default settings, plus some additional config in the sa.DbSettings object. The sa.DbSettings object +// is usually provided from JSON config. +// +// This function also: +// - pings the database (and errors if it's unreachable) +// - wraps the connection in a borp.DbMap so we can use the handy Get/Insert methods borp provides +// - wraps that in a db.WrappedMap to get more useful error messages +// +// If logger is non-nil, it will receive debug log messages from borp. 
+// If scope is non-nil, it will be used to register Prometheus metrics. +func newDbMapFromMySQLConfig(config *mysql.Config, settings DbSettings, scope prometheus.Registerer, logger blog.Logger) (*boulderDB.WrappedMap, error) { + err := adjustMySQLConfig(config) + if err != nil { + return nil, err + } db, err := sqlOpen("mysql", config.FormatDSN()) if err != nil { @@ -195,16 +161,26 @@ func NewDbMapFromConfig(config *mysql.Config, settings DbSettings) (*boulderDB.W setConnMaxLifetime(db, settings.ConnMaxLifetime) setConnMaxIdleTime(db, settings.ConnMaxIdleTime) - dialect := gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"} - dbmap := &gorp.DbMap{Db: db, Dialect: dialect, TypeConverter: BoulderTypeConverter{}} + if scope != nil { + err = initDBMetrics(db, scope, settings, config.Addr, config.User) + if err != nil { + return nil, fmt.Errorf("while initializing metrics: %w", err) + } + } - initTables(dbmap) + dialect := borp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"} + dbmap := &borp.DbMap{Db: db, Dialect: dialect, TypeConverter: BoulderTypeConverter{}} - return &boulderDB.WrappedMap{DbMap: dbmap}, nil + if logger != nil { + dbmap.TraceOn("SQL: ", &SQLLogger{logger}) + } + + initTables(dbmap) + return boulderDB.NewWrappedMap(dbmap), nil } // adjustMySQLConfig sets certain flags that we want on every connection. -func adjustMySQLConfig(conf *mysql.Config) *mysql.Config { +func adjustMySQLConfig(conf *mysql.Config) error { // Required to turn DATETIME fields into time.Time conf.ParseTime = true @@ -212,43 +188,49 @@ func adjustMySQLConfig(conf *mysql.Config) *mysql.Config { // instead of the number of rows changed by the UPDATE. conf.ClientFoundRows = true + if conf.Params == nil { + conf.Params = make(map[string]string) + } + + // If a given parameter is not already set in conf.Params from the DSN, set it. + setDefault := func(name, value string) { + _, ok := conf.Params[name] + if !ok { + conf.Params[name] = value + } + } + // Ensures that MySQL/MariaDB warnings are treated as errors. This // avoids a number of nasty edge conditions we could wander into. // Common things this discovers includes places where data being sent // had a different type than what is in the schema, strings being // truncated, writing null to a NOT NULL column, and so on. See // . - conf.Params = make(map[string]string) - conf.Params["sql_mode"] = "STRICT_ALL_TABLES" - - // If a read timeout is set, we set max_statement_time to 95% of that, and - // long_query_time to 80% of that. That way we get logs of queries that are - // close to timing out but not yet doing so, and our queries get stopped by - // max_statement_time before timing out the read. This generates clearer - // errors, and avoids unnecessary reconnects. - if conf.ReadTimeout != 0 { - // In MariaDB, max_statement_time and long_query_time are both seconds. - // Note: in MySQL (which we don't use), max_statement_time is millis. - readTimeout := conf.ReadTimeout.Seconds() - conf.Params["max_statement_time"] = fmt.Sprintf("%g", readTimeout*0.95) - conf.Params["long_query_time"] = fmt.Sprintf("%g", readTimeout*0.80) + setDefault("sql_mode", "'STRICT_ALL_TABLES'") + + // Omit max_statement_time and max_execution_time from the DSN. Query + // timeouts are managed exclusively by ProxySQL and/or Vitess. + delete(conf.Params, "max_statement_time") + delete(conf.Params, "max_execution_time") + + // Finally, perform validation over all variables set by the DSN and via Boulder. 
+	for k, v := range conf.Params {
+		err := checkMariaDBSystemVariables(k, v)
+		if err != nil {
+			return err
+		}
 	}
-	return conf
-}
-
-// SetSQLDebug enables GORP SQL-level Debugging
-func SetSQLDebug(dbMap *boulderDB.WrappedMap, log blog.Logger) {
-	dbMap.TraceOn("SQL: ", &SQLLogger{log})
+	return nil
 }
 
-// SQLLogger adapts the Boulder Logger to a format GORP can use.
+// SQLLogger adapts the Boulder Logger to a format borp can use.
 type SQLLogger struct {
 	blog.Logger
 }
 
-// Printf adapts the AuditLogger to GORP's interface
-func (log *SQLLogger) Printf(format string, v ...interface{}) {
+// Printf adapts the Logger to borp's interface
+func (log *SQLLogger) Printf(format string, v ...any) {
 	log.Debugf(format, v...)
 }
 
@@ -257,26 +239,31 @@ func (log *SQLLogger) Printf(format string, v ...interface{}) {
 // it is very important to declare them as such here. It produces a side
 // effect in Insert() where the inserted object has its id field set to the
 // autoincremented value that resulted from the insert. See
-// https://godoc.org/github.com/coopernurse/gorp#DbMap.Insert
-func initTables(dbMap *gorp.DbMap) {
+// https://godoc.org/github.com/letsencrypt/borp#DbMap.Insert
+func initTables(dbMap *borp.DbMap) {
 	regTable := dbMap.AddTableWithName(regModel{}, "registrations").SetKeys(true, "ID")
-	regTable.SetVersionCol("LockCol")
 	regTable.ColMap("Key").SetNotNull(true)
 	regTable.ColMap("KeySHA256").SetNotNull(true).SetUnique(true)
-	dbMap.AddTableWithName(authzModel{}, "authz").SetKeys(false, "ID")
-	dbMap.AddTableWithName(challModel{}, "challenges").SetKeys(true, "ID")
 	dbMap.AddTableWithName(issuedNameModel{}, "issuedNames").SetKeys(true, "ID")
 	dbMap.AddTableWithName(core.Certificate{}, "certificates").SetKeys(true, "ID")
-	dbMap.AddTableWithName(core.CertificateStatus{}, "certificateStatus").SetKeys(true, "ID")
-	dbMap.AddTableWithName(core.FQDNSet{}, "fqdnSets").SetKeys(true, "ID")
+	dbMap.AddTableWithName(certificateStatusModel{}, "certificateStatus").SetKeys(true, "ID")
+	dbMap.AddTableWithName(fqdnSet{}, "fqdnSets").SetKeys(true, "ID")
 	dbMap.AddTableWithName(orderModel{}, "orders").SetKeys(true, "ID")
-	dbMap.AddTableWithName(orderToAuthzModel{}, "orderToAuthz").SetKeys(false, "OrderID", "AuthzID")
-	dbMap.AddTableWithName(requestedNameModel{}, "requestedNames").SetKeys(false, "OrderID")
 	dbMap.AddTableWithName(orderFQDNSet{}, "orderFqdnSets").SetKeys(true, "ID")
 	dbMap.AddTableWithName(authzModel{}, "authz2").SetKeys(true, "ID")
-	dbMap.AddTableWithName(orderToAuthzModel{}, "orderToAuthz2").SetKeys(false, "OrderID", "AuthzID")
 	dbMap.AddTableWithName(recordedSerialModel{}, "serials").SetKeys(true, "ID")
-	dbMap.AddTableWithName(precertificateModel{}, "precertificates").SetKeys(true, "ID")
+	dbMap.AddTableWithName(lintingCertModel{}, "precertificates").SetKeys(true, "ID")
 	dbMap.AddTableWithName(keyHashModel{}, "keyHashToSerial").SetKeys(true, "ID")
+	dbMap.AddTableWithName(incidentModel{}, "incidents").SetKeys(true, "ID")
+	dbMap.AddTable(incidentSerialModel{})
+	dbMap.AddTableWithName(crlShardModel{}, "crlShards").SetKeys(true, "ID")
+	dbMap.AddTableWithName(revokedCertModel{}, "revokedCertificates").SetKeys(true, "ID")
+	dbMap.AddTableWithName(replacementOrderModel{}, "replacementOrders").SetKeys(true, "ID")
+	dbMap.AddTableWithName(pausedModel{}, "paused")
+	dbMap.AddTableWithName(overrideModel{}, "overrides").SetKeys(false, "limitEnum", "bucketKey")
+
+	// Read-only maps used for selecting subsets of columns.
+ dbMap.AddTableWithName(CertStatusMetadata{}, "certificateStatus") + dbMap.AddTableWithName(crlEntryModel{}, "certificateStatus") } diff --git a/sa/database_test.go b/sa/database_test.go index 6a290eb669e..1dab7052566 100644 --- a/sa/database_test.go +++ b/sa/database_test.go @@ -1,19 +1,41 @@ package sa import ( + "context" "database/sql" "errors" + "os" + "path" "strings" "testing" "time" + "github.com/go-sql-driver/mysql" + "github.com/letsencrypt/boulder/cmd" + "github.com/letsencrypt/boulder/config" "github.com/letsencrypt/boulder/test" "github.com/letsencrypt/boulder/test/vars" ) func TestInvalidDSN(t *testing.T) { - _, err := NewDbMap("invalid", DbSettings{}) + _, err := DBMapForTest("invalid") test.AssertError(t, err, "DB connect string missing the slash separating the database name") + + DSN := "policy:password@tcp(foo-database:1337)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms&stringVarThatDoesntExist=%27whoopsidaisies" + _, err = DBMapForTest(DSN) + test.AssertError(t, err, "Variable does not exist in curated system var list, but didn't return an error and should have") + + DSN = "policy:password@tcp(foo-database:1337)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms&concurrent_insert=2" + _, err = DBMapForTest(DSN) + test.AssertError(t, err, "Variable is unable to be set in the SESSION scope, but was declared") + + DSN = "policy:password@tcp(foo-database:1337)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms&optimizer_switch=incorrect-quoted-string" + _, err = DBMapForTest(DSN) + test.AssertError(t, err, "Variable declared with incorrect quoting") + + DSN = "policy:password@tcp(foo-database:1337)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms&concurrent_insert=%272%27" + _, err = DBMapForTest(DSN) + test.AssertError(t, err, "Integer enum declared, but should not have been quoted") } var errExpected = errors.New("expected") @@ -52,33 +74,40 @@ func TestDbSettings(t *testing.T) { connMaxIdleTime = c oldSetConnMaxIdleTime(db, connMaxIdleTime) } - dbSettings := DbSettings{ + dsnFile := path.Join(t.TempDir(), "dbconnect") + + err := os.WriteFile(dsnFile, []byte(vars.DBConnSA), os.ModeAppend) + test.AssertNotError(t, err, "writing dbconnect file") + + config := cmd.DBConfig{ + DBConnectFile: dsnFile, MaxOpenConns: 100, MaxIdleConns: 100, - ConnMaxLifetime: 100, - ConnMaxIdleTime: 100, + ConnMaxLifetime: config.Duration{Duration: 100 * time.Second}, + ConnMaxIdleTime: config.Duration{Duration: 100 * time.Second}, } - _, err := NewDbMap("sa@tcp(boulder-mysql:3306)/boulder_sa_integration", dbSettings) + _, err = InitWrappedDb(config, nil, nil) if err != nil { t.Errorf("connecting to DB: %s", err) } if maxOpenConns != 100 { - t.Errorf("maxOpenConns was not set: expected %d, got %d", 100, maxOpenConns) + t.Errorf("maxOpenConns was not set: expected 100, got %d", maxOpenConns) } if maxIdleConns != 100 { - t.Errorf("maxIdleConns was not set: expected %d, got %d", 100, maxIdleConns) + t.Errorf("maxIdleConns was not set: expected 100, got %d", maxIdleConns) } - if connMaxLifetime != 100 { - t.Errorf("connMaxLifetime was not set: expected %d, got %d", 100, connMaxLifetime) + if connMaxLifetime != 100*time.Second { + t.Errorf("connMaxLifetime was not set: expected 100s, got %s", connMaxLifetime) } - if connMaxIdleTime != 100 { - t.Errorf("connMaxIdleTime was not set: expected %d, got %d", 100, connMaxIdleTime) + if connMaxIdleTime != 100*time.Second { + t.Errorf("connMaxIdleTime was not set: expected 100s, got %s", 
connMaxIdleTime) } } +// TODO: Change this to test `newDbMapFromMySQLConfig` instead? func TestNewDbMap(t *testing.T) { - const mysqlConnectURL = "policy:password@tcp(boulder-mysql:3306)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms" - const expected = "policy:password@tcp(boulder-mysql:3306)/boulder_policy_integration?clientFoundRows=true&parseTime=true&readTimeout=800ms&writeTimeout=800ms&long_query_time=0.6400000000000001&max_statement_time=0.76&sql_mode=STRICT_ALL_TABLES" + const mysqlConnectURL = "policy:password@tcp(boulder-proxysql:6033)/boulder_policy_integration?readTimeout=800ms&writeTimeout=800ms" + const expected = "policy:password@tcp(boulder-proxysql:6033)/boulder_policy_integration?clientFoundRows=true&parseTime=true&readTimeout=800ms&writeTimeout=800ms&sql_mode=%27STRICT_ALL_TABLES%27" oldSQLOpen := sqlOpen defer func() { sqlOpen = oldSQLOpen @@ -90,7 +119,7 @@ func TestNewDbMap(t *testing.T) { return nil, errExpected } - dbMap, err := NewDbMap(mysqlConnectURL, DbSettings{}) + dbMap, err := DBMapForTest(mysqlConnectURL) if err != errExpected { t.Errorf("got incorrect error. Got %v, expected %v", err, errExpected) } @@ -101,13 +130,16 @@ func TestNewDbMap(t *testing.T) { } func TestStrictness(t *testing.T) { - dbMap, err := NewDbMap(vars.DBConnSA, DbSettings{1, 0, 0, 0}) + dbMap, err := DBMapForTest(vars.DBConnSA) if err != nil { t.Fatal(err) } - _, err = dbMap.Exec(`insert into orderToAuthz2 set - orderID=999999999999999999999999999, - authzID=999999999999999999999999999;`) + _, err = dbMap.ExecContext(ctx, `insert into serials set + id=999999999999999999999999999, + serial="abcd", + registrationID=99, + created="2026-01-01", + expires="2026-02-01";`) if err == nil { t.Fatal("Expected error when providing out of range value, got none.") } @@ -116,41 +148,30 @@ func TestStrictness(t *testing.T) { } } -func TestTimeouts(t *testing.T) { - dbMap, err := NewDbMap(vars.DBConnSA+"?readTimeout=1s", DbSettings{1, 0, 0, 0}) - if err != nil { - t.Fatal("Error setting up DB:", err) - } - // SLEEP is defined to return 1 if it was interrupted, but we want to actually - // get an error to simulate what would happen with a slow query. So we wrap - // the SLEEP in a subselect. - _, err = dbMap.Exec(`SELECT 1 FROM (SELECT SLEEP(5)) as subselect;`) - if err == nil { - t.Fatal("Expected error when running slow query, got none.") - } - - // We expect to get: - // Error 1969: Query execution was interrupted (max_statement_time exceeded) - // https://mariadb.com/kb/en/mariadb/mariadb-error-codes/ - if !strings.Contains(err.Error(), "Error 1969") { - t.Fatalf("Got wrong type of error: %s", err) - } -} - // TestAutoIncrementSchema tests that all of the tables in the boulder_* // databases that have auto_increment columns use BIGINT for the data type. Our // data is too big for INT. 
func TestAutoIncrementSchema(t *testing.T) { - dbMap, err := NewDbMap(vars.DBInfoSchemaRoot, DbSettings{1, 0, 0, 0}) + dbMap, err := DBMapForTest(vars.DBInfoSchemaRoot) test.AssertNotError(t, err, "unexpected err making NewDbMap") var count int64 err = dbMap.SelectOne( + context.Background(), &count, - `SELECT COUNT(1) FROM columns WHERE + `SELECT COUNT(*) FROM columns WHERE table_schema LIKE 'boulder%' AND extra LIKE '%auto_increment%' AND data_type != "bigint"`) test.AssertNotError(t, err, "unexpected err querying columns") test.AssertEquals(t, count, int64(0)) } + +func TestAdjustMySQLConfig(t *testing.T) { + conf := &mysql.Config{} + err := adjustMySQLConfig(conf) + test.AssertNotError(t, err, "unexpected err setting server variables") + test.Assert(t, conf.ParseTime, "ParseTime should be enabled") + test.Assert(t, conf.ClientFoundRows, "ClientFoundRows should be enabled") + test.AssertDeepEquals(t, conf.Params, map[string]string{"sql_mode": "'STRICT_ALL_TABLES'"}) +} diff --git a/sa/db-next/boulder_sa/20230419000000_CombinedSchema.sql b/sa/db-next/boulder_sa/20230419000000_CombinedSchema.sql new file mode 120000 index 00000000000..d9ffbb391b6 --- /dev/null +++ b/sa/db-next/boulder_sa/20230419000000_CombinedSchema.sql @@ -0,0 +1 @@ +../../db/boulder_sa/20230419000000_CombinedSchema.sql \ No newline at end of file diff --git a/sa/db-next/boulder_sa/20230419000001_DropCertStatusSubscriberApproved.sql b/sa/db-next/boulder_sa/20230419000001_DropCertStatusSubscriberApproved.sql new file mode 100644 index 00000000000..f1dfadabb0a --- /dev/null +++ b/sa/db-next/boulder_sa/20230419000001_DropCertStatusSubscriberApproved.sql @@ -0,0 +1,10 @@ + +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied + +ALTER TABLE `certificateStatus` DROP COLUMN `subscriberApproved`; + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back + +ALTER TABLE `certificateStatus` ADD COLUMN `subscriberApproved` TINYINT(1) DEFAULT 0; diff --git a/sa/db-next/boulder_sa/20230419000002_DropCertStatusLockCol.sql b/sa/db-next/boulder_sa/20230419000002_DropCertStatusLockCol.sql new file mode 100644 index 00000000000..f634cac259f --- /dev/null +++ b/sa/db-next/boulder_sa/20230419000002_DropCertStatusLockCol.sql @@ -0,0 +1,10 @@ + +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied + +ALTER TABLE `certificateStatus` DROP COLUMN `LockCol`; + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back + +ALTER TABLE `certificateStatus` ADD COLUMN `LockCol` BIGINT(20) DEFAULT 0; diff --git a/sa/db-next/boulder_sa/20250708000000_DropRegistrationsLockCol.sql b/sa/db-next/boulder_sa/20250708000000_DropRegistrationsLockCol.sql new file mode 100644 index 00000000000..5675854556f --- /dev/null +++ b/sa/db-next/boulder_sa/20250708000000_DropRegistrationsLockCol.sql @@ -0,0 +1,9 @@ +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied + +ALTER TABLE `registrations` DROP COLUMN `LockCol`; + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back + +ALTER TABLE `registrations` ADD COLUMN `LockCol` BIGINT(20) NOT NULL DEFAULT 0; diff --git a/sa/db-next/boulder_sa/20251002000000_AddRevokedSerialsIndex.sql b/sa/db-next/boulder_sa/20251002000000_AddRevokedSerialsIndex.sql new file mode 100644 index 00000000000..44815cc4edf --- /dev/null +++ b/sa/db-next/boulder_sa/20251002000000_AddRevokedSerialsIndex.sql @@ -0,0 +1,9 @@ +-- +migrate Up +-- SQL in section 'Up' is 
executed when this migration is applied + +ALTER TABLE `revokedCertificates` ADD KEY `serial` (`serial`); + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back + +ALTER TABLE `revokedCertificates` DROP KEY `serial`; diff --git a/sa/db-next/dbconfig.mysql8.yml b/sa/db-next/dbconfig.mysql8.yml new file mode 120000 index 00000000000..bc7d1365f7f --- /dev/null +++ b/sa/db-next/dbconfig.mysql8.yml @@ -0,0 +1 @@ +../db/dbconfig.mysql8.yml \ No newline at end of file diff --git a/sa/db-next/incidents_sa/20220328100000_Incidents.sql b/sa/db-next/incidents_sa/20220328100000_Incidents.sql new file mode 120000 index 00000000000..957d6471097 --- /dev/null +++ b/sa/db-next/incidents_sa/20220328100000_Incidents.sql @@ -0,0 +1 @@ +../../db/incidents_sa/20220328100000_Incidents.sql \ No newline at end of file diff --git a/sa/db-users/boulder_sa.sql b/sa/db-users/boulder_sa.sql new file mode 100644 index 00000000000..e4845b2c2ab --- /dev/null +++ b/sa/db-users/boulder_sa.sql @@ -0,0 +1,85 @@ +-- this file is run by test/create_db.sh to create users for each +-- component with the appropriate permissions. + +-- These lines require MariaDB 10.1+ +CREATE USER IF NOT EXISTS 'policy'@'localhost'; +CREATE USER IF NOT EXISTS 'sa'@'localhost'; +CREATE USER IF NOT EXISTS 'sa_ro'@'localhost'; +CREATE USER IF NOT EXISTS 'revoker'@'localhost'; +CREATE USER IF NOT EXISTS 'importer'@'localhost'; +CREATE USER IF NOT EXISTS 'mailer'@'localhost'; +CREATE USER IF NOT EXISTS 'cert_checker'@'localhost'; +CREATE USER IF NOT EXISTS 'test_setup'@'localhost'; +CREATE USER IF NOT EXISTS 'badkeyrevoker'@'localhost'; +CREATE USER IF NOT EXISTS 'proxysql'@'localhost'; + +-- Storage Authority +GRANT SELECT,INSERT ON certificates TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON certificateStatus TO 'sa'@'localhost'; +GRANT SELECT,INSERT ON issuedNames TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON registrations TO 'sa'@'localhost'; +GRANT SELECT,INSERT on fqdnSets TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON orders TO 'sa'@'localhost'; +GRANT SELECT,INSERT,DELETE ON orderFqdnSets TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON authz2 TO 'sa'@'localhost'; +GRANT INSERT,SELECT ON serials TO 'sa'@'localhost'; +GRANT SELECT,INSERT ON precertificates TO 'sa'@'localhost'; +GRANT SELECT,INSERT ON keyHashToSerial TO 'sa'@'localhost'; +GRANT SELECT,INSERT ON blockedKeys TO 'sa'@'localhost'; +GRANT SELECT ON incidents TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON crlShards TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON revokedCertificates TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON replacementOrders TO 'sa'@'localhost'; +GRANT SELECT,INSERT,UPDATE ON overrides TO 'sa'@'localhost'; +-- Tests need to be able to remove rows from this table, so DELETE,DROP is necessary. 
+GRANT SELECT,INSERT,UPDATE,DELETE,DROP ON paused TO 'sa'@'localhost'; + +GRANT SELECT ON certificates TO 'sa_ro'@'localhost'; +GRANT SELECT ON certificateStatus TO 'sa_ro'@'localhost'; +GRANT SELECT ON issuedNames TO 'sa_ro'@'localhost'; +GRANT SELECT ON registrations TO 'sa_ro'@'localhost'; +GRANT SELECT on fqdnSets TO 'sa_ro'@'localhost'; +GRANT SELECT ON orders TO 'sa_ro'@'localhost'; +GRANT SELECT ON orderFqdnSets TO 'sa_ro'@'localhost'; +GRANT SELECT ON authz2 TO 'sa_ro'@'localhost'; +GRANT SELECT ON serials TO 'sa_ro'@'localhost'; +GRANT SELECT ON precertificates TO 'sa_ro'@'localhost'; +GRANT SELECT ON keyHashToSerial TO 'sa_ro'@'localhost'; +GRANT SELECT ON blockedKeys TO 'sa_ro'@'localhost'; +GRANT SELECT ON incidents TO 'sa_ro'@'localhost'; +GRANT SELECT ON crlShards TO 'sa_ro'@'localhost'; +GRANT SELECT ON revokedCertificates TO 'sa_ro'@'localhost'; +GRANT SELECT ON replacementOrders TO 'sa_ro'@'localhost'; +GRANT SELECT ON paused TO 'sa_ro'@'localhost'; +GRANT SELECT ON overrides TO 'sa_ro'@'localhost'; + +-- Revoker Tool +GRANT SELECT,UPDATE ON registrations TO 'revoker'@'localhost'; +GRANT SELECT ON certificates TO 'revoker'@'localhost'; +GRANT SELECT ON precertificates TO 'revoker'@'localhost'; +GRANT SELECT ON keyHashToSerial TO 'revoker'@'localhost'; +GRANT SELECT,UPDATE ON blockedKeys TO 'revoker'@'localhost'; + +-- Expiration mailer +GRANT SELECT ON certificates TO 'mailer'@'localhost'; +GRANT SELECT ON registrations TO 'mailer'@'localhost'; +GRANT SELECT,UPDATE ON certificateStatus TO 'mailer'@'localhost'; +GRANT SELECT ON fqdnSets TO 'mailer'@'localhost'; + +-- Cert checker +GRANT SELECT ON certificates TO 'cert_checker'@'localhost'; +GRANT SELECT ON authz2 TO 'cert_checker'@'localhost'; +GRANT SELECT ON precertificates TO 'cert_checker'@'localhost'; + +-- Bad Key Revoker +GRANT SELECT,UPDATE ON blockedKeys TO 'badkeyrevoker'@'localhost'; +GRANT SELECT ON keyHashToSerial TO 'badkeyrevoker'@'localhost'; +GRANT SELECT ON certificateStatus TO 'badkeyrevoker'@'localhost'; +GRANT SELECT ON precertificates TO 'badkeyrevoker'@'localhost'; +GRANT SELECT ON registrations TO 'badkeyrevoker'@'localhost'; + +-- ProxySQL -- +GRANT ALL PRIVILEGES ON monitor TO 'proxysql'@'localhost'; + +-- Test setup and teardown +GRANT ALL PRIVILEGES ON * to 'test_setup'@'localhost'; diff --git a/sa/db-users/incidents_sa.sql b/sa/db-users/incidents_sa.sql new file mode 100644 index 00000000000..5fa61fc84fa --- /dev/null +++ b/sa/db-users/incidents_sa.sql @@ -0,0 +1,12 @@ +-- this file is run by test/create_db.sh to create users for each +-- component with the appropriate permissions. 
+ +-- These lines require MariaDB 10.1+ +CREATE USER IF NOT EXISTS 'incidents_sa'@'localhost'; +CREATE USER IF NOT EXISTS 'test_setup'@'localhost'; + +-- Storage Authority +GRANT SELECT ON * TO 'incidents_sa'@'localhost'; + +-- Test setup and teardown +GRANT ALL PRIVILEGES ON * to 'test_setup'@'localhost'; diff --git a/sa/db/boulder_sa/20230419000000_CombinedSchema.sql b/sa/db/boulder_sa/20230419000000_CombinedSchema.sql new file mode 100644 index 00000000000..4479770fd20 --- /dev/null +++ b/sa/db/boulder_sa/20230419000000_CombinedSchema.sql @@ -0,0 +1,265 @@ +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied + +CREATE TABLE `authz2` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `identifierType` tinyint(4) NOT NULL, + `identifierValue` varchar(255) NOT NULL, + `registrationID` bigint(20) NOT NULL, + `status` tinyint(4) NOT NULL, + `expires` datetime NOT NULL, + `challenges` tinyint(4) NOT NULL, + `attempted` tinyint(4) DEFAULT NULL, + `attemptedAt` datetime DEFAULT NULL, + `token` binary(32) NOT NULL, + `validationError` mediumblob DEFAULT NULL, + `validationRecord` mediumblob DEFAULT NULL, + `certificateProfileName` varchar(32) DEFAULT NULL, + `created` datetime DEFAULT current_timestamp(), + PRIMARY KEY (`id`), + KEY `regID_expires_idx` (`registrationID`,`status`,`expires`), + KEY `regID_identifier_status_expires_idx` (`registrationID`,`identifierType`,`identifierValue`,`status`,`expires`), + KEY `expires_idx` (`expires`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `blockedKeys` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `keyHash` binary(32) NOT NULL, + `added` datetime NOT NULL, + `source` tinyint(4) NOT NULL, + `comment` varchar(255) DEFAULT NULL, + `revokedBy` bigint(20) DEFAULT 0, + `extantCertificatesChecked` tinyint(1) DEFAULT 0, + PRIMARY KEY (`id`), + UNIQUE KEY `keyHash` (`keyHash`), + KEY `extantCertificatesChecked_idx` (`extantCertificatesChecked`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `certificateStatus` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `serial` varchar(255) NOT NULL, + `subscriberApproved` tinyint(1) DEFAULT 0, + `status` varchar(255) NOT NULL, + `ocspLastUpdated` datetime NOT NULL, + `revokedDate` datetime NOT NULL, + `revokedReason` int(11) NOT NULL, + `lastExpirationNagSent` datetime NOT NULL, + `LockCol` bigint(20) DEFAULT 0, + `ocspResponse` blob DEFAULT NULL, + `notAfter` datetime DEFAULT NULL, + `isExpired` tinyint(1) DEFAULT 0, + `issuerID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `isExpired_ocspLastUpdated_idx` (`isExpired`,`ocspLastUpdated`), + KEY `notAfter_idx` (`notAfter`), + KEY `serial` (`serial`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `certificates` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `registrationID` bigint(20) NOT NULL, + `serial` varchar(255) NOT NULL, + `digest` varchar(255) NOT NULL, + `der` mediumblob NOT NULL, + `issued` datetime NOT NULL, + `expires` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `regId_certificates_idx` (`registrationID`) COMMENT 'Common lookup', + KEY `issued_idx` (`issued`), + KEY `serial` (`serial`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `crlShards` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `issuerID` bigint(20) NOT NULL, + `idx` int(10) unsigned NOT NULL, + `thisUpdate` datetime DEFAULT NULL, + `nextUpdate` datetime DEFAULT NULL, + `leasedUntil` datetime NOT 
NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `shardID` (`issuerID`,`idx`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; + +CREATE TABLE `crls` ( + `serial` varchar(255) NOT NULL, + `createdAt` datetime NOT NULL, + `crl` varchar(255) NOT NULL, + PRIMARY KEY (`serial`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `fqdnSets` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `setHash` binary(32) NOT NULL, + `serial` varchar(255) NOT NULL, + `issued` datetime NOT NULL, + `expires` datetime NOT NULL, + PRIMARY KEY (`id`), + KEY `setHash_issued_idx` (`setHash`,`issued`), + KEY `serial` (`serial`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `incidents` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `serialTable` varchar(128) NOT NULL, + `url` varchar(1024) NOT NULL, + `renewBy` datetime NOT NULL, + `enabled` tinyint(1) DEFAULT 0, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; + +CREATE TABLE `issuedNames` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `reversedName` varchar(640) CHARACTER SET ascii COLLATE ascii_general_ci NOT NULL, + `notBefore` datetime NOT NULL, + `serial` varchar(255) NOT NULL, + `renewal` tinyint(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`id`), + KEY `reversedName_notBefore_Idx` (`reversedName`,`notBefore`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `keyHashToSerial` ( + `id` bigint(20) NOT NULL AUTO_INCREMENT, + `keyHash` binary(32) NOT NULL, + `certNotAfter` datetime NOT NULL, + `certSerial` varchar(255) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `unique_keyHash_certserial` (`keyHash`,`certSerial`), + KEY `keyHash_certNotAfter` (`keyHash`,`certNotAfter`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `orderFqdnSets` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `setHash` binary(32) NOT NULL, + `orderID` bigint(20) NOT NULL, + `registrationID` bigint(20) NOT NULL, + `expires` datetime NOT NULL, + `created` datetime DEFAULT current_timestamp(), + PRIMARY KEY (`id`), + KEY `setHash_expires_idx` (`setHash`,`expires`), + KEY `orderID_idx` (`orderID`), + KEY `orderFqdnSets_registrationID_registrations` (`registrationID`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `orders` ( + `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `registrationID` bigint(20) NOT NULL, + `expires` datetime NOT NULL, + `error` mediumblob DEFAULT NULL, + `certificateSerial` varchar(255) DEFAULT NULL, + `beganProcessing` tinyint(1) NOT NULL DEFAULT 0, + `created` datetime NOT NULL, + `certificateProfileName` varchar(32) DEFAULT NULL, + `replaces` varchar(255) DEFAULT NULL, + `authzs` blob DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `reg_expires` (`registrationID`,`expires`), + KEY `regID_created_idx` (`registrationID`,`created`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci; + +CREATE TABLE `overrides` ( + `limitEnum` tinyint(4) unsigned NOT NULL, + `bucketKey` varchar(255) NOT NULL, + `comment` varchar(255) NOT NULL, + `periodNS` bigint(20) unsigned NOT NULL, + `count` int(10) unsigned NOT NULL, + `burst` int(10) unsigned NOT NULL, + `updatedAt` datetime NOT NULL, + `enabled` tinyint(1) NOT NULL DEFAULT 0, + UNIQUE KEY `limitEnum_bucketKey` (`limitEnum`,`bucketKey`), + KEY `idx_enabled` (`enabled`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; + +CREATE TABLE `paused` ( + 
`registrationID` bigint(20) unsigned NOT NULL,
+  `identifierType` tinyint(4) NOT NULL,
+  `identifierValue` varchar(255) NOT NULL,
+  `pausedAt` datetime NOT NULL,
+  `unpausedAt` datetime DEFAULT NULL,
+  PRIMARY KEY (`registrationID`,`identifierValue`,`identifierType`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci;
+
+-- Note: This table's name is a historical artifact and it is now
+-- used to store linting certificates, not precertificates.
+-- See #6807.
+CREATE TABLE `precertificates` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `registrationID` bigint(20) NOT NULL,
+  `serial` varchar(255) NOT NULL,
+  `der` mediumblob NOT NULL,
+  `issued` datetime NOT NULL,
+  `expires` datetime NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `regId_precertificates_idx` (`registrationID`),
+  KEY `issued_precertificates_idx` (`issued`),
+  KEY `serial` (`serial`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci;
+
+CREATE TABLE `registrations` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `jwk` mediumblob NOT NULL,
+  `jwk_sha256` varchar(255) NOT NULL,
+  `agreement` varchar(255) NOT NULL,
+  `LockCol` bigint(20) NOT NULL DEFAULT 0,
+  `createdAt` datetime NOT NULL,
+  `status` varchar(255) NOT NULL DEFAULT 'valid',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `jwk_sha256` (`jwk_sha256`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci;
+
+CREATE TABLE `replacementOrders` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `serial` varchar(255) NOT NULL,
+  `orderID` bigint(20) NOT NULL,
+  `orderExpires` datetime NOT NULL,
+  `replaced` tinyint(1) DEFAULT 0,
+  PRIMARY KEY (`id`),
+  KEY `serial_idx` (`serial`),
+  KEY `orderID_idx` (`orderID`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
+
+CREATE TABLE `revokedCertificates` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `issuerID` bigint(20) NOT NULL,
+  `serial` varchar(255) NOT NULL,
+  `notAfterHour` datetime NOT NULL,
+  `shardIdx` bigint(20) NOT NULL,
+  `revokedDate` datetime NOT NULL,
+  `revokedReason` int(11) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `issuerID_shardIdx_notAfterHour_idx` (`issuerID`,`shardIdx`,`notAfterHour`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;
+
+CREATE TABLE `serials` (
+  `id` bigint(20) NOT NULL AUTO_INCREMENT,
+  `registrationID` bigint(20) NOT NULL,
+  `serial` varchar(255) NOT NULL,
+  `created` datetime NOT NULL,
+  `expires` datetime NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `serial` (`serial`),
+  KEY `regId_serials_idx` (`registrationID`),
+  CONSTRAINT `regId_serials` FOREIGN KEY (`registrationID`) REFERENCES `registrations` (`id`) ON DELETE NO ACTION ON UPDATE NO ACTION
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_general_ci;
+
+-- +migrate Down
+-- SQL section 'Down' is executed when this migration is rolled back
+
+-- Tables with foreign key constraints are dropped first.
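+-- For example, `serials` declares the `regId_serials` foreign key referencing
+-- `registrations` above, so it must be dropped before `registrations` is.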
+DROP TABLE `serials`; + +DROP TABLE `authz2`; +DROP TABLE `blockedKeys`; +DROP TABLE `certificateStatus`; +DROP TABLE `certificatesPerName`; +DROP TABLE `certificates`; +DROP TABLE `fqdnSets`; +DROP TABLE `incidents`; +DROP TABLE `issuedNames`; +DROP TABLE `keyHashToSerial`; +DROP TABLE `newOrdersRL`; +DROP TABLE `orderFqdnSets`; +DROP TABLE `orderToAuthz2`; +DROP TABLE `orders`; +DROP TABLE `precertificates`; +DROP TABLE `registrations`; +DROP TABLE `requestedNames`; diff --git a/sa/db/dbconfig.mariadb.yml b/sa/db/dbconfig.mariadb.yml new file mode 100644 index 00000000000..747ce0365fb --- /dev/null +++ b/sa/db/dbconfig.mariadb.yml @@ -0,0 +1,20 @@ +# https://github.com/rubenv/sql-migrate#readme +boulder_sa_test: + dialect: mysql + datasource: root@tcp(boulder-proxysql:6033)/boulder_sa_test?parseTime=true + dir: boulder_sa + +boulder_sa_integration: + dialect: mysql + datasource: root@tcp(boulder-proxysql:6033)/boulder_sa_integration?parseTime=true + dir: boulder_sa + +incidents_sa_test: + dialect: mysql + datasource: root@tcp(boulder-proxysql:6033)/incidents_sa_test?parseTime=true + dir: incidents_sa + +incidents_sa_integration: + dialect: mysql + datasource: root@tcp(boulder-proxysql:6033)/incidents_sa_integration?parseTime=true + dir: incidents_sa diff --git a/sa/db/dbconfig.mysql8.yml b/sa/db/dbconfig.mysql8.yml new file mode 100644 index 00000000000..6f9c9bf6578 --- /dev/null +++ b/sa/db/dbconfig.mysql8.yml @@ -0,0 +1,20 @@ +# https://github.com/rubenv/sql-migrate#readme +boulder_sa_test: + dialect: mysql + datasource: root@tcp(boulder-vitess:33577)/boulder_sa_test?parseTime=true + dir: boulder_sa + +boulder_sa_integration: + dialect: mysql + datasource: root@tcp(boulder-vitess:33577)/boulder_sa_integration?parseTime=true + dir: boulder_sa + +incidents_sa_test: + dialect: mysql + datasource: root@tcp(boulder-vitess:33577)/incidents_sa_test?parseTime=true + dir: incidents_sa + +incidents_sa_integration: + dialect: mysql + datasource: root@tcp(boulder-vitess:33577)/incidents_sa_integration?parseTime=true + dir: incidents_sa diff --git a/sa/db/incidents_sa/20220328100000_Incidents.sql b/sa/db/incidents_sa/20220328100000_Incidents.sql new file mode 100644 index 00000000000..dec39f18e18 --- /dev/null +++ b/sa/db/incidents_sa/20220328100000_Incidents.sql @@ -0,0 +1,28 @@ +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied + +CREATE TABLE `incident_foo` ( + `serial` varchar(255) NOT NULL, + `registrationID` bigint(20) unsigned NULL, + `orderID` bigint(20) unsigned NULL, + `lastNoticeSent` datetime NULL, + PRIMARY KEY (`serial`), + KEY `registrationID_idx` (`registrationID`), + KEY `orderID_idx` (`orderID`) +) CHARSET=utf8mb4; + +CREATE TABLE `incident_bar` ( + `serial` varchar(255) NOT NULL, + `registrationID` bigint(20) unsigned NULL, + `orderID` bigint(20) unsigned NULL, + `lastNoticeSent` datetime NULL, + PRIMARY KEY (`serial`), + KEY `registrationID_idx` (`registrationID`), + KEY `orderID_idx` (`orderID`) +) CHARSET=utf8mb4; + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back + +DROP TABLE `incident_foo`; +DROP TABLE `incident_bar`; diff --git a/sa/ip_range_test.go b/sa/ip_range_test.go deleted file mode 100644 index a92fc7b928a..00000000000 --- a/sa/ip_range_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package sa - -import ( - "net" - "testing" -) - -func TestIncrementIP(t *testing.T) { - testCases := []struct { - ip string - index int - expected string - }{ - {"0.0.0.0", 128, "0.0.0.1"}, - {"0.0.0.255", 128, "0.0.1.0"}, 
- {"127.0.0.1", 128, "127.0.0.2"}, - {"1.2.3.4", 120, "1.2.4.4"}, - {"::1", 128, "::2"}, - {"2002:1001:4008::", 128, "2002:1001:4008::1"}, - {"2002:1001:4008::", 48, "2002:1001:4009::"}, - {"2002:1001:ffff::", 48, "2002:1002::"}, - {"ffff:ffff:ffff::", 48, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}, - } - for _, tc := range testCases { - ip := net.ParseIP(tc.ip).To16() - actual := incrementIP(ip, tc.index) - expectedIP := net.ParseIP(tc.expected) - if !actual.Equal(expectedIP) { - t.Errorf("Expected incrementIP(%s, %d) to be %s, instead got %s", - tc.ip, tc.index, expectedIP, actual.String()) - } - } -} - -func TestIPRange(t *testing.T) { - testCases := []struct { - ip string - expectedBegin string - expectedEnd string - }{ - {"28.45.45.28", "28.45.45.28", "28.45.45.29"}, - {"2002:1001:4008::", "2002:1001:4008::", "2002:1001:4009::"}, - } - for _, tc := range testCases { - ip := net.ParseIP(tc.ip) - expectedBegin := net.ParseIP(tc.expectedBegin) - expectedEnd := net.ParseIP(tc.expectedEnd) - actualBegin, actualEnd := ipRange(ip) - if !expectedBegin.Equal(actualBegin) || !expectedEnd.Equal(actualEnd) { - t.Errorf("Expected ipRange(%s) to be (%s, %s), got (%s, %s)", - tc.ip, tc.expectedBegin, tc.expectedEnd, actualBegin, actualEnd) - } - } -} diff --git a/sa/metrics.go b/sa/metrics.go index 96ca248c56f..34b56203eab 100644 --- a/sa/metrics.go +++ b/sa/metrics.go @@ -61,10 +61,11 @@ func (dbc dbMetricsCollector) Collect(ch chan<- prometheus.Metric) { writeCounter(dbc.maxLifetimeClosed, float64(dbMapStats.MaxLifetimeClosed)) } -// InitDBMetrics will register a Collector that translates the provided dbMap's -// stats and DbSettings into Prometheus metrics on the fly. The stat values will -// be translated from the gorp dbMap's inner sql.DBMap's DBStats structure values -func InitDBMetrics(db *sql.DB, stats prometheus.Registerer, dbSettings DbSettings, address string, user string) { +// initDBMetrics will register a Collector that translates the provided dbMap's +// stats and DbSettings into Prometheus metrics on the fly. The exported metrics +// all start with `db_`. The underlying data comes from sql.DBStats: +// https://pkg.go.dev/database/sql#DBStats +func initDBMetrics(db *sql.DB, stats prometheus.Registerer, dbSettings DbSettings, address string, user string) error { // Create a dbMetricsCollector and register it dbc := dbMetricsCollector{db: db, dbSettings: dbSettings} @@ -125,5 +126,5 @@ func InitDBMetrics(db *sql.DB, stats prometheus.Registerer, dbSettings DbSetting "Total number of connections closed due to SetConnMaxLifetime.", nil, labels) - stats.MustRegister(dbc) + return stats.Register(dbc) } diff --git a/sa/migrations.sh b/sa/migrations.sh index 4f2b4e2bc14..f849934e038 100755 --- a/sa/migrations.sh +++ b/sa/migrations.sh @@ -13,11 +13,12 @@ res="${esc}0m" # # Defaults # -DB_NEXT_PATH="_db-next/migrations" -DB_PATH="_db/migrations" +DB_NEXT_PATH="db-next" +DB_PATH="db" OUTCOME="ERROR" PROMOTE=() RUN=() +DB="" # # Print Functions @@ -63,7 +64,14 @@ function print_linking () { echo -e "to: ${esc}0;39;1m${to}${res}" } -function print_migrations(){ +function check_arg() { + if [ -z "${OPTARG}" ] + then + exit_msg "No arg for --${OPT} option, use: -h for help">&2 + fi +} + +function print_migrations() { iter=1 for file in "${migrations[@]}" do @@ -83,27 +91,29 @@ function exit_msg() { # function get_promotable_migrations() { local migrations=() - for file in "${DB_NEXT_PATH}"/*.sql; do + local migpath="${DB_NEXT_PATH}/${1}" + for file in "${migpath}"/*.sql; do [[ -f "${file}" && ! 
-L "${file}" ]] || continue migrations+=("${file}") done if [[ "${migrations[@]}" ]]; then echo "${migrations[@]}" else - exit_msg "There are no promotable migrations at path: "\"${DB_NEXT_PATH}\""" + exit_msg "There are no promotable migrations at path: "\"${migpath}\""" fi } function get_demotable_migrations() { local migrations=() - for file in "${DB_NEXT_PATH}"/*.sql; do + local migpath="${DB_NEXT_PATH}/${1}" + for file in "${migpath}"/*.sql; do [[ -L "${file}" ]] || continue migrations+=("${file}") done if [[ "${migrations[@]}" ]]; then echo "${migrations[@]}" else - exit_msg "There are no demotable migrations at path: "\"${DB_NEXT_PATH}\""" + exit_msg "There are no demotable migrations at path: "\"${migpath}\""" fi } @@ -116,26 +126,27 @@ Usage: Boulder DB Migrations CLI - Helper for listing, promoting, and demoting Boulder schema files + Helper for listing, promoting, and demoting migration files ./$(basename "${0}") [OPTION]... - - -l, --list-next Lists schema files present in sa/_db-next - -c, --list-current Lists schema files promoted from sa/_db-next to sa/_db - -p, --promote Select and promote a schema from sa/_db-next to sa/_db - -d, --demote Select and demote a schema from sa/_db to sa/_db-next + -b --db Name of the database, this is required (e.g. boulder_sa or incidents_sa) + -n, --list-next Lists migration files present in sa/db-next/ + -c, --list-current Lists migration files promoted from sa/db-next/ to sa/db/ + -p, --promote Select and promote a migration from sa/db-next/ to sa/db/ + -d, --demote Select and demote a migration from sa/db/ to sa/db-next/ -h, --help Shows this help message EOM )" -while getopts nchpd-: OPT; do +while getopts nchpd-:b:-: OPT; do if [ "$OPT" = - ]; then # long option: reformulate OPT and OPTARG OPT="${OPTARG%%=*}" # extract long option name OPTARG="${OPTARG#$OPT}" # extract long option argument (may be empty) OPTARG="${OPTARG#=}" # if long option argument, remove assigning `=` fi case "${OPT}" in + b | db ) check_arg; DB="${OPTARG}" ;; n | list-next ) RUN+=("list_next") ;; c | list-current ) RUN+=("list_current") ;; p | promote ) RUN+=("promote") ;; @@ -150,24 +161,26 @@ shift $((OPTIND-1)) # remove parsed opts and args from $@ list # On EXIT, trap and print outcome trap "print_outcome" EXIT +[ -z "${DB}" ] && exit_msg "You must specify a database with flag -b \"foo\" or --db=\"foo\"" + STEP="list_next" if [[ "${RUN[@]}" =~ "${STEP}" ]] ; then - print_heading "Next Schemas" - migrations=($(get_promotable_migrations)) + print_heading "Next Migrations" + migrations=($(get_promotable_migrations "${DB}")) print_migrations "${migrations[@]}" fi STEP="list_current" if [[ "${RUN[@]}" =~ "${STEP}" ]] ; then - print_heading "Current Schemas" - migrations=($(get_demotable_migrations)) + print_heading "Current Migrations" + migrations=($(get_demotable_migrations "${DB}")) print_migrations "${migrations[@]}" fi STEP="promote" if [[ "${RUN[@]}" =~ "${STEP}" ]] ; then - print_heading "Promote Schema" - migrations=($(get_promotable_migrations)) + print_heading "Promote Migration" + migrations=($(get_promotable_migrations "${DB}")) declare -a mig_index=() declare -A mig_file=() for i in "${!migrations[@]}"; do @@ -176,7 +189,7 @@ if [[ "${RUN[@]}" =~ "${STEP}" ]] ; then done promote="" - PS3='Which schema would you like to promote? (q to cancel): ' + PS3='Which migration would you like to promote? 
(q to cancel): '
  select opt in "${mig_index[@]}"; do
    case "${opt}" in
@@ -186,23 +199,23 @@
  done
  if [[ "${mig_file_path}" ]]
  then
-    print_heading "Promoting Schema"
+    print_heading "Promoting Migration"
    promote_mig_name="$(basename -- "${mig_file_path}")"
-    promoted_mig_file_path="${DB_PATH}/${promote_mig_name}"
-    symlink_relpath="$(realpath --relative-to=${DB_NEXT_PATH} ${promoted_mig_file_path})"
+    promoted_mig_file_path="${DB_PATH}/${DB}/${promote_mig_name}"
+    symlink_relpath="$(realpath --relative-to=${DB_NEXT_PATH}/${DB} ${promoted_mig_file_path})"

    print_moving "${mig_file_path}" "${promoted_mig_file_path}"
    mv "${mig_file_path}" "${promoted_mig_file_path}"

    print_linking "${mig_file_path}" "${symlink_relpath}"
-    ln -s "${symlink_relpath}" "${DB_NEXT_PATH}"
+    ln -s "${symlink_relpath}" "${DB_NEXT_PATH}/${DB}"
  fi
fi

STEP="demote"
if [[ "${RUN[@]}" =~ "${STEP}" ]] ; then
-  print_heading "Demote Schema"
-  migrations=($(get_demotable_migrations))
+  print_heading "Demote Migration"
+  migrations=($(get_demotable_migrations "${DB}"))
  declare -a mig_index=()
  declare -A mig_file=()
  for i in "${!migrations[@]}"; do
@@ -211,7 +224,7 @@
  done

  demote_mig=""
-  PS3='Which schema would you like to demote? (q to cancel): '
+  PS3='Which migration would you like to demote? (q to cancel): '
  select opt in "${mig_index[@]}"; do
    case "${opt}" in
@@ -221,9 +234,9 @@
  done
  if [[ "${mig_link_path}" ]]
  then
-    print_heading "Demoting Schema"
+    print_heading "Demoting Migration"
    demote_mig_name="$(basename -- "${mig_link_path}")"
-    demote_mig_from="${DB_PATH}/${demote_mig_name}"
+    demote_mig_from="${DB_PATH}/${DB}/${demote_mig_name}"

    print_unlinking "${mig_link_path}"
    rm "${mig_link_path}"
diff --git a/sa/model.go b/sa/model.go
index 3c8a0911a0e..85a9aa9dee7 100644
--- a/sa/model.go
+++ b/sa/model.go
@@ -1,24 +1,35 @@
 package sa
 
 import (
+	"context"
+	"crypto/sha256"
+	"crypto/x509"
 	"database/sql"
 	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"math"
-	"net"
+	"net/netip"
+	"net/url"
 	"strconv"
 	"strings"
 	"time"
 
-	jose "gopkg.in/square/go-jose.v2"
+	"github.com/go-jose/go-jose/v4"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
 
 	"github.com/letsencrypt/boulder/core"
 	corepb "github.com/letsencrypt/boulder/core/proto"
 	"github.com/letsencrypt/boulder/db"
+	berrors "github.com/letsencrypt/boulder/errors"
 	"github.com/letsencrypt/boulder/grpc"
+	"github.com/letsencrypt/boulder/identifier"
 	"github.com/letsencrypt/boulder/probs"
+	"github.com/letsencrypt/boulder/revocation"
+	sapb "github.com/letsencrypt/boulder/sa/proto"
 )
 
// errBadJSON is an error type returned when a json.Unmarshal performed by the
@@ -50,154 +61,140 @@ func badJSONError(msg string, jsonData []byte, err error) error {
 	}
 }
 
-const regFields = "id, jwk, jwk_sha256, contact, agreement, initialIP, createdAt, LockCol, status"
+const regFields = "id, jwk, jwk_sha256, agreement, createdAt, status"
 
 // selectRegistration selects all fields of one registration model
-func selectRegistration(s db.OneSelector, q string, args ...interface{}) (*regModel, error) {
+func selectRegistration(ctx context.Context, s db.OneSelector, whereCol string, args ...any) (*regModel, error) {
+	if whereCol != "id" && whereCol != "jwk_sha256" {
+		return nil, fmt.Errorf("column name %q invalid for registrations table WHERE clause", whereCol)
+	}
+
 	var model regModel
 	err :=
s.SelectOne( + ctx, &model, - "SELECT "+regFields+" FROM registrations "+q, + "SELECT "+regFields+" FROM registrations WHERE "+whereCol+" = ? LIMIT 1", args..., ) return &model, err } -const certFields = "registrationID, serial, digest, der, issued, expires" +const certFields = "id, registrationID, serial, digest, der, issued, expires" // SelectCertificate selects all fields of one certificate object identified by // a serial. If more than one row contains the same serial only the first is // returned. -func SelectCertificate(s db.OneSelector, serial string) (core.Certificate, error) { - var model core.Certificate +func SelectCertificate(ctx context.Context, s db.OneSelector, serial string) (*corepb.Certificate, error) { + var model certificateModel err := s.SelectOne( + ctx, &model, "SELECT "+certFields+" FROM certificates WHERE serial = ? LIMIT 1", serial, ) - return model, err + return model.toPb(), err } const precertFields = "registrationID, serial, der, issued, expires" // SelectPrecertificate selects all fields of one precertificate object // identified by serial. -func SelectPrecertificate(s db.OneSelector, serial string) (core.Certificate, error) { - var model precertificateModel +func SelectPrecertificate(ctx context.Context, s db.OneSelector, serial string) (*corepb.Certificate, error) { + var model lintingCertModel err := s.SelectOne( + ctx, &model, - "SELECT "+precertFields+" FROM precertificates WHERE serial = ?", + "SELECT "+precertFields+" FROM precertificates WHERE serial = ? LIMIT 1", serial) - return core.Certificate{ - RegistrationID: model.RegistrationID, - Serial: model.Serial, - DER: model.DER, - Issued: model.Issued, - Expires: model.Expires, - }, err -} - -type CertWithID struct { - ID int64 - core.Certificate + if err != nil { + return nil, err + } + return model.toPb(), nil } // SelectCertificates selects all fields of multiple certificate objects -func SelectCertificates(s db.Selector, q string, args map[string]interface{}) ([]CertWithID, error) { - var models []CertWithID - _, err := s.Select( - &models, - "SELECT id, "+certFields+" FROM certificates "+q, args) - return models, err -} - -// SelectPrecertificates selects all fields of multiple precertificate objects. -func SelectPrecertificates(s db.Selector, q string, args map[string]interface{}) ([]CertWithID, error) { - var models []CertWithID +// +// Returns a slice of *corepb.Certificate along with the highest ID field seen +// (which can be used as input to a subsequent query when iterating in primary +// key order). +func SelectCertificates(ctx context.Context, s db.Selector, q string, args map[string]any) ([]*corepb.Certificate, int64, error) { + var models []certificateModel _, err := s.Select( + ctx, &models, - "SELECT id, "+precertFields+" FROM precertificates "+q, args) - return models, err -} - -type CertStatusMetadata struct { - core.CertificateStatus -} - -// CertStatusMetadataFields returns a slice of column names for rows in the -// certificateStatus table. Changes to the ordering of this list returned by -// this function should also be made in `ScanCertStatusRow()`. 
-func CertStatusMetadataFields() []string { - return []string{ - "id", - "serial", - "status", - "ocspLastUpdated", - "revokedDate", - "revokedReason", - "lastExpirationNagSent", - "notAfter", - "isExpired", - "issuerID", - } -} - -// ScanCertStatusRow is a helper function expored from SA so that we can readily -// check that there's a 1:1 correspondence between the column name in the DB, -// `CertStatusMetadataFields()`, and the `*core.CerticateStatus` field name -// being copied to. -func ScanCertStatusMetadataRow(rows *sql.Rows, status *CertStatusMetadata) error { - columns, err := rows.Columns() - if err != nil { - return err - } - expectedColumns := CertStatusMetadataFields() - if len(columns) != len(expectedColumns) { - return fmt.Errorf("incorrect number of columns in scanned rows: got %d, expected %d", len(columns), len(expectedColumns)) - } - for i, v := range columns { - if v != expectedColumns[i] { - return fmt.Errorf("incorrect column %d in scanned rows: got %q, expected %q", i, v, expectedColumns[i]) + "SELECT "+certFields+" FROM certificates "+q, args) + var pbs []*corepb.Certificate + var highestID int64 + for _, m := range models { + pbs = append(pbs, m.toPb()) + if m.ID > highestID { + highestID = m.ID } } - err = rows.Scan( - &status.ID, - &status.Serial, - &status.Status, - &status.OCSPLastUpdated, - &status.RevokedDate, - &status.RevokedReason, - &status.LastExpirationNagSent, - &status.NotAfter, - &status.IsExpired, - &status.IssuerID, - ) - if err != nil { - return err - } - return nil + return pbs, highestID, err } -func certStatusFields() []string { - // Add the full response bytes. - return append(CertStatusMetadataFields(), "ocspResponse") +type CertStatusMetadata struct { + ID int64 `db:"id"` + Serial string `db:"serial"` + Status core.OCSPStatus `db:"status"` + OCSPLastUpdated time.Time `db:"ocspLastUpdated"` + RevokedDate time.Time `db:"revokedDate"` + RevokedReason revocation.Reason `db:"revokedReason"` + LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` + NotAfter time.Time `db:"notAfter"` + IsExpired bool `db:"isExpired"` + IssuerID int64 `db:"issuerID"` } -func certStatusFieldsSelect(restOfQuery string) string { - fields := strings.Join(certStatusFields(), ",") - return fmt.Sprintf("SELECT %s FROM certificateStatus %s", fields, restOfQuery) -} +const certStatusFields = "id, serial, status, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, notAfter, isExpired, issuerID" // SelectCertificateStatus selects all fields of one certificate status model // identified by serial -func SelectCertificateStatus(s db.OneSelector, serial string) (core.CertificateStatus, error) { - var model core.CertificateStatus +func SelectCertificateStatus(ctx context.Context, s db.OneSelector, serial string) (*corepb.CertificateStatus, error) { + var model certificateStatusModel + err := s.SelectOne( + ctx, + &model, + "SELECT "+certStatusFields+" FROM certificateStatus WHERE serial = ? LIMIT 1", + serial, + ) + return model.toPb(), err +} + +// RevocationStatusModel represents a small subset of the columns in the +// certificateStatus table, used to determine the authoritative revocation +// status of a certificate. +type RevocationStatusModel struct { + Status core.OCSPStatus `db:"status"` + RevokedDate time.Time `db:"revokedDate"` + RevokedReason revocation.Reason `db:"revokedReason"` +} + +// SelectRevocationStatus returns the authoritative revocation information for +// the certificate with the given serial. 
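+//
+// The string status from certificateStatus is mapped to its numeric form via
+// core.OCSPStatusToInt; an unrecognized status yields an error rather than
+// being silently coerced. A minimal usage sketch (illustrative only: the
+// dbMap selector and serial are hypothetical, and the comparison assumes the
+// RFC 6960 encoding where good == 0 and revoked == 1):
+//
+//	rs, err := SelectRevocationStatus(ctx, dbMap, "03deadbeef")
+//	if err == nil && rs.Status == 1 {
+//		// Revoked: rs.RevokedDate and rs.RevokedReason say when and why.
+//	}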
+func SelectRevocationStatus(ctx context.Context, s db.OneSelector, serial string) (*sapb.RevocationStatus, error) { + var model RevocationStatusModel err := s.SelectOne( + ctx, &model, - certStatusFieldsSelect("WHERE serial = ?"), + "SELECT status, revokedDate, revokedReason FROM certificateStatus WHERE serial = ? LIMIT 1", serial, ) - return model, err + if err != nil { + return nil, err + } + + statusInt, ok := core.OCSPStatusToInt[model.Status] + if !ok { + return nil, fmt.Errorf("got unrecognized status %q", model.Status) + } + + return &sapb.RevocationStatus{ + Status: int64(statusInt), + RevokedDate: timestamppb.New(model.RevokedDate), + RevokedReason: int64(model.RevokedReason), + }, nil } var mediumBlobSize = int(math.Pow(2, 24)) @@ -211,36 +208,12 @@ type issuedNameModel struct { // regModel is the description of a core.Registration in the database before type regModel struct { - ID int64 `db:"id"` - Key []byte `db:"jwk"` - KeySHA256 string `db:"jwk_sha256"` - Contact []string `db:"contact"` - Agreement string `db:"agreement"` - // InitialIP is stored as sixteen binary bytes, regardless of whether it - // represents a v4 or v6 IP address. - InitialIP []byte `db:"initialIp"` + ID int64 `db:"id"` + Key []byte `db:"jwk"` + KeySHA256 string `db:"jwk_sha256"` + Agreement string `db:"agreement"` CreatedAt time.Time `db:"createdAt"` - LockCol int64 - Status string `db:"status"` -} - -// challModel is the description of a core.Challenge in the database -// -// The Validation field is a stub; the column is only there for backward compatibility. -type challModel struct { - ID int64 `db:"id"` - AuthorizationID string `db:"authorizationID"` - - Type core.AcmeChallenge `db:"type"` - Status core.AcmeStatus `db:"status"` - Error []byte `db:"error"` - Token string `db:"token"` - KeyAuthorization string `db:"keyAuthorization"` - ValidationRecord []byte `db:"validationRecord"` - AttemptedAt time.Time `db:"attemptedAt"` - - // TODO(#1818): Remove, this field is unused, but is kept temporarily to avoid a database migration. - Validated bool `db:"validated"` + Status string `db:"status"` } func registrationPbToModel(reg *corepb.Registration) (*regModel, error) { @@ -257,100 +230,35 @@ func registrationPbToModel(reg *corepb.Registration) (*regModel, error) { return nil, err } - // For some reason we use different serialization formats for InitialIP - // in database models and in protobufs, despite the fact that both formats - // are just []byte. - var initialIP net.IP - err = initialIP.UnmarshalText(reg.InitialIP) - if err != nil { - return nil, err - } - - // Converting the int64 zero-value to a unix timestamp does not produce - // the time.Time zero-value (the former is 1970; the latter is year 0), - // so we have to do this check. 
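+	// Only set createdAt when the protobuf carries a real timestamp; the
+	// timestamppb zero value would otherwise decode to the Unix epoch rather
+	// than Go's zero time.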
var createdAt time.Time - if reg.CreatedAt != 0 { - createdAt = time.Unix(0, reg.CreatedAt) + if !core.IsAnyNilOrZero(reg.CreatedAt) { + createdAt = reg.CreatedAt.AsTime() } return ®Model{ ID: reg.Id, Key: reg.Key, KeySHA256: sha, - Contact: reg.Contact, Agreement: reg.Agreement, - InitialIP: []byte(initialIP.To16()), CreatedAt: createdAt, Status: reg.Status, }, nil } func registrationModelToPb(reg *regModel) (*corepb.Registration, error) { - if reg.ID == 0 || len(reg.Key) == 0 || len(reg.InitialIP) == 0 { + if reg.ID == 0 || len(reg.Key) == 0 { return nil, errors.New("incomplete Registration retrieved from DB") } - var contact []string - contactsPresent := false - if len(reg.Contact) != 0 { - contact = reg.Contact - contactsPresent = true - } - - // For some reason we use different serialization formats for InitialIP - // in database models and in protobufs, despite the fact that both formats - // are just []byte. - ipBytes, err := net.IP(reg.InitialIP).MarshalText() - if err != nil { - return nil, err - } - return &corepb.Registration{ - Id: reg.ID, - Key: reg.Key, - Contact: contact, - ContactsPresent: contactsPresent, - Agreement: reg.Agreement, - InitialIP: ipBytes, - CreatedAt: reg.CreatedAt.UTC().UnixNano(), - Status: reg.Status, + Id: reg.ID, + Key: reg.Key, + Agreement: reg.Agreement, + CreatedAt: timestamppb.New(reg.CreatedAt.UTC()), + Status: reg.Status, }, nil } -func modelToChallenge(cm *challModel) (core.Challenge, error) { - c := core.Challenge{ - Type: cm.Type, - Status: cm.Status, - Token: cm.Token, - ProvidedKeyAuthorization: cm.KeyAuthorization, - Validated: &cm.AttemptedAt, - } - if len(cm.Error) > 0 { - var problem probs.ProblemDetails - err := json.Unmarshal(cm.Error, &problem) - if err != nil { - return core.Challenge{}, badJSONError( - "failed to unmarshal challenge model's error", - cm.Error, - err) - } - c.Error = &problem - } - if len(cm.ValidationRecord) > 0 { - var vr []core.ValidationRecord - err := json.Unmarshal(cm.ValidationRecord, &vr) - if err != nil { - return core.Challenge{}, badJSONError( - "failed to unmarshal challenge model's validation record", - cm.ValidationRecord, - err) - } - c.ValidationRecord = vr - } - return c, nil -} - type recordedSerialModel struct { ID int64 Serial string @@ -359,7 +267,7 @@ type recordedSerialModel struct { Expires time.Time } -type precertificateModel struct { +type lintingCertModel struct { ID int64 Serial string RegistrationID int64 @@ -368,58 +276,108 @@ type precertificateModel struct { Expires time.Time } -type orderModel struct { - ID int64 - RegistrationID int64 - Expires time.Time - Created time.Time - Error []byte - CertificateSerial string - BeganProcessing bool +func (model lintingCertModel) toPb() *corepb.Certificate { + return &corepb.Certificate{ + RegistrationID: model.RegistrationID, + Serial: model.Serial, + Digest: "", + Der: model.DER, + Issued: timestamppb.New(model.Issued), + Expires: timestamppb.New(model.Expires), + } } -type requestedNameModel struct { - ID int64 - OrderID int64 - ReversedName string +type certificateModel struct { + ID int64 `db:"id"` + RegistrationID int64 `db:"registrationID"` + Serial string `db:"serial"` + Digest string `db:"digest"` + DER []byte `db:"der"` + Issued time.Time `db:"issued"` + Expires time.Time `db:"expires"` +} + +func (model certificateModel) toPb() *corepb.Certificate { + return &corepb.Certificate{ + RegistrationID: model.RegistrationID, + Serial: model.Serial, + Digest: model.Digest, + Der: model.DER, + Issued: timestamppb.New(model.Issued), + Expires: 
timestamppb.New(model.Expires), + } } -type orderToAuthzModel struct { - OrderID int64 - AuthzID int64 +type certificateStatusModel struct { + ID int64 `db:"id"` + Serial string `db:"serial"` + Status core.OCSPStatus `db:"status"` + OCSPLastUpdated time.Time `db:"ocspLastUpdated"` + RevokedDate time.Time `db:"revokedDate"` + RevokedReason revocation.Reason `db:"revokedReason"` + LastExpirationNagSent time.Time `db:"lastExpirationNagSent"` + NotAfter time.Time `db:"notAfter"` + IsExpired bool `db:"isExpired"` + IssuerID int64 `db:"issuerID"` } -func orderToModel(order *corepb.Order) (*orderModel, error) { - om := &orderModel{ - ID: order.Id, - RegistrationID: order.RegistrationID, - Expires: time.Unix(0, order.Expires), - Created: time.Unix(0, order.Created), - BeganProcessing: order.BeganProcessing, - CertificateSerial: order.CertificateSerial, +func (model certificateStatusModel) toPb() *corepb.CertificateStatus { + return &corepb.CertificateStatus{ + Serial: model.Serial, + Status: string(model.Status), + OcspLastUpdated: timestamppb.New(model.OCSPLastUpdated), + RevokedDate: timestamppb.New(model.RevokedDate), + RevokedReason: int64(model.RevokedReason), + LastExpirationNagSent: timestamppb.New(model.LastExpirationNagSent), + NotAfter: timestamppb.New(model.NotAfter), + IsExpired: model.IsExpired, + IssuerID: model.IssuerID, } +} - if order.Error != nil { - errJSON, err := json.Marshal(order.Error) +// orderModel represents one row in the orders table. The CertificateProfileName +// column is a pointer because the column is NULL-able. +type orderModel struct { + ID int64 + RegistrationID int64 + Expires time.Time + Created time.Time + Error []byte + CertificateSerial string + BeganProcessing bool + CertificateProfileName *string + Replaces *string + Authzs []byte +} + +func modelToOrder(om *orderModel) (*corepb.Order, error) { + profile := "" + if om.CertificateProfileName != nil { + profile = *om.CertificateProfileName + } + replaces := "" + if om.Replaces != nil { + replaces = *om.Replaces + } + var v2Authorizations []int64 + if len(om.Authzs) > 0 { + var decodedAuthzs sapb.Authzs + err := proto.Unmarshal(om.Authzs, &decodedAuthzs) if err != nil { return nil, err } - if len(errJSON) > mediumBlobSize { - return nil, fmt.Errorf("Error object is too large to store in the database") - } - om.Error = errJSON + v2Authorizations = decodedAuthzs.AuthzIDs } - return om, nil -} - -func modelToOrder(om *orderModel) (*corepb.Order, error) { order := &corepb.Order{ - Id: om.ID, - RegistrationID: om.RegistrationID, - Expires: om.Expires.UnixNano(), - Created: om.Created.UnixNano(), - CertificateSerial: om.CertificateSerial, - BeganProcessing: om.BeganProcessing, + Id: om.ID, + RegistrationID: om.RegistrationID, + Expires: timestamppb.New(om.Expires), + Created: timestamppb.New(om.Created), + CertificateSerial: om.CertificateSerial, + BeganProcessing: om.BeganProcessing, + CertificateProfileName: profile, + Replaces: replaces, + V2Authorizations: v2Authorizations, } if len(om.Error) > 0 { var problem corepb.ProblemDetails @@ -436,23 +394,27 @@ func modelToOrder(om *orderModel) (*corepb.Order, error) { } var challTypeToUint = map[string]uint8{ - "http-01": 0, - "dns-01": 1, - "tls-alpn-01": 2, + "http-01": 0, + "dns-01": 1, + "tls-alpn-01": 2, + "dns-account-01": 3, } var uintToChallType = map[uint8]string{ 0: "http-01", 1: "dns-01", 2: "tls-alpn-01", + 3: "dns-account-01", } var identifierTypeToUint = map[string]uint8{ "dns": 0, + "ip": 1, } -var uintToIdentifierType = map[uint8]string{ +var 
uintToIdentifierType = map[uint8]identifier.IdentifierType{ 0: "dns", + 1: "ip", } var statusToUint = map[core.AcmeStatus]uint8{ @@ -477,21 +439,134 @@ func statusUint(status core.AcmeStatus) uint8 { // authzFields is used in a variety of places in sa.go, and modifications to // it must be carried through to every use in sa.go -const authzFields = "id, identifierType, identifierValue, registrationID, status, expires, challenges, attempted, attemptedAt, token, validationError, validationRecord" +const authzFields = "id, identifierType, identifierValue, registrationID, certificateProfileName, status, expires, challenges, attempted, attemptedAt, token, validationError, validationRecord" +// authzModel represents one row in the authz2 table. The CertificateProfileName +// column is a pointer because the column is NULL-able. type authzModel struct { - ID int64 `db:"id"` - IdentifierType uint8 `db:"identifierType"` - IdentifierValue string `db:"identifierValue"` - RegistrationID int64 `db:"registrationID"` - Status uint8 `db:"status"` - Expires time.Time `db:"expires"` - Challenges uint8 `db:"challenges"` - Attempted *uint8 `db:"attempted"` - AttemptedAt *time.Time `db:"attemptedAt"` - Token []byte `db:"token"` - ValidationError []byte `db:"validationError"` - ValidationRecord []byte `db:"validationRecord"` + ID int64 `db:"id"` + IdentifierType uint8 `db:"identifierType"` + IdentifierValue string `db:"identifierValue"` + RegistrationID int64 `db:"registrationID"` + CertificateProfileName *string `db:"certificateProfileName"` + Status uint8 `db:"status"` + Expires time.Time `db:"expires"` + Challenges uint8 `db:"challenges"` + Attempted *uint8 `db:"attempted"` + AttemptedAt *time.Time `db:"attemptedAt"` + Token []byte `db:"token"` + ValidationError []byte `db:"validationError"` + ValidationRecord []byte `db:"validationRecord"` +} + +// rehydrateHostPort mutates a validation record. If the URL in the validation +// record cannot be parsed, an error will be returned. If the Hostname and Port +// fields already exist in the validation record, they will be retained. +// Otherwise, the Hostname and Port will be derived and set from the URL field +// of the validation record. +func rehydrateHostPort(vr *core.ValidationRecord) error { + if vr.URL == "" { + return fmt.Errorf("rehydrating validation record, URL field cannot be empty") + } + + parsedUrl, err := url.Parse(vr.URL) + if err != nil { + return fmt.Errorf("parsing validation record URL %q: %w", vr.URL, err) + } + + if vr.Hostname == "" { + hostname := parsedUrl.Hostname() + if hostname == "" { + return fmt.Errorf("hostname missing in URL %q", vr.URL) + } + vr.Hostname = hostname + } + + if vr.Port == "" { + // CABF BRs section 1.6.1: Authorized Ports: One of the following ports: 80 + // (http), 443 (https) + if parsedUrl.Port() == "" { + // If there is only a scheme, then we'll determine the appropriate port. + switch parsedUrl.Scheme { + case "https": + vr.Port = "443" + case "http": + vr.Port = "80" + default: + // This should never happen since the VA should have already + // checked the scheme. + return fmt.Errorf("unknown scheme %q in URL %q", parsedUrl.Scheme, vr.URL) + } + } else if parsedUrl.Port() == "80" || parsedUrl.Port() == "443" { + // If :80 or :443 were embedded in the URL field + // e.g. 
'"url":"https://example.com:443"' + vr.Port = parsedUrl.Port() + } else { + return fmt.Errorf("only ports 80/tcp and 443/tcp are allowed in URL %q", vr.URL) + } + } + + return nil +} + +// SelectAuthzsMatchingIssuance looks for a set of authzs that would have +// authorized a given issuance that is known to have occurred. The returned +// authzs will all belong to the given regID, will have potentially been valid +// at the time of issuance, and will have the appropriate identifier type and +// value. This may return multiple authzs for the same identifier type and value. +// +// This returns "potentially" valid authzs because a client may have set an +// authzs status to deactivated after issuance, so we return both valid and +// deactivated authzs. It also uses a small amount of leeway (1s) to account +// for possible clock skew. +// +// This function doesn't do anything special for authzs with an expiration in +// the past. If the stored authz has a valid status, it is returned with a +// valid status regardless of whether it is also expired. +func SelectAuthzsMatchingIssuance( + ctx context.Context, + s db.Selector, + regID int64, + issued time.Time, + idents identifier.ACMEIdentifiers, +) ([]*corepb.Authorization, error) { + // The WHERE clause returned by this function does not contain any + // user-controlled strings; all user-controlled input ends up in the + // returned placeholder args. + identConditions, identArgs := buildIdentifierQueryConditions(idents) + query := fmt.Sprintf(`SELECT %s FROM authz2 WHERE + registrationID = ? AND + status IN (?, ?) AND + expires >= ? AND + attemptedAt <= ? AND + (%s)`, + authzFields, + identConditions) + var args []any + args = append(args, + regID, + statusToUint[core.StatusValid], statusToUint[core.StatusDeactivated], + issued.Add(-1*time.Second), // leeway for clock skew + issued.Add(1*time.Second), // leeway for clock skew + ) + args = append(args, identArgs...) + + var authzModels []authzModel + _, err := s.Select(ctx, &authzModels, query, args...) + if err != nil { + return nil, err + } + + var authzs []*corepb.Authorization + for _, model := range authzModels { + authz, err := modelToAuthzPB(model) + if err != nil { + return nil, err + } + authzs = append(authzs, authz) + + } + return authzs, err } // hasMultipleNonPendingChallenges checks if a slice of challenges contains @@ -510,14 +585,53 @@ func hasMultipleNonPendingChallenges(challenges []*corepb.Challenge) bool { return false } +// newAuthzReqToModel converts an sapb.NewAuthzRequest to the authzModel storage +// representation. It hardcodes the status to "pending" because it should be +// impossible to create an authz in any other state. +func newAuthzReqToModel(authz *sapb.NewAuthzRequest, profile string) (*authzModel, error) { + am := &authzModel{ + IdentifierType: identifierTypeToUint[authz.Identifier.Type], + IdentifierValue: authz.Identifier.Value, + RegistrationID: authz.RegistrationID, + Status: statusToUint[core.StatusPending], + Expires: authz.Expires.AsTime(), + } + + if profile != "" { + am.CertificateProfileName = &profile + } + + for _, challType := range authz.ChallengeTypes { + // Set the challenge type bit in the bitmap + am.Challenges |= 1 << challTypeToUint[challType] + } + + token, err := base64.RawURLEncoding.DecodeString(authz.Token) + if err != nil { + return nil, err + } + am.Token = token + + return am, nil +} + // authzPBToModel converts a protobuf authorization representation to the // authzModel storage representation. 
+// Deprecated: this function is only used as part of test setup, do not
+// introduce any new uses in production code.
 func authzPBToModel(authz *corepb.Authorization) (*authzModel, error) {
+	ident := identifier.FromProto(authz.Identifier)
+
 	am := &authzModel{
-		IdentifierValue: authz.Identifier,
+		IdentifierType:  identifierTypeToUint[ident.ToProto().Type],
+		IdentifierValue: ident.Value,
 		RegistrationID:  authz.RegistrationID,
 		Status:          statusToUint[core.AcmeStatus(authz.Status)],
-		Expires:         time.Unix(0, authz.Expires).UTC(),
+		Expires:         authz.Expires.AsTime(),
+	}
+	if authz.CertificateProfileName != "" {
+		profile := authz.CertificateProfileName
+		am.CertificateProfileName = &profile
 	}
 	if authz.Id != "" {
 		// The v1 internal authorization objects use a string for the ID, the v2
@@ -555,8 +669,8 @@ func authzPBToModel(authz *corepb.Authorization) (*authzModel, error) {
 	// If validated Unix timestamp is zero then keep the core.Challenge Validated object nil.
 	var validated *time.Time
-	if chall.Validated != 0 {
-		val := time.Unix(0, chall.Validated).UTC()
+	if !core.IsAnyNilOrZero(chall.Validated) {
+		val := chall.Validated.AsTime()
 		validated = &val
 	}
 	am.AttemptedAt = validated
@@ -565,6 +679,12 @@ func authzPBToModel(authz *corepb.Authorization) (*authzModel, error) {
 	// can marshal them to JSON.
 	records := make([]core.ValidationRecord, len(chall.Validationrecords))
 	for i, recordPB := range chall.Validationrecords {
+		if chall.Type == string(core.ChallengeTypeHTTP01) {
+			// Remove these fields because they can be rehydrated later
+			// on from the URL field.
+			recordPB.Hostname = ""
+			recordPB.Port = ""
+		}
 		var err error
 		records[i], err = grpc.PBToValidationRecord(recordPB)
 		if err != nil {
@@ -600,7 +720,7 @@ func authzPBToModel(authz *corepb.Authorization) (*authzModel, error) {
 }
 
 // populateAttemptedFields takes a challenge and populates it with the validation fields status,
-// validation records, and error (the latter only if the validation failed) from a authzModel.
+// validation records, and error (the latter only if the validation failed) from an authzModel.
 func populateAttemptedFields(am authzModel, challenge *corepb.Challenge) error {
 	if len(am.ValidationError) != 0 {
 		// If the error is non-empty the challenge must be invalid.
@@ -613,10 +733,7 @@ func populateAttemptedFields(am authzModel, challenge *corepb.Challenge) error {
 				am.ValidationError, err)
 		}
-		challenge.Error, err = grpc.ProblemDetailsToPB(&prob)
-		if err != nil {
-			return err
-		}
+		challenge.Error = grpc.ProblemDetailsToPB(&prob)
 	} else {
 		// If the error is empty the challenge must be valid.
 		challenge.Status = string(core.StatusValid)
@@ -631,6 +748,14 @@ func populateAttemptedFields(am authzModel, challenge *corepb.Challenge) error {
 	}
 	challenge.Validationrecords = make([]*corepb.ValidationRecord, len(records))
 	for i, r := range records {
+		// Fixes implicit memory aliasing in for loop so we can dereference r
+		// later on for rehydrateHostPort.
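+		// Hostname and Port were cleared before storage (see authzPBToModel
+		// above), so HTTP-01 records read back from the database must have
+		// them reconstructed from their URL field.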
+ if challenge.Type == string(core.ChallengeTypeHTTP01) { + err := rehydrateHostPort(&r) + if err != nil { + return err + } + } challenge.Validationrecords[i], err = grpc.ValidationRecordToPB(r) if err != nil { return err @@ -640,12 +765,23 @@ func populateAttemptedFields(am authzModel, challenge *corepb.Challenge) error { } func modelToAuthzPB(am authzModel) (*corepb.Authorization, error) { + identType, ok := uintToIdentifierType[am.IdentifierType] + if !ok { + return nil, fmt.Errorf("unrecognized identifier type encoding %d", am.IdentifierType) + } + + profile := "" + if am.CertificateProfileName != nil { + profile = *am.CertificateProfileName + } + pb := &corepb.Authorization{ - Id: fmt.Sprintf("%d", am.ID), - Status: string(uintToStatus[am.Status]), - Identifier: am.IdentifierValue, - RegistrationID: am.RegistrationID, - Expires: am.Expires.UTC().UnixNano(), + Id: fmt.Sprintf("%d", am.ID), + Status: string(uintToStatus[am.Status]), + Identifier: identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue}.ToProto(), + RegistrationID: am.RegistrationID, + Expires: timestamppb.New(am.Expires), + CertificateProfileName: profile, } // Populate authorization challenge array. We do this by iterating through // the challenge type bitmap and creating a challenge of each type if its @@ -655,7 +791,7 @@ func modelToAuthzPB(am authzModel) (*corepb.Authorization, error) { // to core.StatusValid or core.StatusInvalid depending on if there is anything // in ValidationError and populate the ValidationRecord and ValidationError // fields. - for pos := uint8(0); pos < 8; pos++ { + for pos := range uint8(8) { if (am.Challenges>>pos)&1 == 1 { challType := uintToChallType[pos] challenge := &corepb.Challenge{ @@ -674,9 +810,9 @@ func modelToAuthzPB(am authzModel) (*corepb.Authorization, error) { return nil, err } // Get the attemptedAt time and assign to the challenge validated time. - var validated int64 + var validated *timestamppb.Timestamp if am.AttemptedAt != nil { - validated = am.AttemptedAt.UTC().UnixNano() + validated = timestamppb.New(*am.AttemptedAt) } challenge.Validated = validated pb.Challenges = append(pb.Challenges, challenge) @@ -702,3 +838,579 @@ var stringToSourceInt = map[string]int{ "API": 1, "admin-revoker": 2, } + +// incidentModel represents a row in the 'incidents' table. +type incidentModel struct { + ID int64 `db:"id"` + SerialTable string `db:"serialTable"` + URL string `db:"url"` + RenewBy time.Time `db:"renewBy"` + Enabled bool `db:"enabled"` +} + +func incidentModelToPB(i incidentModel) sapb.Incident { + return sapb.Incident{ + Id: i.ID, + SerialTable: i.SerialTable, + Url: i.URL, + RenewBy: timestamppb.New(i.RenewBy), + Enabled: i.Enabled, + } +} + +// incidentSerialModel represents a row in an 'incident_*' table. +type incidentSerialModel struct { + Serial string `db:"serial"` + RegistrationID *int64 `db:"registrationID"` + OrderID *int64 `db:"orderID"` + LastNoticeSent *time.Time `db:"lastNoticeSent"` +} + +// crlEntryModel has just the certificate status fields necessary to construct +// an entry in a CRL. +type crlEntryModel struct { + Serial string `db:"serial"` + Status core.OCSPStatus `db:"status"` + RevokedReason revocation.Reason `db:"revokedReason"` + RevokedDate time.Time `db:"revokedDate"` +} + +// fqdnSet contains the SHA256 hash of the lowercased, comma joined dNSNames +// contained in a certificate. 
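+// The hash is computed by core.HashIdentifiers (see addFQDNSet below); the
+// same helper is used when writing orderFQDNSet rows, so stored setHash
+// values are directly comparable between the two tables.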
+type fqdnSet struct {
+	ID      int64
+	SetHash []byte
+	Serial  string
+	Issued  time.Time
+	Expires time.Time
+}
+
+// orderFQDNSet contains the SHA256 hash of the lowercased, comma joined names
+// from a new-order request, along with the corresponding orderID, the
+// registration ID, and the order expiry. This is used to find
+// existing orders for reuse.
+type orderFQDNSet struct {
+	ID             int64
+	SetHash        []byte
+	OrderID        int64
+	RegistrationID int64
+	Expires        time.Time
+}
+
+func addFQDNSet(ctx context.Context, db db.Inserter, idents identifier.ACMEIdentifiers, serial string, issued time.Time, expires time.Time) error {
+	return db.Insert(ctx, &fqdnSet{
+		SetHash: core.HashIdentifiers(idents),
+		Serial:  serial,
+		Issued:  issued,
+		Expires: expires,
+	})
+}
+
+// addOrderFQDNSet creates a new orderFQDNSet row using the provided
+// information. This function accepts a transaction so that the orderFqdnSet
+// addition can take place within the order addition transaction. The caller is
+// required to roll back the transaction if an error is returned.
+func addOrderFQDNSet(
+	ctx context.Context,
+	db db.Inserter,
+	idents identifier.ACMEIdentifiers,
+	orderID int64,
+	regID int64,
+	expires time.Time) error {
+	return db.Insert(ctx, &orderFQDNSet{
+		SetHash:        core.HashIdentifiers(idents),
+		OrderID:        orderID,
+		RegistrationID: regID,
+		Expires:        expires,
+	})
+}
+
+// deleteOrderFQDNSet deletes an orderFQDNSet row that matches the provided
+// orderID. This function accepts a transaction so that the deletion can
+// take place within the finalization transaction. The caller is required to
+// roll back the transaction if an error is returned.
+func deleteOrderFQDNSet(
+	ctx context.Context,
+	db db.Execer,
+	orderID int64) error {
+
+	result, err := db.ExecContext(ctx, `
+		DELETE FROM orderFqdnSets
+		WHERE orderID = ?`,
+		orderID)
+	if err != nil {
+		return err
+	}
+	rowsDeleted, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	// We always expect there to be an order FQDN set row for each
+	// pending/processing order that is being finalized. If there isn't one
+	// then something is amiss and should be raised as an internal server
+	// error.
+	if rowsDeleted == 0 {
+		return berrors.InternalServerError("No orderFQDNSet exists to delete")
+	}
+	return nil
+}
+
+func addIssuedNames(ctx context.Context, queryer db.Execer, cert *x509.Certificate, isRenewal bool) error {
+	if len(cert.DNSNames) == 0 && len(cert.IPAddresses) == 0 {
+		return berrors.InternalServerError("certificate has no DNSNames or IPAddresses")
+	}
+
+	multiInserter, err := db.NewMultiInserter("issuedNames", []string{"reversedName", "serial", "notBefore", "renewal"})
+	if err != nil {
+		return err
+	}
+	for _, name := range cert.DNSNames {
+		err = multiInserter.Add([]any{
+			reverseFQDN(name),
+			core.SerialToString(cert.SerialNumber),
+			cert.NotBefore.Truncate(24 * time.Hour),
+			isRenewal,
+		})
+		if err != nil {
+			return err
+		}
+	}
+	for _, ip := range cert.IPAddresses {
+		err = multiInserter.Add([]any{
+			ip.String(),
+			core.SerialToString(cert.SerialNumber),
+			cert.NotBefore.Truncate(24 * time.Hour),
+			isRenewal,
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return multiInserter.Insert(ctx, queryer)
+}
+
+// EncodeIssuedName translates an FQDN to/from the issuedNames table by
+// reversing its dot-separated elements, and translates an IP address by
+// returning its normal string form.
+//
+// This is for strings whose identifier type is ambiguous. If you know your
+// string is an FQDN, use reverseFQDN().
+// EncodeIssuedName translates a FQDN to/from the issuedNames table by reversing
+// its dot-separated elements, and translates an IP address by returning its
+// normal string form.
+//
+// This is for strings of ambiguous identifier values. If you know your string
+// is a FQDN, use reverseFQDN(). If you have an IP address, use
+// netip.Addr.String() or net.IP.String().
+func EncodeIssuedName(name string) string {
+	netIP, err := netip.ParseAddr(name)
+	if err == nil {
+		return netIP.String()
+	}
+	return reverseFQDN(name)
+}
+
+// reverseFQDN reverses the elements of a dot-separated FQDN.
+//
+// If your string might be an IP address, use EncodeIssuedName() instead.
+func reverseFQDN(fqdn string) string {
+	labels := strings.Split(fqdn, ".")
+	for i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 {
+		labels[i], labels[j] = labels[j], labels[i]
+	}
+	return strings.Join(labels, ".")
+}
+
+func addKeyHash(ctx context.Context, db db.Inserter, cert *x509.Certificate) error {
+	if cert.RawSubjectPublicKeyInfo == nil {
+		return errors.New("certificate has a nil RawSubjectPublicKeyInfo")
+	}
+	h := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
+	khm := &keyHashModel{
+		KeyHash:      h[:],
+		CertNotAfter: cert.NotAfter,
+		CertSerial:   core.SerialToString(cert.SerialNumber),
+	}
+	return db.Insert(ctx, khm)
+}
+
+var blockedKeysColumns = "keyHash, added, source, comment"
+
+// statusForOrder examines the status of a provided order's authorizations to
+// determine what the overall status of the order should be. In summary:
+//   - If the order has an error, the order is invalid.
+//   - If any of the order's authorizations are in any state other than
+//     valid or pending, the order is invalid.
+//   - If any of the order's authorizations are pending, the order is pending.
+//   - If all of the order's authorizations are valid, and there is
+//     a certificate serial, the order is valid.
+//   - If all of the order's authorizations are valid, and we have begun
+//     processing, but there is no certificate serial, the order is processing.
+//   - If all of the order's authorizations are valid, and we haven't begun
+//     processing, then the order is ready.
+//
+// An error is returned for any other case.
+func statusForOrder(order *corepb.Order, authzValidityInfo []authzValidity, now time.Time) (string, error) {
+	// Without any further work we know an order with an error is invalid
+	if order.Error != nil {
+		return string(core.StatusInvalid), nil
+	}
+
+	// If the order is expired the status is invalid and we don't need to get
+	// order authorizations. It's important to exit early in this case because
+	// an order that references an expired authorization will itself have
+	// expired (we match the order expiry to the associated authz expiries in
+	// ra.NewOrder), and expired authorizations may be purged from the DB.
+	// Because of this purging, fetching the authzs for an expired order may
+	// return fewer authz objects than expected, triggering a 500 error response.
+	if order.Expires.AsTime().Before(now) {
+		return string(core.StatusInvalid), nil
+	}
+
+	// If getAuthorizationStatuses returned a different number of authorization
+	// objects than the order's slice of authorization IDs, something has gone
+	// wrong that is worth raising as an internal error.
+	if len(authzValidityInfo) != len(order.V2Authorizations) {
+		return "", berrors.InternalServerError(
+			"getAuthorizationStatuses returned the wrong number of authorization statuses "+
+				"(%d vs expected %d) for order %d",
+			len(authzValidityInfo), len(order.V2Authorizations), order.Id)
+	}
+
+	// Keep a count of the authorizations seen
+	pendingAuthzs := 0
+	validAuthzs := 0
+	otherAuthzs := 0
+	expiredAuthzs := 0
+
+	// Loop over each of the order's authorization objects to examine the authz status
+	for _, info := range authzValidityInfo {
+		switch uintToStatus[info.Status] {
+		case core.StatusPending:
+			pendingAuthzs++
+		case core.StatusValid:
+			validAuthzs++
+		case core.StatusInvalid:
+			otherAuthzs++
+		case core.StatusDeactivated:
+			otherAuthzs++
+		case core.StatusRevoked:
+			otherAuthzs++
+		default:
+			return "", berrors.InternalServerError(
+				"Order is in an invalid state. Authz has invalid status %d",
+				info.Status)
+		}
+		if info.Expires.Before(now) {
+			expiredAuthzs++
+		}
+	}
+
+	// An order is invalid if **any** of its authzs are invalid, deactivated,
+	// revoked, or expired; see https://tools.ietf.org/html/rfc8555#section-7.1.6
+	if otherAuthzs > 0 || expiredAuthzs > 0 {
+		return string(core.StatusInvalid), nil
+	}
+	// An order is pending if **any** of its authzs are pending
+	if pendingAuthzs > 0 {
+		return string(core.StatusPending), nil
+	}
+
+	// An order is fully authorized if it has valid authzs for each of the order
+	// identifiers.
+	fullyAuthorized := len(order.Identifiers) == validAuthzs
+
+	// If the order isn't fully authorized we've encountered an internal error:
+	// Above we checked for any invalid or pending authzs and should have returned
+	// early. Somehow we made it this far but also don't have the correct number
+	// of valid authzs.
+	if !fullyAuthorized {
+		return "", berrors.InternalServerError(
+			"Order has the incorrect number of valid authorizations & no pending, " +
+				"deactivated or invalid authorizations")
+	}
+
+	// If the order is fully authorized and the certificate serial is set, then
+	// the order is valid.
+	if fullyAuthorized && order.CertificateSerial != "" {
+		return string(core.StatusValid), nil
+	}
+
+	// If the order is fully authorized, and we have begun processing it, then
+	// the order is processing.
+	if fullyAuthorized && order.BeganProcessing {
+		return string(core.StatusProcessing), nil
+	}
+
+	if fullyAuthorized && !order.BeganProcessing {
+		return string(core.StatusReady), nil
+	}
+
+	return "", berrors.InternalServerError(
+		"Order %d is in an invalid state. No state known for this order's "+
+			"authorizations", order.Id)
+}
+
+// authzValidity is a subset of authzModel.
+type authzValidity struct {
+	IdentifierType  uint8     `db:"identifierType"`
+	IdentifierValue string    `db:"identifierValue"`
+	Status          uint8     `db:"status"`
+	Expires         time.Time `db:"expires"`
+}
+
+// getAuthorizationStatuses takes a sequence of authz IDs and returns the
+// status and expiration date of each of them.
+func getAuthorizationStatuses(ctx context.Context, s db.Selector, ids []int64) ([]authzValidity, error) {
+	var params []any
+	for _, id := range ids {
+		params = append(params, id)
+	}
+	var validities []authzValidity
+	_, err := s.Select(
+		ctx,
+		&validities,
+		fmt.Sprintf("SELECT identifierType, identifierValue, status, expires FROM authz2 WHERE id IN (%s)",
+			db.QuestionMarks(len(ids))),
+		params...,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return validities, nil
+}
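
Editorial aside, not part of the diff: getAuthorizationStatuses relies on db.QuestionMarks to parameterize the IN (...) clause. The real helper lives in Boulder's db package and may differ in detail; a hypothetical stand-in showing the behavior assumed here:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // questionMarks is a hypothetical stand-in for db.QuestionMarks: it expands
    // a count into a "?,?,?" placeholder list so that an arbitrary number of
    // authz IDs can be bound safely into a single IN (...) clause.
    func questionMarks(n int) string {
    	marks := make([]string, n)
    	for i := range marks {
    		marks[i] = "?"
    	}
    	return strings.Join(marks, ",")
    }

    func main() {
    	fmt.Printf("SELECT ... FROM authz2 WHERE id IN (%s)\n", questionMarks(3))
    	// SELECT ... FROM authz2 WHERE id IN (?,?,?)
    }
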
+// crlShardModel represents one row in the crlShards table. The ThisUpdate and
+// NextUpdate fields are pointers because they are NULL-able columns.
+type crlShardModel struct {
+	ID          int64      `db:"id"`
+	IssuerID    int64      `db:"issuerID"`
+	Idx         int        `db:"idx"`
+	ThisUpdate  *time.Time `db:"thisUpdate"`
+	NextUpdate  *time.Time `db:"nextUpdate"`
+	LeasedUntil time.Time  `db:"leasedUntil"`
+}
+
+// revokedCertModel represents one row in the revokedCertificates table. It
+// contains all of the information necessary to populate a CRL entry or OCSP
+// response for the indicated certificate.
+type revokedCertModel struct {
+	ID            int64             `db:"id"`
+	IssuerID      int64             `db:"issuerID"`
+	Serial        string            `db:"serial"`
+	NotAfterHour  time.Time         `db:"notAfterHour"`
+	ShardIdx      int64             `db:"shardIdx"`
+	RevokedDate   time.Time         `db:"revokedDate"`
+	RevokedReason revocation.Reason `db:"revokedReason"`
+}
+
+// replacementOrderModel represents one row in the replacementOrders table. It
+// contains all of the information necessary to link a renewal order to the
+// certificate it replaces.
+type replacementOrderModel struct {
+	// ID is an auto-incrementing row ID.
+	ID int64 `db:"id"`
+	// Serial is the serial number of the replaced certificate.
+	Serial string `db:"serial"`
+	// OrderID is the ID of the replacement order.
+	OrderID int64 `db:"orderID"`
+	// OrderExpires is the expiry time of the new order. This is used to
+	// determine if we can accept a new replacement order for the same Serial.
+	OrderExpires time.Time `db:"orderExpires"`
+	// Replaced is a boolean indicating whether the certificate has been
+	// replaced, i.e. whether the new order has been finalized. Once this is
+	// true, no new replacement orders can be accepted for the same Serial.
+	Replaced bool `db:"replaced"`
+}
+
+// addReplacementOrder inserts or updates the replacementOrders row matching the
+// provided serial with the details provided. This function accepts a
+// transaction so that the insert or update takes place within the new order
+// transaction.
+func addReplacementOrder(ctx context.Context, db db.SelectExecer, serial string, orderID int64, orderExpires time.Time) error {
+	var existingID []int64
+	_, err := db.Select(ctx, &existingID, `
+		SELECT id
+		FROM replacementOrders
+		WHERE serial = ?
+		LIMIT 1`,
+		serial,
+	)
+	if err != nil && !errors.Is(err, sql.ErrNoRows) {
+		return fmt.Errorf("checking for existing replacement order: %w", err)
+	}
+
+	if len(existingID) > 0 {
+		// Update existing replacementOrder row.
+		_, err = db.ExecContext(ctx, `
+			UPDATE replacementOrders
+			SET orderID = ?, orderExpires = ?
+			WHERE id = ?`,
+			orderID, orderExpires,
+			existingID[0],
+		)
+		if err != nil {
+			return fmt.Errorf("updating replacement order: %w", err)
+		}
+	} else {
+		// Insert new replacementOrder row.
+		_, err = db.ExecContext(ctx, `
+			INSERT INTO replacementOrders (serial, orderID, orderExpires)
+			VALUES (?, ?, ?)`,
+			serial, orderID, orderExpires,
+		)
+		if err != nil {
+			return fmt.Errorf("creating replacement order: %w", err)
+		}
+	}
+	return nil
+}
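
A design note, not a requested change: addReplacementOrder issues a SELECT and then branches between UPDATE and INSERT. Assuming replacementOrders carries a UNIQUE key on serial (an assumption; the schema is not shown in this diff), MariaDB would allow the same effect in a single round trip:

    // Hedged sketch only: collapses the select-then-branch into one statement,
    // relying on an assumed UNIQUE KEY on replacementOrders.serial.
    func addReplacementOrderUpsert(ctx context.Context, db db.Execer, serial string, orderID int64, orderExpires time.Time) error {
    	_, err := db.ExecContext(ctx, `
    		INSERT INTO replacementOrders (serial, orderID, orderExpires)
    		VALUES (?, ?, ?)
    		ON DUPLICATE KEY UPDATE
    			orderID = VALUES(orderID),
    			orderExpires = VALUES(orderExpires)`,
    		serial, orderID, orderExpires,
    	)
    	return err
    }

The explicit two-step version above keeps its behavior independent of schema constraints and yields more specific error messages, which may well be why it was written that way.
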
+// setReplacementOrderFinalized sets the replaced flag for the replacementOrder
+// row matching the provided orderID to true. This function accepts a
+// transaction so that the update can take place within the finalization
+// transaction.
+func setReplacementOrderFinalized(ctx context.Context, db db.Execer, orderID int64) error {
+	_, err := db.ExecContext(ctx, `
+		UPDATE replacementOrders
+		SET replaced = true
+		WHERE orderID = ?
+		LIMIT 1`,
+		orderID,
+	)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+type identifierModel struct {
+	Type  uint8  `db:"identifierType"`
+	Value string `db:"identifierValue"`
+}
+
+func newIdentifierModelFromPB(pb *corepb.Identifier) (identifierModel, error) {
+	idType, ok := identifierTypeToUint[pb.Type]
+	if !ok {
+		return identifierModel{}, fmt.Errorf("unsupported identifier type %q", pb.Type)
+	}
+
+	return identifierModel{
+		Type:  idType,
+		Value: pb.Value,
+	}, nil
+}
+
+func newPBFromIdentifierModel(id identifierModel) (*corepb.Identifier, error) {
+	idType, ok := uintToIdentifierType[id.Type]
+	if !ok {
+		return nil, fmt.Errorf("unsupported identifier type %d", id.Type)
+	}
+
+	return &corepb.Identifier{
+		Type:  string(idType),
+		Value: id.Value,
+	}, nil
+}
+
+func newIdentifierModelsFromPB(pbs []*corepb.Identifier) ([]identifierModel, error) {
+	ids := make([]identifierModel, 0, len(pbs))
+	for _, pb := range pbs {
+		id, err := newIdentifierModelFromPB(pb)
+		if err != nil {
+			return nil, err
+		}
+		ids = append(ids, id)
+	}
+	return ids, nil
+}
+
+func newPBFromIdentifierModels(ids []identifierModel) (*sapb.Identifiers, error) {
+	pbs := make([]*corepb.Identifier, 0, len(ids))
+	for _, id := range ids {
+		pb, err := newPBFromIdentifierModel(id)
+		if err != nil {
+			return nil, err
+		}
+		pbs = append(pbs, pb)
+	}
+	return &sapb.Identifiers{Identifiers: pbs}, nil
+}
+
+// buildIdentifierQueryConditions takes a slice of identifiers and returns a
+// string (conditions to use within the prepared statement) and a slice of any
+// values (arguments for the prepared statement), both to use within a WHERE
+// clause for queries against the authz2 table.
+//
+// Although this function takes user-controlled input, it does not include any
+// of that input directly in the returned SQL string. The resulting string
+// contains only column names, boolean operators, and question mark
+// placeholders.
+func buildIdentifierQueryConditions(idents identifier.ACMEIdentifiers) (string, []any) {
+	if len(idents) == 0 {
+		// No identifier values to check.
+		return "FALSE", []any{}
+	}
+
+	identsByType := map[identifier.IdentifierType][]string{}
+	for _, id := range idents {
+		identsByType[id.Type] = append(identsByType[id.Type], id.Value)
+	}
+
+	var conditions []string
+	var args []any
+	for idType, idValues := range identsByType {
+		conditions = append(conditions,
+			fmt.Sprintf("identifierType = ? AND identifierValue IN (%s)",
+				db.QuestionMarks(len(idValues)),
+			),
+		)
+		args = append(args, identifierTypeToUint[string(idType)])
+		for _, idValue := range idValues {
+			args = append(args, idValue)
+		}
+	}
+
+	return strings.Join(conditions, " OR "), args
+}
+
+// pausedModel represents a row in the paused table. It contains the
+// registrationID of the paused account, the time the (account, identifier) pair
+// was paused, and the time the pair was unpaused. The UnpausedAt field is
+// nullable because the pair may not have been unpaused yet. A pair is
+// considered paused if there is a matching row in the paused table with a NULL
+// UnpausedAt time.
+type pausedModel struct { + identifierModel + RegistrationID int64 `db:"registrationID"` + PausedAt time.Time `db:"pausedAt"` + UnpausedAt *time.Time `db:"unpausedAt"` +} + +type overrideModel struct { + LimitEnum int64 `db:"limitEnum"` + BucketKey string `db:"bucketKey"` + Comment string `db:"comment"` + PeriodNS int64 `db:"periodNS"` + Count int64 `db:"count"` + Burst int64 `db:"burst"` + UpdatedAt time.Time `db:"updatedAt"` + Enabled bool `db:"enabled"` +} + +func overrideModelForPB(pb *sapb.RateLimitOverride, updatedAt time.Time, enabled bool) overrideModel { + return overrideModel{ + LimitEnum: pb.LimitEnum, + BucketKey: pb.BucketKey, + Comment: pb.Comment, + PeriodNS: pb.Period.AsDuration().Nanoseconds(), + Count: pb.Count, + Burst: pb.Burst, + UpdatedAt: updatedAt, + Enabled: enabled, + } +} + +func newPBFromOverrideModel(m *overrideModel) *sapb.RateLimitOverride { + return &sapb.RateLimitOverride{ + LimitEnum: m.LimitEnum, + BucketKey: m.BucketKey, + Comment: m.Comment, + Period: durationpb.New(time.Duration(m.PeriodNS)), + Count: m.Count, + Burst: m.Burst, + } +} diff --git a/sa/model_test.go b/sa/model_test.go index 7395825cc0f..0670b5703c0 100644 --- a/sa/model_test.go +++ b/sa/model_test.go @@ -1,20 +1,30 @@ package sa import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" - "crypto/rsa" "crypto/x509" "crypto/x509/pkix" - "encoding/base64" + "database/sql" + "fmt" "math/big" - "net" + "net/netip" + "slices" "testing" "time" "github.com/jmhodges/clock" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/letsencrypt/boulder/db" "github.com/letsencrypt/boulder/grpc" + "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/probs" + sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/test/vars" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" @@ -28,19 +38,11 @@ func TestRegistrationModelToPb(t *testing.T) { }{ { name: "No ID", - input: regModel{ID: 0, Key: []byte("foo"), InitialIP: []byte("foo")}, + input: regModel{ID: 0, Key: []byte("foo")}, }, { name: "No Key", - input: regModel{ID: 1, Key: nil, InitialIP: []byte("foo")}, - }, - { - name: "No IP", - input: regModel{ID: 1, Key: []byte("foo"), InitialIP: nil}, - }, - { - name: "Bad IP", - input: regModel{ID: 1, Key: []byte("foo"), InitialIP: []byte("foo")}, + input: regModel{ID: 1, Key: nil}, }, } for _, tc := range badCases { @@ -50,133 +52,163 @@ func TestRegistrationModelToPb(t *testing.T) { }) } - _, err := registrationModelToPb(®Model{ - ID: 1, Key: []byte("foo"), InitialIP: net.ParseIP("1.2.3.4"), - }) + _, err := registrationModelToPb(®Model{ID: 1, Key: []byte("foo")}) test.AssertNotError(t, err, "Should pass") } -func TestRegistrationPbToModel(t *testing.T) {} - func TestAuthzModel(t *testing.T) { - authzPB := &corepb.Authorization{ - Id: "1", - Identifier: "example.com", - RegistrationID: 1, - Status: string(core.StatusValid), - Expires: 1234, - Challenges: []*corepb.Challenge{ - { - Type: string(core.ChallengeTypeHTTP01), - Status: string(core.StatusValid), - Token: "MTIz", - Validated: 1234, - Validationrecords: []*corepb.ValidationRecord{ - { - Hostname: "hostname", - Port: "port", - AddressUsed: []byte("1.2.3.4"), - Url: "url", - AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, - AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, + // newTestAuthzPB returns a new *corepb.Authorization for `example.com` 
that + // is valid, and contains a single valid HTTP-01 challenge. These are the + // most common authorization attributes used in tests. Some tests will + // customize them after calling this. + newTestAuthzPB := func(validated time.Time) *corepb.Authorization { + return &corepb.Authorization{ + Id: "1", + Identifier: identifier.NewDNS("example.com").ToProto(), + RegistrationID: 1, + Status: string(core.StatusValid), + Expires: timestamppb.New(validated.Add(24 * time.Hour)), + Challenges: []*corepb.Challenge{ + { + Type: string(core.ChallengeTypeHTTP01), + Status: string(core.StatusValid), + Token: "MTIz", + Validated: timestamppb.New(validated), + Validationrecords: []*corepb.ValidationRecord{ + { + AddressUsed: []byte("1.2.3.4"), + Url: "https://example.com", + Hostname: "example.com", + Port: "443", + AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, + AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, + }, }, }, }, - }, + } } + clk := clock.New() + + authzPB := newTestAuthzPB(clk.Now()) + authzPB.CertificateProfileName = "test" + model, err := authzPBToModel(authzPB) test.AssertNotError(t, err, "authzPBToModel failed") authzPBOut, err := modelToAuthzPB(*model) test.AssertNotError(t, err, "modelToAuthzPB failed") + if authzPB.Challenges[0].Validationrecords[0].Hostname != "" { + test.Assert(t, false, fmt.Sprintf("dehydrated http-01 validation record expected hostname field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Hostname)) + } + if authzPB.Challenges[0].Validationrecords[0].Port != "" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Port)) + } + // Shoving the Hostname and Port back into the validation record should + // succeed because authzPB validation record should match the retrieved + // model from the database with the rehydrated Hostname and Port. 
+ authzPB.Challenges[0].Validationrecords[0].Hostname = "example.com" + authzPB.Challenges[0].Validationrecords[0].Port = "443" test.AssertDeepEquals(t, authzPB.Challenges, authzPBOut.Challenges) + test.AssertEquals(t, authzPBOut.CertificateProfileName, authzPB.CertificateProfileName) + + authzPB = newTestAuthzPB(clk.Now()) + + validationErr := probs.Connection("weewoo") - validationErr := probs.ConnectionFailure("weewoo") authzPB.Challenges[0].Status = string(core.StatusInvalid) - authzPB.Challenges[0].Error, err = grpc.ProblemDetailsToPB(validationErr) - test.AssertNotError(t, err, "grpc.ProblemDetailsToPB failed") + authzPB.Challenges[0].Error = grpc.ProblemDetailsToPB(validationErr) model, err = authzPBToModel(authzPB) test.AssertNotError(t, err, "authzPBToModel failed") authzPBOut, err = modelToAuthzPB(*model) test.AssertNotError(t, err, "modelToAuthzPB failed") + if authzPB.Challenges[0].Validationrecords[0].Hostname != "" { + test.Assert(t, false, fmt.Sprintf("dehydrated http-01 validation record expected hostname field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Hostname)) + } + if authzPB.Challenges[0].Validationrecords[0].Port != "" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port field to be missing, but found %v", authzPB.Challenges[0].Validationrecords[0].Port)) + } + // Shoving the Hostname and Port back into the validation record should + // succeed because authzPB validation record should match the retrieved + // model from the database with the rehydrated Hostname and Port. + authzPB.Challenges[0].Validationrecords[0].Hostname = "example.com" + authzPB.Challenges[0].Validationrecords[0].Port = "443" test.AssertDeepEquals(t, authzPB.Challenges, authzPBOut.Challenges) - authzPB = &corepb.Authorization{ - Id: "1", - Identifier: "example.com", - RegistrationID: 1, - Status: string(core.StatusInvalid), - Expires: 1234, - Challenges: []*corepb.Challenge{ - { - Type: string(core.ChallengeTypeHTTP01), - Status: string(core.StatusInvalid), - Token: "MTIz", - Validationrecords: []*corepb.ValidationRecord{ - { - Hostname: "hostname", - Port: "port", - AddressUsed: []byte("1.2.3.4"), - Url: "url", - AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, - AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, - }, + authzPB = newTestAuthzPB(clk.Now()) + authzPB.Status = string(core.StatusInvalid) + authzPB.Challenges = []*corepb.Challenge{ + { + Type: string(core.ChallengeTypeHTTP01), + Status: string(core.StatusInvalid), + Token: "MTIz", + Validationrecords: []*corepb.ValidationRecord{ + { + AddressUsed: []byte("1.2.3.4"), + Url: "url", + AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, + AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, }, }, - { - Type: string(core.ChallengeTypeDNS01), - Status: string(core.StatusInvalid), - Token: "MTIz", - Validationrecords: []*corepb.ValidationRecord{ - { - Hostname: "hostname", - Port: "port", - AddressUsed: []byte("1.2.3.4"), - Url: "url", - AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, - AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, - }, + }, + { + Type: string(core.ChallengeTypeDNS01), + Status: string(core.StatusInvalid), + Token: "MTIz", + Validationrecords: []*corepb.ValidationRecord{ + { + AddressUsed: []byte("1.2.3.4"), + Url: "url", + AddressesResolved: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 
3, 4}}, + AddressesTried: [][]byte{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}}, }, }, }, } _, err = authzPBToModel(authzPB) test.AssertError(t, err, "authzPBToModel didn't fail with multiple non-pending challenges") -} -// TestModelToChallengeBadJSON tests that converting a challenge model with an -// invalid validation error field or validation record field produces the -// expected bad JSON error. -func TestModelToChallengeBadJSON(t *testing.T) { - badJSON := []byte(`{`) + // Test that the caller Hostname and Port rehydration returns the expected + // data in the expected fields. + authzPB = newTestAuthzPB(clk.Now()) - testCases := []struct { - Name string - Model *challModel - }{ - { - Name: "Bad error field", - Model: &challModel{ - Error: badJSON, - }, - }, - { - Name: "Bad validation record field", - Model: &challModel{ - ValidationRecord: badJSON, - }, - }, + model, err = authzPBToModel(authzPB) + test.AssertNotError(t, err, "authzPBToModel failed") + + authzPBOut, err = modelToAuthzPB(*model) + test.AssertNotError(t, err, "modelToAuthzPB failed") + if authzPBOut.Challenges[0].Validationrecords[0].Hostname != "example.com" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected hostname example.com but found %v", authzPBOut.Challenges[0].Validationrecords[0].Hostname)) } - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - _, err := modelToChallenge(tc.Model) - test.AssertError(t, err, "expected error from modelToChallenge") - var badJSONErr errBadJSON - test.AssertErrorWraps(t, err, &badJSONErr) - test.AssertEquals(t, string(badJSONErr.json), string(badJSON)) - }) + if authzPBOut.Challenges[0].Validationrecords[0].Port != "443" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port 443 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Port)) + } + + authzPB = newTestAuthzPB(clk.Now()) + authzPB.Identifier = identifier.NewIP(netip.MustParseAddr("1.2.3.4")).ToProto() + authzPB.Challenges[0].Validationrecords[0].Url = "https://1.2.3.4" + authzPB.Challenges[0].Validationrecords[0].Hostname = "1.2.3.4" + + model, err = authzPBToModel(authzPB) + test.AssertNotError(t, err, "authzPBToModel failed") + authzPBOut, err = modelToAuthzPB(*model) + test.AssertNotError(t, err, "modelToAuthzPB failed") + + identOut := identifier.FromProto(authzPBOut.Identifier) + if identOut.Type != identifier.TypeIP { + test.Assert(t, false, fmt.Sprintf("expected identifier type ip but found %s", identOut.Type)) + } + if identOut.Value != "1.2.3.4" { + test.Assert(t, false, fmt.Sprintf("expected identifier value 1.2.3.4 but found %s", identOut.Value)) + } + + if authzPBOut.Challenges[0].Validationrecords[0].Hostname != "1.2.3.4" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected hostname 1.2.3.4 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Hostname)) + } + if authzPBOut.Challenges[0].Validationrecords[0].Port != "443" { + test.Assert(t, false, fmt.Sprintf("rehydrated http-01 validation record expected port 443 but found %v", authzPBOut.Challenges[0].Validationrecords[0].Port)) } } @@ -187,12 +219,48 @@ func TestModelToOrderBadJSON(t *testing.T) { _, err := modelToOrder(&orderModel{ Error: badJSON, }) - test.AssertError(t, err, "expected error from modelToOrder") + test.AssertError(t, err, "expected error from modelToOrderv2") var badJSONErr errBadJSON test.AssertErrorWraps(t, err, &badJSONErr) test.AssertEquals(t, string(badJSONErr.json), string(badJSON)) } +// 
TestModelToOrderAuthzs tests that the Authzs field is properly decoded and +// assigned to V2Authorizations. +func TestModelToOrderAuthzs(t *testing.T) { + expectedAuthzIDs := []int64{1, 2, 3, 42} + encodedAuthzs, err := proto.Marshal(&sapb.Authzs{AuthzIDs: expectedAuthzIDs}) + test.AssertNotError(t, err, "failed to marshal authzs") + + testCases := []struct { + name string + model *orderModel + expectedAuthzIDs []int64 + }{ + { + name: "with authzs", + model: &orderModel{Authzs: encodedAuthzs}, + expectedAuthzIDs: expectedAuthzIDs, + }, + { + name: "without authzs", + model: &orderModel{}, + expectedAuthzIDs: nil, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + order, err := modelToOrder(tc.model) + if err != nil { + t.Fatalf("modelToOrder(%v) = %s, want success", tc.model, err) + } + if !slices.Equal(order.V2Authorizations, tc.expectedAuthzIDs) { + t.Errorf("modelToOrder(%v) = %v, want %v", tc.model, order.V2Authorizations, tc.expectedAuthzIDs) + } + }) + } +} + // TestPopulateAttemptedFieldsBadJSON tests that populating a challenge from an // authz2 model with an invalid validation error or an invalid validation record // produces the expected bad JSON error. @@ -227,80 +295,35 @@ func TestPopulateAttemptedFieldsBadJSON(t *testing.T) { } } -func TestScanCertStatusMetadataRow(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - inputStatus := core.CertificateStatus{ - Serial: "ff00ff00", - Status: "good", - } - err := sa.dbMap.Insert(&inputStatus) - test.AssertNotError(t, err, "couldn't insert certificateStatus") - - rows, err := sa.dbMap.Query("SELECT serial, status, ocspLastUpdated FROM certificateStatus") - test.AssertNotError(t, err, "selecting") - - if !rows.Next() { - t.Fatal("got no rows") - } - var certStatus CertStatusMetadata - err = ScanCertStatusMetadataRow(rows, &certStatus) - - if err == nil { - t.Fatal("expected error, got none") - } - expected := "incorrect number of columns in scanned rows: got 3, expected 10" - if err.Error() != expected { - t.Errorf("wrong error: got %q, expected %q", err, expected) - } - - rows, err = sa.dbMap.Query("SELECT id, status, serial, ocspLastUpdated, revokedDate, revokedReason, lastExpirationNagSent, notAfter, isExpired, issuerID FROM certificateStatus") - test.AssertNotError(t, err, "selecting") - - if !rows.Next() { - t.Fatal("got no rows") - } - - err = ScanCertStatusMetadataRow(rows, &certStatus) - - if err == nil { - t.Fatal("expected error, got none") - } - expected = "incorrect column 1 in scanned rows: got \"status\", expected \"serial\"" - if err.Error() != expected { - t.Errorf("wrong error: got %q, expected %q", err, expected) - } -} - func TestCertificatesTableContainsDuplicateSerials(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + ctx := context.Background() + + sa, fc := initSA(t) serialString := core.SerialToString(big.NewInt(1337)) // Insert a certificate with a serial of `1337`. - err := insertCertificate(sa.dbMap, fc, "1337.com", "leet", 1337, 1) + err := insertCertificate(ctx, sa.dbMap, fc, "1337.com", "leet", 1337, 1) test.AssertNotError(t, err, "couldn't insert valid certificate") // This should return the certificate that we just inserted. - certA, err := SelectCertificate(sa.dbMap, serialString) + certA, err := SelectCertificate(ctx, sa.dbMap, serialString) test.AssertNotError(t, err, "received an error for a valid query") // Insert a certificate with a serial of `1337` but for a different // hostname. 
- err = insertCertificate(sa.dbMap, fc, "1337.net", "leet", 1337, 1) + err = insertCertificate(ctx, sa.dbMap, fc, "1337.net", "leet", 1337, 1) test.AssertNotError(t, err, "couldn't insert valid certificate") // Despite a duplicate being present, this shouldn't error. - certB, err := SelectCertificate(sa.dbMap, serialString) + certB, err := SelectCertificate(ctx, sa.dbMap, serialString) test.AssertNotError(t, err, "received an error for a valid query") // Ensure that `certA` and `certB` are the same. - test.AssertByteEquals(t, certA.DER, certB.DER) + test.AssertByteEquals(t, certA.Der, certB.Der) } -func insertCertificate(dbMap *db.WrappedMap, fc clock.FakeClock, hostname, cn string, serial, regID int64) error { +func insertCertificate(ctx context.Context, dbMap *db.WrappedMap, fc clock.FakeClock, hostname, cn string, serial, regID int64) error { serialBigInt := big.NewInt(serial) serialString := core.SerialToString(serialBigInt) @@ -313,33 +336,161 @@ func insertCertificate(dbMap *db.WrappedMap, fc clock.FakeClock, hostname, cn st SerialNumber: serialBigInt, } - testKey := makeKey() - certDer, _ := x509.CreateCertificate(rand.Reader, &template, &template, &testKey.PublicKey, &testKey) + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return fmt.Errorf("generating test key: %w", err) + } + certDer, err := x509.CreateCertificate(rand.Reader, &template, &template, key.Public(), key) + if err != nil { + return fmt.Errorf("generating test cert: %w", err) + } cert := &core.Certificate{ RegistrationID: regID, + Issued: fc.Now(), Serial: serialString, Expires: template.NotAfter, DER: certDer, } - err := dbMap.Insert(cert) + err = dbMap.Insert(ctx, cert) if err != nil { return err } return nil } -func bigIntFromB64(b64 string) *big.Int { - bytes, _ := base64.URLEncoding.DecodeString(b64) - x := big.NewInt(0) - x.SetBytes(bytes) - return x +func TestIncidentSerialModel(t *testing.T) { + ctx := context.Background() + + testIncidentsDbMap, err := DBMapForTest(vars.DBConnIncidentsFullPerms) + test.AssertNotError(t, err, "Couldn't create test dbMap") + defer test.ResetIncidentsTestDatabase(t) + + // Inserting and retrieving a row with only the serial populated should work. + _, err = testIncidentsDbMap.ExecContext(ctx, + "INSERT INTO incident_foo (serial) VALUES (?)", + "1337", + ) + test.AssertNotError(t, err, "inserting row with only serial") + + var res1 incidentSerialModel + err = testIncidentsDbMap.SelectOne( + ctx, + &res1, + "SELECT * FROM incident_foo WHERE serial = ?", + "1337", + ) + test.AssertNotError(t, err, "selecting row with only serial") + + test.AssertEquals(t, res1.Serial, "1337") + test.AssertBoxedNil(t, res1.RegistrationID, "registrationID should be NULL") + test.AssertBoxedNil(t, res1.OrderID, "orderID should be NULL") + test.AssertBoxedNil(t, res1.LastNoticeSent, "lastNoticeSent should be NULL") + + // Inserting and retrieving a row with all columns populated should work. 
+ _, err = testIncidentsDbMap.ExecContext(ctx, + "INSERT INTO incident_foo (serial, registrationID, orderID, lastNoticeSent) VALUES (?, ?, ?, ?)", + "1338", + 1, + 2, + time.Date(2023, 06, 29, 16, 9, 00, 00, time.UTC), + ) + test.AssertNotError(t, err, "inserting row with only serial") + + var res2 incidentSerialModel + err = testIncidentsDbMap.SelectOne( + ctx, + &res2, + "SELECT * FROM incident_foo WHERE serial = ?", + "1338", + ) + test.AssertNotError(t, err, "selecting row with only serial") + + test.AssertEquals(t, res2.Serial, "1338") + test.AssertEquals(t, *res2.RegistrationID, int64(1)) + test.AssertEquals(t, *res2.OrderID, int64(2)) + test.AssertEquals(t, *res2.LastNoticeSent, time.Date(2023, 06, 29, 16, 9, 00, 00, time.UTC)) +} + +func TestAddReplacementOrder(t *testing.T) { + sa, _ := initSA(t) + + oldCertSerial := "1234567890" + orderId := int64(1337) + orderExpires := time.Now().Add(24 * time.Hour).UTC().Truncate(time.Second) + + // Add a replacement order which doesn't exist. + err := addReplacementOrder(ctx, sa.dbMap, oldCertSerial, orderId, orderExpires) + test.AssertNotError(t, err, "addReplacementOrder failed") + + // Fetch the replacement order so we can ensure it was added. + var replacementRow replacementOrderModel + err = sa.dbReadOnlyMap.SelectOne( + ctx, + &replacementRow, + "SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1", + oldCertSerial, + ) + test.AssertNotError(t, err, "SELECT from replacementOrders failed") + test.AssertEquals(t, oldCertSerial, replacementRow.Serial) + test.AssertEquals(t, orderId, replacementRow.OrderID) + test.AssertEquals(t, orderExpires, replacementRow.OrderExpires) + + nextOrderId := int64(1338) + nextOrderExpires := time.Now().Add(48 * time.Hour).UTC().Truncate(time.Second) + + // Add a replacement order which already exists. + err = addReplacementOrder(ctx, sa.dbMap, oldCertSerial, nextOrderId, nextOrderExpires) + test.AssertNotError(t, err, "addReplacementOrder failed") + + // Fetch the replacement order so we can ensure it was updated. + err = sa.dbReadOnlyMap.SelectOne( + ctx, + &replacementRow, + "SELECT * FROM replacementOrders WHERE serial = ? 
LIMIT 1", + oldCertSerial, + ) + test.AssertNotError(t, err, "SELECT from replacementOrders failed") + test.AssertEquals(t, oldCertSerial, replacementRow.Serial) + test.AssertEquals(t, nextOrderId, replacementRow.OrderID) + test.AssertEquals(t, nextOrderExpires, replacementRow.OrderExpires) } -func makeKey() rsa.PrivateKey { - n := bigIntFromB64("n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw==") - e := int(bigIntFromB64("AQAB").Int64()) - d := bigIntFromB64("bWUC9B-EFRIo8kpGfh0ZuyGPvMNKvYWNtB_ikiH9k20eT-O1q_I78eiZkpXxXQ0UTEs2LsNRS-8uJbvQ-A1irkwMSMkK1J3XTGgdrhCku9gRldY7sNA_AKZGh-Q661_42rINLRCe8W-nZ34ui_qOfkLnK9QWDDqpaIsA-bMwWWSDFu2MUBYwkHTMEzLYGqOe04noqeq1hExBTHBOBdkMXiuFhUq1BU6l-DqEiWxqg82sXt2h-LMnT3046AOYJoRioz75tSUQfGCshWTBnP5uDjd18kKhyv07lhfSJdrPdM5Plyl21hsFf4L_mHCuoFau7gdsPfHPxxjVOcOpBrQzwQ==") - p := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - q := bigIntFromB64("uKE2dh-cTf6ERF4k4e_jy78GfPYUIaUyoSSJuBzp3Cubk3OCqs6grT8bR_cu0Dm1MZwWmtdqDyI95HrUeq3MP15vMMON8lHTeZu2lmKvwqW7anV5UzhM1iZ7z4yMkuUwFWoBvyY898EXvRD-hdqRxHlSqAZ192zB3pVFJ0s7pFc=") - return rsa.PrivateKey{PublicKey: rsa.PublicKey{N: n, E: e}, D: d, Primes: []*big.Int{p, q}} +func TestSetReplacementOrderFinalized(t *testing.T) { + sa, _ := initSA(t) + + oldCertSerial := "1234567890" + orderId := int64(1337) + orderExpires := time.Now().Add(24 * time.Hour).UTC().Truncate(time.Second) + + // Mark a non-existent certificate as finalized/replaced. + err := setReplacementOrderFinalized(ctx, sa.dbMap, orderId) + test.AssertNotError(t, err, "setReplacementOrderFinalized failed") + + // Ensure no replacement order was added for some reason. + var replacementRow replacementOrderModel + err = sa.dbReadOnlyMap.SelectOne( + ctx, + &replacementRow, + "SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1", + oldCertSerial, + ) + test.AssertErrorIs(t, err, sql.ErrNoRows) + + // Add a replacement order. + err = addReplacementOrder(ctx, sa.dbMap, oldCertSerial, orderId, orderExpires) + test.AssertNotError(t, err, "addReplacementOrder failed") + + // Mark the certificate as finalized/replaced. + err = setReplacementOrderFinalized(ctx, sa.dbMap, orderId) + test.AssertNotError(t, err, "setReplacementOrderFinalized failed") + + // Fetch the replacement order so we can ensure it was finalized. + err = sa.dbReadOnlyMap.SelectOne( + ctx, + &replacementRow, + "SELECT * FROM replacementOrders WHERE serial = ? 
LIMIT 1", + oldCertSerial, + ) + test.AssertNotError(t, err, "SELECT from replacementOrders failed") + test.Assert(t, replacementRow.Replaced, "replacement order should be marked as finalized") } diff --git a/sa/precertificates.go b/sa/precertificates.go deleted file mode 100644 index 20d0c9ec6a6..00000000000 --- a/sa/precertificates.go +++ /dev/null @@ -1,195 +0,0 @@ -package sa - -import ( - "context" - "crypto/x509" - "fmt" - "time" - - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/letsencrypt/boulder/core" - corepb "github.com/letsencrypt/boulder/core/proto" - "github.com/letsencrypt/boulder/db" - berrors "github.com/letsencrypt/boulder/errors" - bgrpc "github.com/letsencrypt/boulder/grpc" - sapb "github.com/letsencrypt/boulder/sa/proto" -) - -// AddSerial writes a record of a serial number generation to the DB. -func (ssa *SQLStorageAuthority) AddSerial(ctx context.Context, req *sapb.AddSerialRequest) (*emptypb.Empty, error) { - if req.Serial == "" || req.RegID == 0 || req.Created == 0 || req.Expires == 0 { - return nil, errIncompleteRequest - } - err := ssa.dbMap.WithContext(ctx).Insert(&recordedSerialModel{ - Serial: req.Serial, - RegistrationID: req.RegID, - Created: time.Unix(0, req.Created), - Expires: time.Unix(0, req.Expires), - }) - if err != nil { - return nil, err - } - return &emptypb.Empty{}, nil -} - -// GetSerialMetadata returns metadata stored alongside the serial number, -// such as the RegID whose certificate request created that serial, and when -// the certificate with that serial will expire. -func (ssa *SQLStorageAuthority) GetSerialMetadata(ctx context.Context, req *sapb.Serial) (*sapb.SerialMetadata, error) { - if req == nil || req.Serial == "" { - return nil, errIncompleteRequest - } - - if !core.ValidSerial(req.Serial) { - return nil, fmt.Errorf("invalid serial %q", req.Serial) - } - - recordedSerial := recordedSerialModel{} - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne( - &recordedSerial, - "SELECT * FROM serials WHERE serial = ?", - req.Serial, - ) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("serial %q not found", req.Serial) - } - return nil, err - } - - return &sapb.SerialMetadata{ - Serial: recordedSerial.Serial, - RegistrationID: recordedSerial.RegistrationID, - Created: recordedSerial.Created.UnixNano(), - Expires: recordedSerial.Expires.UnixNano(), - }, nil -} - -// AddPrecertificate writes a record of a precertificate generation to the DB. -// Note: this is not idempotent: it does not protect against inserting the same -// certificate multiple times. Calling code needs to first insert the cert's -// serial into the Serials table to ensure uniqueness. 
-func (ssa *SQLStorageAuthority) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*emptypb.Empty, error) { - if len(req.Der) == 0 || req.RegID == 0 || req.Issued == 0 || req.IssuerID == 0 { - return nil, errIncompleteRequest - } - parsed, err := x509.ParseCertificate(req.Der) - if err != nil { - return nil, err - } - serialHex := core.SerialToString(parsed.SerialNumber) - - preCertModel := &precertificateModel{ - Serial: serialHex, - RegistrationID: req.RegID, - DER: req.Der, - Issued: time.Unix(0, req.Issued), - Expires: parsed.NotAfter, - } - - _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - // Select to see if precert exists - var row struct { - Count int64 - } - err := txWithCtx.SelectOne(&row, "SELECT count(1) as count FROM precertificates WHERE serial=?", serialHex) - if err != nil { - return nil, err - } - if row.Count > 0 { - return nil, berrors.DuplicateError("cannot add a duplicate cert") - } - - err = txWithCtx.Insert(preCertModel) - if err != nil { - return nil, err - } - - err = ssa.dbMap.WithContext(ctx).Insert( - &core.CertificateStatus{ - Serial: serialHex, - Status: core.OCSPStatusGood, - OCSPLastUpdated: ssa.clk.Now(), - RevokedDate: time.Time{}, - RevokedReason: 0, - LastExpirationNagSent: time.Time{}, - OCSPResponse: req.Ocsp, - NotAfter: parsed.NotAfter, - IsExpired: false, - IssuerID: req.IssuerID, - }, - ) - if err != nil { - return nil, err - } - - // NOTE(@cpu): When we collect up names to check if an FQDN set exists (e.g. - // that it is a renewal) we use just the DNSNames from the certificate and - // ignore the Subject Common Name (if any). This is a safe assumption because - // if a certificate we issued were to have a Subj. CN not present as a SAN it - // would be a misissuance and miscalculating whether the cert is a renewal or - // not for the purpose of rate limiting is the least of our troubles. - isRenewal, err := ssa.checkFQDNSetExists( - txWithCtx.SelectOne, - parsed.DNSNames) - if err != nil { - return nil, err - } - - err = addIssuedNames(txWithCtx, parsed, isRenewal) - if err != nil { - return nil, err - } - - err = addKeyHash(txWithCtx, parsed) - if err != nil { - return nil, err - } - - return nil, nil - }) - if overallError != nil { - return nil, overallError - } - - // Store the OCSP response in Redis (if configured) on a best effort - // basis. We don't want to fail on an error here while mysql is the - // source of truth. - if ssa.rocspWriteClient != nil { - // Use a new context for the goroutine. We aren't going to wait on - // the goroutine to complete, so we don't want it to be canceled - // when the parent function ends. The rocsp client has a - // configurable timeout that can be set during creation. - rocspCtx := context.Background() - - // Send the response off to redis in a goroutine. - go func() { - err = ssa.storeOCSPRedis(rocspCtx, req.Ocsp, req.IssuerID) - ssa.log.Debugf("failed to store OCSP response in redis: %v", err) - }() - } - return &emptypb.Empty{}, nil -} - -// GetPrecertificate takes a serial number and returns the corresponding -// precertificate, or error if it does not exist. 
-func (ssa *SQLStorageAuthority) GetPrecertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) { - if req == nil || req.Serial == "" { - return nil, errIncompleteRequest - } - if !core.ValidSerial(req.Serial) { - return nil, fmt.Errorf("Invalid precertificate serial %q", req.Serial) - } - cert, err := SelectPrecertificate(ssa.dbMap.WithContext(ctx), req.Serial) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError( - "precertificate with serial %q not found", - req.Serial) - } - return nil, err - } - - return bgrpc.CertToPB(cert), nil -} diff --git a/sa/precertificates_test.go b/sa/precertificates_test.go deleted file mode 100644 index f1b06582b8e..00000000000 --- a/sa/precertificates_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package sa - -import ( - "bytes" - "context" - "crypto/sha256" - "fmt" - "testing" - "time" - - "github.com/letsencrypt/boulder/core" - "github.com/letsencrypt/boulder/db" - berrors "github.com/letsencrypt/boulder/errors" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/test" -) - -// findIssuedName is a small helper test function to directly query the -// issuedNames table for a given name to find a serial (or return an err). -func findIssuedName(dbMap db.OneSelector, name string) (string, error) { - var issuedNamesSerial string - err := dbMap.SelectOne( - &issuedNamesSerial, - `SELECT serial FROM issuedNames - WHERE reversedName = ? - ORDER BY notBefore DESC - LIMIT 1`, - ReverseName(name)) - return issuedNamesSerial, err -} - -func TestAddSerial(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - serial, testCert := test.ThrowAwayCert(t, 1) - - _, err := sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - RegID: reg.Id, - Created: testCert.NotBefore.UnixNano(), - Expires: testCert.NotAfter.UnixNano(), - }) - test.AssertError(t, err, "adding without serial should fail") - - _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - Serial: serial, - Created: testCert.NotBefore.UnixNano(), - Expires: testCert.NotAfter.UnixNano(), - }) - test.AssertError(t, err, "adding without regid should fail") - - _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - Serial: serial, - RegID: reg.Id, - Expires: testCert.NotAfter.UnixNano(), - }) - test.AssertError(t, err, "adding without created should fail") - - _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - Serial: serial, - RegID: reg.Id, - Created: testCert.NotBefore.UnixNano(), - }) - test.AssertError(t, err, "adding without expires should fail") - - _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - Serial: serial, - RegID: reg.Id, - Created: testCert.NotBefore.UnixNano(), - Expires: testCert.NotAfter.UnixNano(), - }) - test.AssertNotError(t, err, "adding serial should have succeeded") -} - -func TestGetSerialMetadata(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - serial, _ := test.ThrowAwayCert(t, 1) - - _, err := sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial}) - test.AssertError(t, err, "getting nonexistent serial should have failed") - - _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ - Serial: serial, - RegID: reg.Id, - Created: clk.Now().UnixNano(), - Expires: clk.Now().Add(time.Hour).UnixNano(), - }) - test.AssertNotError(t, err, "failed to add test serial") - - m, err := 
sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial}) - - test.AssertNotError(t, err, "getting serial should have succeeded") - test.AssertEquals(t, m.Serial, serial) - test.AssertEquals(t, m.RegistrationID, reg.Id) - test.AssertEquals(t, time.Unix(0, m.Created).UTC(), clk.Now()) - test.AssertEquals(t, time.Unix(0, m.Expires).UTC(), clk.Now().Add(time.Hour)) -} - -func TestAddPrecertificate(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - - addPrecert := func(expectIssuedNamesUpdate bool) { - // Create a throw-away self signed certificate with a random name and - // serial number - serial, testCert := test.ThrowAwayCert(t, 1) - - // Add the cert as a precertificate - ocspResp := []byte{0, 0, 1} - regID := reg.Id - issuedTime := time.Date(2018, 4, 1, 7, 0, 0, 0, time.UTC) - _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: regID, - Ocsp: ocspResp, - Issued: issuedTime.UnixNano(), - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test cert") - - // It should have the expected certificate status - certStatus, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) - test.AssertNotError(t, err, "Couldn't get status for test cert") - test.Assert( - t, - bytes.Equal(certStatus.OcspResponse, ocspResp), - fmt.Sprintf("OCSP responses don't match, expected: %x, got %x", certStatus.OcspResponse, ocspResp), - ) - test.AssertEquals(t, clk.Now().UnixNano(), certStatus.OcspLastUpdated) - - issuedNamesSerial, err := findIssuedName(sa.dbMap, testCert.DNSNames[0]) - if expectIssuedNamesUpdate { - // If we expectIssuedNamesUpdate then there should be no err and the - // expected serial - test.AssertNotError(t, err, "expected no err querying issuedNames for precert") - test.AssertEquals(t, issuedNamesSerial, serial) - - // We should also be able to call AddCertificate with the same cert - // without it being an error. The duplicate err on inserting to - // issuedNames should be ignored. - _, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: regID, - Issued: issuedTime.UnixNano(), - }) - test.AssertNotError(t, err, "unexpected err adding final cert after precert") - } else { - // Otherwise we expect an ErrDatabaseOp that indicates NoRows because - // AddCertificate not AddPrecertificate will be updating this table. 
- test.AssertEquals(t, db.IsNoRows(err), true) - } - } - - addPrecert(true) -} - -func TestAddPreCertificateDuplicate(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - - _, testCert := test.ThrowAwayCert(t, 1) - - _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - Issued: clk.Now().UnixNano(), - RegID: reg.Id, - IssuerID: 1, - }) - test.AssertNotError(t, err, "Couldn't add test certificate") - - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - Issued: clk.Now().UnixNano(), - RegID: reg.Id, - IssuerID: 1, - }) - test.AssertDeepEquals(t, err, berrors.DuplicateError("cannot add a duplicate cert")) - -} - -func TestAddPrecertificateIncomplete(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - - // Create a throw-away self signed certificate with a random name and - // serial number - _, testCert := test.ThrowAwayCert(t, 1) - - // Add the cert as a precertificate - ocspResp := []byte{0, 0, 1} - regID := reg.Id - _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: regID, - Ocsp: ocspResp, - Issued: time.Date(2018, 4, 1, 7, 0, 0, 0, time.UTC).UnixNano(), - // Leaving out IssuerID - }) - - test.AssertError(t, err, "Adding precert with no issuer did not fail") -} - -func TestAddPrecertificateKeyHash(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - reg := createWorkingRegistration(t, sa) - - serial, testCert := test.ThrowAwayCert(t, 1) - _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: reg.Id, - Ocsp: []byte{1, 2, 3}, - Issued: testCert.NotBefore.UnixNano(), - IssuerID: 1, - }) - test.AssertNotError(t, err, "failed to add precert") - - var keyHashes []keyHashModel - _, err = sa.dbMap.Select(&keyHashes, "SELECT * FROM keyHashToSerial") - test.AssertNotError(t, err, "failed to retrieve rows from keyHashToSerial") - test.AssertEquals(t, len(keyHashes), 1) - test.AssertEquals(t, keyHashes[0].CertSerial, serial) - test.AssertEquals(t, keyHashes[0].CertNotAfter, testCert.NotAfter) - spkiHash := sha256.Sum256(testCert.RawSubjectPublicKeyInfo) - test.Assert(t, bytes.Equal(keyHashes[0].KeyHash, spkiHash[:]), "spki hash mismatch") -} - -func TestAddPrecertificateStatusFail(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - - serial, testCert := test.ThrowAwayCert(t, 1) - - // Insert an entry for the same serial, so that the normal insert as part of - // AddPrecertificate will fail due to the unique key constraint on serial. 
- err := sa.dbMap.Insert( - &core.CertificateStatus{ - Serial: serial, - Status: core.OCSPStatusGood, - OCSPLastUpdated: sa.clk.Now(), - RevokedDate: time.Time{}, - RevokedReason: 0, - LastExpirationNagSent: time.Time{}, - OCSPResponse: []byte{1, 2, 3}, - NotAfter: testCert.NotAfter, - IsExpired: false, - IssuerID: 1, - }, - ) - test.AssertNotError(t, err, "failed to insert fake status row") - - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: testCert.Raw, - RegID: reg.Id, - Ocsp: []byte{4, 5, 6}, - Issued: testCert.NotBefore.UnixNano(), - IssuerID: 1, - }) - test.AssertError(t, err, "adding precert should fail when inserting ocsp fails") - test.AssertContains(t, err.Error(), "failed to insert *core.CertificateStatus") -} diff --git a/sa/proto/sa.pb.go b/sa/proto/sa.pb.go index 15e2b13f8b7..1de273e3582 100644 --- a/sa/proto/sa.pb.go +++ b/sa/proto/sa.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.36.5 +// protoc v3.20.1 // source: sa.proto package proto @@ -10,9 +10,12 @@ import ( proto "github.com/letsencrypt/boulder/core/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -23,20 +26,17 @@ const ( ) type RegistrationID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RegistrationID) Reset() { *x = RegistrationID{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RegistrationID) String() string { @@ -47,7 +47,7 @@ func (*RegistrationID) ProtoMessage() {} func (x *RegistrationID) ProtoReflect() protoreflect.Message { mi := &file_sa_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -70,20 +70,17 @@ func (x *RegistrationID) GetId() int64 { } type JSONWebKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"` unknownFields protoimpl.UnknownFields - - Jwk []byte `protobuf:"bytes,1,opt,name=jwk,proto3" json:"jwk,omitempty"` + sizeCache protoimpl.SizeCache } func (x *JSONWebKey) Reset() { *x = JSONWebKey{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *JSONWebKey) String() string { @@ -94,7 +91,7 @@ func (*JSONWebKey) ProtoMessage() {} func (x *JSONWebKey) ProtoReflect() protoreflect.Message { mi := 
&file_sa_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -117,20 +114,17 @@ func (x *JSONWebKey) GetJwk() []byte { } type AuthorizationID struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AuthorizationID) Reset() { *x = AuthorizationID{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AuthorizationID) String() string { @@ -141,7 +135,7 @@ func (*AuthorizationID) ProtoMessage() {} func (x *AuthorizationID) ProtoReflect() protoreflect.Message { mi := &file_sa_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -163,95 +157,22 @@ func (x *AuthorizationID) GetId() string { return "" } -type GetPendingAuthorizationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - IdentifierType string `protobuf:"bytes,2,opt,name=identifierType,proto3" json:"identifierType,omitempty"` - IdentifierValue string `protobuf:"bytes,3,opt,name=identifierValue,proto3" json:"identifierValue,omitempty"` - // Result must be valid until at least this Unix timestamp (nanos) - ValidUntil int64 `protobuf:"varint,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"` -} - -func (x *GetPendingAuthorizationRequest) Reset() { - *x = GetPendingAuthorizationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetPendingAuthorizationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPendingAuthorizationRequest) ProtoMessage() {} - -func (x *GetPendingAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPendingAuthorizationRequest.ProtoReflect.Descriptor instead. 
-func (*GetPendingAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{3} -} - -func (x *GetPendingAuthorizationRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *GetPendingAuthorizationRequest) GetIdentifierType() string { - if x != nil { - return x.IdentifierType - } - return "" -} - -func (x *GetPendingAuthorizationRequest) GetIdentifierValue() string { - if x != nil { - return x.IdentifierValue - } - return "" -} - -func (x *GetPendingAuthorizationRequest) GetValidUntil() int64 { - if x != nil { - return x.ValidUntil - } - return 0 -} - type GetValidAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` - Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds) + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,6,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + ValidUntil *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"` + Profile string `protobuf:"bytes,5,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetValidAuthorizationsRequest) Reset() { *x = GetValidAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetValidAuthorizationsRequest) String() string { @@ -261,8 +182,8 @@ func (x *GetValidAuthorizationsRequest) String() string { func (*GetValidAuthorizationsRequest) ProtoMessage() {} func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_sa_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -274,7 +195,7 @@ func (x *GetValidAuthorizationsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetValidAuthorizationsRequest.ProtoReflect.Descriptor instead. 
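// Reviewer note: GetValidAuthorizationsRequest replaces the bare domain list
// and Unix-nano "now" with typed identifiers, a Timestamp cutoff, and a
// profile name. A minimal caller sketch, assuming core's Identifier message
// exposes the usual type/value pair (those field names are not shown in this
// hunk and are illustrative, as is the corepb alias):
//
//	req := &sapb.GetValidAuthorizationsRequest{
//		RegistrationID: regID,
//		Identifiers: []*corepb.Identifier{
//			{Type: "dns", Value: "example.com"}, // assumed field names
//		},
//		ValidUntil: timestamppb.New(time.Now().Add(24 * time.Hour)),
//		Profile:    "default", // hypothetical profile name
//	}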
func (*GetValidAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{4} + return file_sa_proto_rawDescGZIP(), []int{3} } func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 { @@ -284,82 +205,39 @@ func (x *GetValidAuthorizationsRequest) GetRegistrationID() int64 { return 0 } -func (x *GetValidAuthorizationsRequest) GetDomains() []string { +func (x *GetValidAuthorizationsRequest) GetIdentifiers() []*proto.Identifier { if x != nil { - return x.Domains + return x.Identifiers } return nil } -func (x *GetValidAuthorizationsRequest) GetNow() int64 { +func (x *GetValidAuthorizationsRequest) GetValidUntil() *timestamppb.Timestamp { if x != nil { - return x.Now - } - return 0 -} - -type ValidAuthorizations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Valid []*ValidAuthorizations_MapElement `protobuf:"bytes,1,rep,name=valid,proto3" json:"valid,omitempty"` -} - -func (x *ValidAuthorizations) Reset() { - *x = ValidAuthorizations{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidAuthorizations) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidAuthorizations) ProtoMessage() {} - -func (x *ValidAuthorizations) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms + return x.ValidUntil } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidAuthorizations.ProtoReflect.Descriptor instead. -func (*ValidAuthorizations) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{5} + return nil } -func (x *ValidAuthorizations) GetValid() []*ValidAuthorizations_MapElement { +func (x *GetValidAuthorizationsRequest) GetProfile() string { if x != nil { - return x.Valid + return x.Profile } - return nil + return "" } type Serial struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Serial) Reset() { *x = Serial{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Serial) String() string { @@ -369,8 +247,8 @@ func (x *Serial) String() string { func (*Serial) ProtoMessage() {} func (x *Serial) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_sa_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -382,7 +260,7 @@ func (x *Serial) ProtoReflect() protoreflect.Message { // Deprecated: Use Serial.ProtoReflect.Descriptor instead. 
func (*Serial) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{6} + return file_sa_proto_rawDescGZIP(), []int{4} } func (x *Serial) GetSerial() string { @@ -393,23 +271,21 @@ func (x *Serial) GetSerial() string { } type SerialMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Created *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created,proto3" json:"created,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires,proto3" json:"expires,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SerialMetadata) Reset() { *x = SerialMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SerialMetadata) String() string { @@ -419,8 +295,8 @@ func (x *SerialMetadata) String() string { func (*SerialMetadata) ProtoMessage() {} func (x *SerialMetadata) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_sa_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -432,7 +308,7 @@ func (x *SerialMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use SerialMetadata.ProtoReflect.Descriptor instead. 
func (*SerialMetadata) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{7} + return file_sa_proto_rawDescGZIP(), []int{5} } func (x *SerialMetadata) GetSerial() string { @@ -449,36 +325,33 @@ func (x *SerialMetadata) GetRegistrationID() int64 { return 0 } -func (x *SerialMetadata) GetCreated() int64 { +func (x *SerialMetadata) GetCreated() *timestamppb.Timestamp { if x != nil { return x.Created } - return 0 + return nil } -func (x *SerialMetadata) GetExpires() int64 { +func (x *SerialMetadata) GetExpires() *timestamppb.Timestamp { if x != nil { return x.Expires } - return 0 + return nil } type Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Earliest *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=earliest,proto3" json:"earliest,omitempty"` + Latest *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=latest,proto3" json:"latest,omitempty"` unknownFields protoimpl.UnknownFields - - Earliest int64 `protobuf:"varint,1,opt,name=earliest,proto3" json:"earliest,omitempty"` // Unix timestamp (nanoseconds) - Latest int64 `protobuf:"varint,2,opt,name=latest,proto3" json:"latest,omitempty"` // Unix timestamp (nanoseconds) + sizeCache protoimpl.SizeCache } func (x *Range) Reset() { *x = Range{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Range) String() string { @@ -488,8 +361,8 @@ func (x *Range) String() string { func (*Range) ProtoMessage() {} func (x *Range) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_sa_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -501,38 +374,35 @@ func (x *Range) ProtoReflect() protoreflect.Message { // Deprecated: Use Range.ProtoReflect.Descriptor instead. 
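// Reviewer note: SerialMetadata.Created/Expires and Range.Earliest/Latest are
// now *timestamppb.Timestamp values (with fresh field numbers) rather than
// int64 Unix nanoseconds. Conversion at the edges uses the standard
// well-known-type helpers:
//
//	created := timestamppb.New(time.Now()) // time.Time -> *timestamppb.Timestamp
//	t := created.AsTime()                  // *timestamppb.Timestamp -> time.Time (UTC)
//
// One behavioral nuance: "unset" is now a nil pointer rather than a zero
// int64, and AsTime on a nil Timestamp yields the Unix epoch, so callers
// should nil-check (or use IsValid) instead of comparing against 0.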
func (*Range) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{8} + return file_sa_proto_rawDescGZIP(), []int{6} } -func (x *Range) GetEarliest() int64 { +func (x *Range) GetEarliest() *timestamppb.Timestamp { if x != nil { return x.Earliest } - return 0 + return nil } -func (x *Range) GetLatest() int64 { +func (x *Range) GetLatest() *timestamppb.Timestamp { if x != nil { return x.Latest } - return 0 + return nil } type Count struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` unknownFields protoimpl.UnknownFields - - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Count) Reset() { *x = Count{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_sa_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Count) String() string { @@ -542,8 +412,8 @@ func (x *Count) String() string { func (*Count) ProtoMessage() {} func (x *Count) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_sa_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -555,7 +425,7 @@ func (x *Count) ProtoReflect() protoreflect.Message { // Deprecated: Use Count.ProtoReflect.Descriptor instead. func (*Count) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{9} + return file_sa_proto_rawDescGZIP(), []int{7} } func (x *Count) GetCount() int64 { @@ -565,33 +435,29 @@ func (x *Count) GetCount() int64 { return 0 } -type CountCertificatesByNamesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Timestamps struct { + state protoimpl.MessageState `protogen:"open.v1"` + Timestamps []*timestamppb.Timestamp `protobuf:"bytes,2,rep,name=timestamps,proto3" json:"timestamps,omitempty"` unknownFields protoimpl.UnknownFields - - Range *Range `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"` - Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *CountCertificatesByNamesRequest) Reset() { - *x = CountCertificatesByNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *Timestamps) Reset() { + *x = Timestamps{} + mi := &file_sa_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountCertificatesByNamesRequest) String() string { +func (x *Timestamps) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountCertificatesByNamesRequest) ProtoMessage() {} +func (*Timestamps) ProtoMessage() {} -func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Timestamps) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -601,51 +467,45 @@ func (x 
*CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead. -func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{10} -} - -func (x *CountCertificatesByNamesRequest) GetRange() *Range { - if x != nil { - return x.Range - } - return nil +// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead. +func (*Timestamps) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{8} } -func (x *CountCertificatesByNamesRequest) GetNames() []string { +func (x *Timestamps) GetTimestamps() []*timestamppb.Timestamp { if x != nil { - return x.Names + return x.Timestamps } return nil } -type CountByNames struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CountInvalidAuthorizationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 5 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifier *proto.Identifier `protobuf:"bytes,4,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Count authorizations that expire in this range. + Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` unknownFields protoimpl.UnknownFields - - Counts map[string]int64 `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } -func (x *CountByNames) Reset() { - *x = CountByNames{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CountInvalidAuthorizationsRequest) Reset() { + *x = CountInvalidAuthorizationsRequest{} + mi := &file_sa_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountByNames) String() string { +func (x *CountInvalidAuthorizationsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountByNames) ProtoMessage() {} +func (*CountInvalidAuthorizationsRequest) ProtoMessage() {} -func (x *CountByNames) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -655,45 +515,57 @@ func (x *CountByNames) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CountByNames.ProtoReflect.Descriptor instead. -func (*CountByNames) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{11} +// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead. 
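// Reviewer note: the per-name certificate-counting surface
// (CountCertificatesByNamesRequest/CountByNames and its counts map) is removed
// outright. In its place, a small generic Timestamps message (a repeated
// google.protobuf.Timestamp) takes message slot 8, and
// CountInvalidAuthorizationsRequest now keys on a typed Identifier (field 4)
// where it previously took a hostname string.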
+func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{9} } -func (x *CountByNames) GetCounts() map[string]int64 { +func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +func (x *CountInvalidAuthorizationsRequest) GetIdentifier() *proto.Identifier { if x != nil { - return x.Counts + return x.Identifier } return nil } -type CountRegistrationsByIPRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CountInvalidAuthorizationsRequest) GetRange() *Range { + if x != nil { + return x.Range + } + return nil +} - Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` +type CountFQDNSetsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identifiers []*proto.Identifier `protobuf:"bytes,5,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + Window *durationpb.Duration `protobuf:"bytes,3,opt,name=window,proto3" json:"window,omitempty"` + Limit int64 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CountRegistrationsByIPRequest) Reset() { - *x = CountRegistrationsByIPRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CountFQDNSetsRequest) Reset() { + *x = CountFQDNSetsRequest{} + mi := &file_sa_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountRegistrationsByIPRequest) String() string { +func (x *CountFQDNSetsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountRegistrationsByIPRequest) ProtoMessage() {} +func (*CountFQDNSetsRequest) ProtoMessage() {} -func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -703,54 +575,55 @@ func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead. -func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{12} +// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead. 
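// Reviewer note: CountFQDNSetsRequest likewise trades its int64 window and
// domain list for a *durationpb.Duration window, typed identifiers, and an
// explicit limit. The Duration well-known type converts the same way the
// Timestamp does:
//
//	window := durationpb.New(7 * 24 * time.Hour) // time.Duration -> proto
//	d := window.AsDuration()                     // proto -> time.Duration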
+func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{10} } -func (x *CountRegistrationsByIPRequest) GetIp() []byte { +func (x *CountFQDNSetsRequest) GetIdentifiers() []*proto.Identifier { if x != nil { - return x.Ip + return x.Identifiers } return nil } -func (x *CountRegistrationsByIPRequest) GetRange() *Range { +func (x *CountFQDNSetsRequest) GetWindow() *durationpb.Duration { if x != nil { - return x.Range + return x.Window } return nil } -type CountInvalidAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CountFQDNSetsRequest) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"` - // Count authorizations that expire in this range. - Range *Range `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` +type FQDNSetExistsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identifiers []*proto.Identifier `protobuf:"bytes,2,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CountInvalidAuthorizationsRequest) Reset() { - *x = CountInvalidAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *FQDNSetExistsRequest) Reset() { + *x = FQDNSetExistsRequest{} + mi := &file_sa_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountInvalidAuthorizationsRequest) String() string { +func (x *FQDNSetExistsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountInvalidAuthorizationsRequest) ProtoMessage() {} +func (*FQDNSetExistsRequest) ProtoMessage() {} -func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { +func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -760,59 +633,89 @@ func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{13} +// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead. 
+func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{11} } -func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 { +func (x *FQDNSetExistsRequest) GetIdentifiers() []*proto.Identifier { if x != nil { - return x.RegistrationID + return x.Identifiers } - return 0 + return nil } -func (x *CountInvalidAuthorizationsRequest) GetHostname() string { +type Exists struct { + state protoimpl.MessageState `protogen:"open.v1"` + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Exists) Reset() { + *x = Exists{} + mi := &file_sa_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Exists) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exists) ProtoMessage() {} + +func (x *Exists) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[12] if x != nil { - return x.Hostname + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *CountInvalidAuthorizationsRequest) GetRange() *Range { +// Deprecated: Use Exists.ProtoReflect.Descriptor instead. +func (*Exists) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{12} +} + +func (x *Exists) GetExists() bool { if x != nil { - return x.Range + return x.Exists } - return nil + return false } -type CountOrdersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type AddSerialRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"` + Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` + Created *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created,proto3" json:"created,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires,proto3" json:"expires,omitempty"` unknownFields protoimpl.UnknownFields - - AccountID int64 `protobuf:"varint,1,opt,name=accountID,proto3" json:"accountID,omitempty"` - Range *Range `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *CountOrdersRequest) Reset() { - *x = CountOrdersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *AddSerialRequest) Reset() { + *x = AddSerialRequest{} + mi := &file_sa_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountOrdersRequest) String() string { +func (x *AddSerialRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountOrdersRequest) ProtoMessage() {} +func (*AddSerialRequest) ProtoMessage() {} -func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[13] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -822,52 +725,66 @@ func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
CountOrdersRequest.ProtoReflect.Descriptor instead. -func (*CountOrdersRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{14} +// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead. +func (*AddSerialRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{13} } -func (x *CountOrdersRequest) GetAccountID() int64 { +func (x *AddSerialRequest) GetRegID() int64 { if x != nil { - return x.AccountID + return x.RegID } return 0 } -func (x *CountOrdersRequest) GetRange() *Range { +func (x *AddSerialRequest) GetSerial() string { if x != nil { - return x.Range + return x.Serial + } + return "" +} + +func (x *AddSerialRequest) GetCreated() *timestamppb.Timestamp { + if x != nil { + return x.Created } return nil } -type CountFQDNSetsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *AddSerialRequest) GetExpires() *timestamppb.Timestamp { + if x != nil { + return x.Expires + } + return nil +} - Window int64 `protobuf:"varint,1,opt,name=window,proto3" json:"window,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` +type AddCertificateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 8 + Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"` + RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"` + Issued *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=issued,proto3" json:"issued,omitempty"` + IssuerNameID int64 `protobuf:"varint,5,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` // https://pkg.go.dev/github.com/letsencrypt/boulder/issuance#IssuerNameID + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CountFQDNSetsRequest) Reset() { - *x = CountFQDNSetsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *AddCertificateRequest) Reset() { + *x = AddCertificateRequest{} + mi := &file_sa_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CountFQDNSetsRequest) String() string { +func (x *AddCertificateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CountFQDNSetsRequest) ProtoMessage() {} +func (*AddCertificateRequest) ProtoMessage() {} -func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -877,51 +794,118 @@ func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead. -func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{15} +// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead. 
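// Reviewer note: AddCertificateRequest sheds the inline OCSP response (and
// with it the assumption that the initial status is "good"), as well as the
// orphan-finder caveat about backdated issuance. Issued is now a
// *timestamppb.Timestamp at field 7, and the issuer is identified by
// IssuerNameID (per the linked issuance.IssuerNameID docs) rather than the
// old IssuerID field.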
+func (*AddCertificateRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{14} } -func (x *CountFQDNSetsRequest) GetWindow() int64 { +func (x *AddCertificateRequest) GetDer() []byte { if x != nil { - return x.Window + return x.Der + } + return nil +} + +func (x *AddCertificateRequest) GetRegID() int64 { + if x != nil { + return x.RegID } return 0 } -func (x *CountFQDNSetsRequest) GetDomains() []string { +func (x *AddCertificateRequest) GetIssued() *timestamppb.Timestamp { if x != nil { - return x.Domains + return x.Issued } return nil } -type FQDNSetExistsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *AddCertificateRequest) GetIssuerNameID() int64 { + if x != nil { + return x.IssuerNameID + } + return 0 +} + +type OrderRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"` +func (x *OrderRequest) Reset() { + *x = OrderRequest{} + mi := &file_sa_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FQDNSetExistsRequest) Reset() { - *x = FQDNSetExistsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[16] +func (x *OrderRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OrderRequest) ProtoMessage() {} + +func (x *OrderRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[15] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) } -func (x *FQDNSetExistsRequest) String() string { +// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead. +func (*OrderRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{15} +} + +func (x *OrderRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type NewOrderRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 10 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expires,proto3" json:"expires,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,9,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + // A list of already-existing authorization IDs that should be associated with + // the new Order object. This is for authorization reuse. + V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` + CertificateProfileName string `protobuf:"bytes,7,opt,name=certificateProfileName,proto3" json:"certificateProfileName,omitempty"` + // Replaces is the ARI certificate Id that this order replaces. + Replaces string `protobuf:"bytes,8,opt,name=replaces,proto3" json:"replaces,omitempty"` + // ReplacesSerial is the serial number of the certificate that this order + // replaces. 
+ ReplacesSerial string `protobuf:"bytes,6,opt,name=replacesSerial,proto3" json:"replacesSerial,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NewOrderRequest) Reset() { + *x = NewOrderRequest{} + mi := &file_sa_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NewOrderRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FQDNSetExistsRequest) ProtoMessage() {} +func (*NewOrderRequest) ProtoMessage() {} -func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { +func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { mi := &file_sa_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -931,45 +915,88 @@ func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead. -func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead. +func (*NewOrderRequest) Descriptor() ([]byte, []int) { return file_sa_proto_rawDescGZIP(), []int{16} } -func (x *FQDNSetExistsRequest) GetDomains() []string { +func (x *NewOrderRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +func (x *NewOrderRequest) GetExpires() *timestamppb.Timestamp { if x != nil { - return x.Domains + return x.Expires } return nil } -type PreviousCertificateExistsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *NewOrderRequest) GetIdentifiers() []*proto.Identifier { + if x != nil { + return x.Identifiers + } + return nil +} - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"` +func (x *NewOrderRequest) GetV2Authorizations() []int64 { + if x != nil { + return x.V2Authorizations + } + return nil } -func (x *PreviousCertificateExistsRequest) Reset() { - *x = PreviousCertificateExistsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *NewOrderRequest) GetCertificateProfileName() string { + if x != nil { + return x.CertificateProfileName + } + return "" +} + +func (x *NewOrderRequest) GetReplaces() string { + if x != nil { + return x.Replaces + } + return "" +} + +func (x *NewOrderRequest) GetReplacesSerial() string { + if x != nil { + return x.ReplacesSerial } + return "" +} + +// NewAuthzRequest represents a request to create an authorization. 
+type NewAuthzRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identifier *proto.Identifier `protobuf:"bytes,12,opt,name=identifier,proto3" json:"identifier,omitempty"` + RegistrationID int64 `protobuf:"varint,3,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=expires,proto3" json:"expires,omitempty"` + ChallengeTypes []string `protobuf:"bytes,10,rep,name=challengeTypes,proto3" json:"challengeTypes,omitempty"` + Token string `protobuf:"bytes,11,opt,name=token,proto3" json:"token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *PreviousCertificateExistsRequest) String() string { +func (x *NewAuthzRequest) Reset() { + *x = NewAuthzRequest{} + mi := &file_sa_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NewAuthzRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PreviousCertificateExistsRequest) ProtoMessage() {} +func (*NewAuthzRequest) ProtoMessage() {} -func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message { +func (x *NewAuthzRequest) ProtoReflect() protoreflect.Message { mi := &file_sa_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -979,53 +1006,129 @@ func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead. -func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use NewAuthzRequest.ProtoReflect.Descriptor instead. +func (*NewAuthzRequest) Descriptor() ([]byte, []int) { return file_sa_proto_rawDescGZIP(), []int{17} } -func (x *PreviousCertificateExistsRequest) GetDomain() string { +func (x *NewAuthzRequest) GetIdentifier() *proto.Identifier { if x != nil { - return x.Domain + return x.Identifier } - return "" + return nil } -func (x *PreviousCertificateExistsRequest) GetRegID() int64 { +func (x *NewAuthzRequest) GetRegistrationID() int64 { if x != nil { - return x.RegID + return x.RegistrationID } return 0 } -type Exists struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` +func (x *NewAuthzRequest) GetExpires() *timestamppb.Timestamp { + if x != nil { + return x.Expires + } + return nil } -func (x *Exists) Reset() { - *x = Exists{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *NewAuthzRequest) GetChallengeTypes() []string { + if x != nil { + return x.ChallengeTypes } + return nil } -func (x *Exists) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *NewAuthzRequest) GetToken() string { + if x != nil { + return x.Token + } + return "" } -func (*Exists) ProtoMessage() {} +type NewOrderAndAuthzsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"` + // Authorizations to be newly created alongside the order, and associated with it. 
+ // These will be combined with any reused authorizations (newOrder.v2Authorizations) + // to make the overall set of authorizations for the order. This field and + // newOrder.v2Authorizations may both be present, or only one of the two may be + // present, but they may not both be absent. + NewAuthzs []*NewAuthzRequest `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} -func (x *Exists) ProtoReflect() protoreflect.Message { +func (x *NewOrderAndAuthzsRequest) Reset() { + *x = NewOrderAndAuthzsRequest{} mi := &file_sa_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NewOrderAndAuthzsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewOrderAndAuthzsRequest) ProtoMessage() {} + +func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead. +func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{18} +} + +func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest { + if x != nil { + return x.NewOrder + } + return nil +} + +func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*NewAuthzRequest { + if x != nil { + return x.NewAuthzs + } + return nil +} + +type SetOrderErrorRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetOrderErrorRequest) Reset() { + *x = SetOrderErrorRequest{} + mi := &file_sa_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetOrderErrorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetOrderErrorRequest) ProtoMessage() {} + +func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms @@ -1033,47 +1136,506 @@ func (x *Exists) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Exists.ProtoReflect.Descriptor instead. -func (*Exists) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{18} +// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead. 
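// Reviewer note: NewOrderAndAuthzsRequest creates an order and its brand-new
// authorizations in one call; reused authorizations ride along as IDs in
// newOrder.v2Authorizations. Per the field comment above, at least one of the
// two must be non-empty. An illustrative construction (the surrounding
// variable names are assumed):
//
//	req := &sapb.NewOrderAndAuthzsRequest{
//		NewOrder: &sapb.NewOrderRequest{
//			RegistrationID:   regID,
//			Expires:          timestamppb.New(orderExpiry),
//			Identifiers:      idents,
//			V2Authorizations: reusedAuthzIDs, // authorization reuse
//		},
//		NewAuthzs: pendingAuthzs, // []*sapb.NewAuthzRequest for the remainder
//	}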
+func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{19} +} + +func (x *SetOrderErrorRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails { + if x != nil { + return x.Error + } + return nil +} + +type GetValidOrderAuthorizationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetValidOrderAuthorizationsRequest) Reset() { + *x = GetValidOrderAuthorizationsRequest{} + mi := &file_sa_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetValidOrderAuthorizationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {} + +func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead. +func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{20} +} + +func (x *GetValidOrderAuthorizationsRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 { + if x != nil { + return x.AcctID + } + return 0 +} + +type GetOrderForNamesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 4 + AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,3,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetOrderForNamesRequest) Reset() { + *x = GetOrderForNamesRequest{} + mi := &file_sa_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetOrderForNamesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrderForNamesRequest) ProtoMessage() {} + +func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead. 
+func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{21} +} + +func (x *GetOrderForNamesRequest) GetAcctID() int64 { + if x != nil { + return x.AcctID + } + return 0 +} + +func (x *GetOrderForNamesRequest) GetIdentifiers() []*proto.Identifier { + if x != nil { + return x.Identifiers + } + return nil +} + +type FinalizeOrderRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FinalizeOrderRequest) Reset() { + *x = FinalizeOrderRequest{} + mi := &file_sa_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FinalizeOrderRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FinalizeOrderRequest) ProtoMessage() {} + +func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead. +func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{22} +} + +func (x *FinalizeOrderRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *FinalizeOrderRequest) GetCertificateSerial() string { + if x != nil { + return x.CertificateSerial + } + return "" +} + +type GetAuthorizationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,6,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + ValidUntil *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=validUntil,proto3" json:"validUntil,omitempty"` + Profile string `protobuf:"bytes,5,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAuthorizationsRequest) Reset() { + *x = GetAuthorizationsRequest{} + mi := &file_sa_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAuthorizationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizationsRequest) ProtoMessage() {} + +func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead. 
+func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{23} +} + +func (x *GetAuthorizationsRequest) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +func (x *GetAuthorizationsRequest) GetIdentifiers() []*proto.Identifier { + if x != nil { + return x.Identifiers + } + return nil +} + +func (x *GetAuthorizationsRequest) GetValidUntil() *timestamppb.Timestamp { + if x != nil { + return x.ValidUntil + } + return nil +} + +func (x *GetAuthorizationsRequest) GetProfile() string { + if x != nil { + return x.Profile + } + return "" +} + +type Authorizations struct { + state protoimpl.MessageState `protogen:"open.v1"` + Authzs []*proto.Authorization `protobuf:"bytes,2,rep,name=authzs,proto3" json:"authzs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Authorizations) Reset() { + *x = Authorizations{} + mi := &file_sa_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Authorizations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Authorizations) ProtoMessage() {} + +func (x *Authorizations) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead. +func (*Authorizations) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{24} +} + +func (x *Authorizations) GetAuthzs() []*proto.Authorization { + if x != nil { + return x.Authzs + } + return nil +} + +type AuthorizationIDs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AuthorizationIDs) Reset() { + *x = AuthorizationIDs{} + mi := &file_sa_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AuthorizationIDs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizationIDs) ProtoMessage() {} + +func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead. 
+func (*AuthorizationIDs) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{25} +} + +func (x *AuthorizationIDs) GetIds() []string { + if x != nil { + return x.Ids + } + return nil +} + +type AuthorizationID2 struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AuthorizationID2) Reset() { + *x = AuthorizationID2{} + mi := &file_sa_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AuthorizationID2) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthorizationID2) ProtoMessage() {} + +func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead. +func (*AuthorizationID2) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{26} +} + +func (x *AuthorizationID2) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type RevokeCertificateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 10 + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` + Date *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=date,proto3" json:"date,omitempty"` + Backdate *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=backdate,proto3" json:"backdate,omitempty"` + Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` + IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"` + ShardIdx int64 `protobuf:"varint,7,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RevokeCertificateRequest) Reset() { + *x = RevokeCertificateRequest{} + mi := &file_sa_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RevokeCertificateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeCertificateRequest) ProtoMessage() {} + +func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead. 
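// Reviewer note: revocation moves to Timestamps as well; Date and Backdate
// occupy fresh field numbers 8 and 9 (3 and 5 stay retired, per the "Next
// unused field number: 10" comment), and the new ShardIdx field lets the
// caller target a specific CRL shard.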
+func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{27} +} + +func (x *RevokeCertificateRequest) GetSerial() string { + if x != nil { + return x.Serial + } + return "" +} + +func (x *RevokeCertificateRequest) GetReason() int64 { + if x != nil { + return x.Reason + } + return 0 +} + +func (x *RevokeCertificateRequest) GetDate() *timestamppb.Timestamp { + if x != nil { + return x.Date + } + return nil +} + +func (x *RevokeCertificateRequest) GetBackdate() *timestamppb.Timestamp { + if x != nil { + return x.Backdate + } + return nil +} + +func (x *RevokeCertificateRequest) GetResponse() []byte { + if x != nil { + return x.Response + } + return nil +} + +func (x *RevokeCertificateRequest) GetIssuerID() int64 { + if x != nil { + return x.IssuerID + } + return 0 } -func (x *Exists) GetExists() bool { +func (x *RevokeCertificateRequest) GetShardIdx() int64 { if x != nil { - return x.Exists + return x.ShardIdx } - return false + return 0 } -type AddSerialRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegID int64 `protobuf:"varint,1,opt,name=regID,proto3" json:"regID,omitempty"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial,omitempty"` - Created int64 `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` // Unix timestamp (nanoseconds) - Expires int64 `protobuf:"varint,4,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) +type FinalizeAuthorizationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 10 + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Expires *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=expires,proto3" json:"expires,omitempty"` + Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"` + ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"` + ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"` + AttemptedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *AddSerialRequest) Reset() { - *x = AddSerialRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *FinalizeAuthorizationRequest) Reset() { + *x = FinalizeAuthorizationRequest{} + mi := &file_sa_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddSerialRequest) String() string { +func (x *FinalizeAuthorizationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddSerialRequest) ProtoMessage() {} +func (*FinalizeAuthorizationRequest) ProtoMessage() {} -func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { +func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[28] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1083,74 +1645,88 
@@ func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead. -func (*AddSerialRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{19} +// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead. +func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{28} } -func (x *AddSerialRequest) GetRegID() int64 { +func (x *FinalizeAuthorizationRequest) GetId() int64 { if x != nil { - return x.RegID + return x.Id } return 0 } -func (x *AddSerialRequest) GetSerial() string { +func (x *FinalizeAuthorizationRequest) GetStatus() string { if x != nil { - return x.Serial + return x.Status } return "" } -func (x *AddSerialRequest) GetCreated() int64 { +func (x *FinalizeAuthorizationRequest) GetExpires() *timestamppb.Timestamp { if x != nil { - return x.Created + return x.Expires } - return 0 + return nil } -func (x *AddSerialRequest) GetExpires() int64 { +func (x *FinalizeAuthorizationRequest) GetAttempted() string { if x != nil { - return x.Expires + return x.Attempted } - return 0 + return "" } -type AddCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord { + if x != nil { + return x.ValidationRecords + } + return nil +} - Der []byte `protobuf:"bytes,1,opt,name=der,proto3" json:"der,omitempty"` - RegID int64 `protobuf:"varint,2,opt,name=regID,proto3" json:"regID,omitempty"` - // A signed OCSP response for the certificate contained in "der". - // Note: The certificate status in the OCSP response is assumed to be 0 (good). - Ocsp []byte `protobuf:"bytes,3,opt,name=ocsp,proto3" json:"ocsp,omitempty"` - // An issued time. When not present the SA defaults to using - // the current time. 
The orphan-finder uses this parameter to add - // certificates with the correct historic issued date - Issued int64 `protobuf:"varint,4,opt,name=issued,proto3" json:"issued,omitempty"` - IssuerID int64 `protobuf:"varint,5,opt,name=issuerID,proto3" json:"issuerID,omitempty"` +func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails { + if x != nil { + return x.ValidationError + } + return nil } -func (x *AddCertificateRequest) Reset() { - *x = AddCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *FinalizeAuthorizationRequest) GetAttemptedAt() *timestamppb.Timestamp { + if x != nil { + return x.AttemptedAt } + return nil } -func (x *AddCertificateRequest) String() string { +type AddBlockedKeyRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` + Added *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=added,proto3" json:"added,omitempty"` + Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` + Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"` + RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AddBlockedKeyRequest) Reset() { + *x = AddBlockedKeyRequest{} + mi := &file_sa_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddBlockedKeyRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddCertificateRequest) ProtoMessage() {} +func (*AddBlockedKeyRequest) ProtoMessage() {} -func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[29] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1160,72 +1736,69 @@ func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead. -func (*AddCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{20} +// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead. 
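The regenerated FinalizeAuthorizationRequest and AddBlockedKeyRequest above replace the old int64 Unix-nanosecond fields (expires, attemptedAt, added) with google.protobuf.Timestamp. A minimal sketch of how a caller might build the new request, assuming boulder's usual sapb import alias; the ID, status, and challenge-type values are hypothetical:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        now := time.Now()
        // Timestamps are now proto messages built via timestamppb.New,
        // not raw nanosecond counts.
        req := &sapb.FinalizeAuthorizationRequest{
            Id:          42,      // hypothetical authorization ID
            Status:      "valid", // hypothetical final status
            Expires:     timestamppb.New(now.Add(30 * 24 * time.Hour)),
            Attempted:   "http-01",
            AttemptedAt: timestamppb.New(now),
        }
        fmt.Println(req.GetExpires().AsTime(), req.GetAttemptedAt().AsTime())
    }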
+func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{29} } -func (x *AddCertificateRequest) GetDer() []byte { +func (x *AddBlockedKeyRequest) GetKeyHash() []byte { if x != nil { - return x.Der + return x.KeyHash } return nil } -func (x *AddCertificateRequest) GetRegID() int64 { +func (x *AddBlockedKeyRequest) GetAdded() *timestamppb.Timestamp { if x != nil { - return x.RegID + return x.Added } - return 0 + return nil } -func (x *AddCertificateRequest) GetOcsp() []byte { +func (x *AddBlockedKeyRequest) GetSource() string { if x != nil { - return x.Ocsp + return x.Source } - return nil + return "" } -func (x *AddCertificateRequest) GetIssued() int64 { +func (x *AddBlockedKeyRequest) GetComment() string { if x != nil { - return x.Issued + return x.Comment } - return 0 + return "" } -func (x *AddCertificateRequest) GetIssuerID() int64 { +func (x *AddBlockedKeyRequest) GetRevokedBy() int64 { if x != nil { - return x.IssuerID + return x.RevokedBy } return 0 } -type AddCertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SPKIHash struct { + state protoimpl.MessageState `protogen:"open.v1"` + KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *AddCertificateResponse) Reset() { - *x = AddCertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SPKIHash) Reset() { + *x = SPKIHash{} + mi := &file_sa_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddCertificateResponse) String() string { +func (x *SPKIHash) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddCertificateResponse) ProtoMessage() {} +func (*SPKIHash) ProtoMessage() {} -func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SPKIHash) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[30] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1235,44 +1808,46 @@ func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead. -func (*AddCertificateResponse) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{21} +// Deprecated: Use SPKIHash.ProtoReflect.Descriptor instead. 
+func (*SPKIHash) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{30} } -func (x *AddCertificateResponse) GetDigest() string { +func (x *SPKIHash) GetKeyHash() []byte { if x != nil { - return x.Digest + return x.KeyHash } - return "" + return nil } -type OrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Incident struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 7 + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + SerialTable string `protobuf:"bytes,2,opt,name=serialTable,proto3" json:"serialTable,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + RenewBy *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=renewBy,proto3" json:"renewBy,omitempty"` + Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *OrderRequest) Reset() { - *x = OrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *Incident) Reset() { + *x = Incident{} + mi := &file_sa_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *OrderRequest) String() string { +func (x *Incident) String() string { return protoimpl.X.MessageStringOf(x) } -func (*OrderRequest) ProtoMessage() {} +func (*Incident) ProtoMessage() {} -func (x *OrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Incident) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[31] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1282,47 +1857,69 @@ func (x *OrderRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead. -func (*OrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{22} +// Deprecated: Use Incident.ProtoReflect.Descriptor instead. 
+func (*Incident) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{31} } -func (x *OrderRequest) GetId() int64 { +func (x *Incident) GetId() int64 { if x != nil { return x.Id } return 0 } -type NewOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Incident) GetSerialTable() string { + if x != nil { + return x.SerialTable + } + return "" +} - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Expires int64 `protobuf:"varint,2,opt,name=expires,proto3" json:"expires,omitempty"` - Names []string `protobuf:"bytes,3,rep,name=names,proto3" json:"names,omitempty"` - V2Authorizations []int64 `protobuf:"varint,4,rep,packed,name=v2Authorizations,proto3" json:"v2Authorizations,omitempty"` +func (x *Incident) GetUrl() string { + if x != nil { + return x.Url + } + return "" } -func (x *NewOrderRequest) Reset() { - *x = NewOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Incident) GetRenewBy() *timestamppb.Timestamp { + if x != nil { + return x.RenewBy + } + return nil +} + +func (x *Incident) GetEnabled() bool { + if x != nil { + return x.Enabled } + return false } -func (x *NewOrderRequest) String() string { +type Incidents struct { + state protoimpl.MessageState `protogen:"open.v1"` + Incidents []*Incident `protobuf:"bytes,1,rep,name=incidents,proto3" json:"incidents,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Incidents) Reset() { + *x = Incidents{} + mi := &file_sa_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Incidents) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NewOrderRequest) ProtoMessage() {} +func (*Incidents) ProtoMessage() {} -func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Incidents) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[32] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1332,66 +1929,41 @@ func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead. -func (*NewOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{23} -} - -func (x *NewOrderRequest) GetRegistrationID() int64 { - if x != nil { - return x.RegistrationID - } - return 0 -} - -func (x *NewOrderRequest) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 -} - -func (x *NewOrderRequest) GetNames() []string { - if x != nil { - return x.Names - } - return nil +// Deprecated: Use Incidents.ProtoReflect.Descriptor instead. 
+func (*Incidents) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{32} } -func (x *NewOrderRequest) GetV2Authorizations() []int64 { +func (x *Incidents) GetIncidents() []*Incident { if x != nil { - return x.V2Authorizations + return x.Incidents } return nil } -type NewOrderAndAuthzsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SerialsForIncidentRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + IncidentTable string `protobuf:"bytes,1,opt,name=incidentTable,proto3" json:"incidentTable,omitempty"` unknownFields protoimpl.UnknownFields - - NewOrder *NewOrderRequest `protobuf:"bytes,1,opt,name=newOrder,proto3" json:"newOrder,omitempty"` - NewAuthzs []*proto.Authorization `protobuf:"bytes,2,rep,name=newAuthzs,proto3" json:"newAuthzs,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *NewOrderAndAuthzsRequest) Reset() { - *x = NewOrderAndAuthzsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SerialsForIncidentRequest) Reset() { + *x = SerialsForIncidentRequest{} + mi := &file_sa_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *NewOrderAndAuthzsRequest) String() string { +func (x *SerialsForIncidentRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NewOrderAndAuthzsRequest) ProtoMessage() {} +func (*SerialsForIncidentRequest) ProtoMessage() {} -func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SerialsForIncidentRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[33] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1401,52 +1973,45 @@ func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead. -func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{24} -} - -func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest { - if x != nil { - return x.NewOrder - } - return nil +// Deprecated: Use SerialsForIncidentRequest.ProtoReflect.Descriptor instead. 
+func (*SerialsForIncidentRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{33} } -func (x *NewOrderAndAuthzsRequest) GetNewAuthzs() []*proto.Authorization { +func (x *SerialsForIncidentRequest) GetIncidentTable() string { if x != nil { - return x.NewAuthzs + return x.IncidentTable } - return nil + return "" } -type SetOrderErrorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Error *proto.ProblemDetails `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +type IncidentSerial struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Next unused field number: 6 + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` // May be 0 (NULL) + OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"` // May be 0 (NULL) + LastNoticeSent *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=lastNoticeSent,proto3" json:"lastNoticeSent,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } - -func (x *SetOrderErrorRequest) Reset() { - *x = SetOrderErrorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + +func (x *IncidentSerial) Reset() { + *x = IncidentSerial{} + mi := &file_sa_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SetOrderErrorRequest) String() string { +func (x *IncidentSerial) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetOrderErrorRequest) ProtoMessage() {} +func (*IncidentSerial) ProtoMessage() {} -func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { +func (x *IncidentSerial) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[34] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1456,52 +2021,65 @@ func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead. -func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{25} +// Deprecated: Use IncidentSerial.ProtoReflect.Descriptor instead. 
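SerialsForIncidentRequest names a single incident table, and the SA hands back one IncidentSerial row per affected certificate (the RPC definitions live outside this hunk). A small sketch; the table name and serial are hypothetical, and note that registrationID/orderID of 0 stand in for SQL NULL per the field comments above:

    package main

    import (
        "fmt"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        req := &sapb.SerialsForIncidentRequest{IncidentTable: "incident_foo"}
        fmt.Println("querying", req.GetIncidentTable())

        // A row as it might come back for that incident.
        row := &sapb.IncidentSerial{Serial: "00deadbeef", RegistrationID: 0}
        if row.GetRegistrationID() == 0 {
            // Zero means the serial has no associated registration (NULL).
            fmt.Println("serial", row.GetSerial(), "has no registration on record")
        }
    }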
+func (*IncidentSerial) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{34} } -func (x *SetOrderErrorRequest) GetId() int64 { +func (x *IncidentSerial) GetSerial() string { if x != nil { - return x.Id + return x.Serial + } + return "" +} + +func (x *IncidentSerial) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID } return 0 } -func (x *SetOrderErrorRequest) GetError() *proto.ProblemDetails { +func (x *IncidentSerial) GetOrderID() int64 { if x != nil { - return x.Error + return x.OrderID + } + return 0 +} + +func (x *IncidentSerial) GetLastNoticeSent() *timestamppb.Timestamp { + if x != nil { + return x.LastNoticeSent } return nil } -type GetValidOrderAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetRevokedCertsByShardRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + RevokedBefore *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=revokedBefore,proto3" json:"revokedBefore,omitempty"` + ExpiresAfter *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiresAfter,proto3" json:"expiresAfter,omitempty"` + ShardIdx int64 `protobuf:"varint,4,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - AcctID int64 `protobuf:"varint,2,opt,name=acctID,proto3" json:"acctID,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetValidOrderAuthorizationsRequest) Reset() { - *x = GetValidOrderAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetRevokedCertsByShardRequest) Reset() { + *x = GetRevokedCertsByShardRequest{} + mi := &file_sa_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetValidOrderAuthorizationsRequest) String() string { +func (x *GetRevokedCertsByShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {} +func (*GetRevokedCertsByShardRequest) ProtoMessage() {} -func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetRevokedCertsByShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[35] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1511,52 +2089,64 @@ func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{26} +// Deprecated: Use GetRevokedCertsByShardRequest.ProtoReflect.Descriptor instead. 
+func (*GetRevokedCertsByShardRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{35} } -func (x *GetValidOrderAuthorizationsRequest) GetId() int64 { +func (x *GetRevokedCertsByShardRequest) GetIssuerNameID() int64 { if x != nil { - return x.Id + return x.IssuerNameID } return 0 } -func (x *GetValidOrderAuthorizationsRequest) GetAcctID() int64 { +func (x *GetRevokedCertsByShardRequest) GetRevokedBefore() *timestamppb.Timestamp { if x != nil { - return x.AcctID + return x.RevokedBefore + } + return nil +} + +func (x *GetRevokedCertsByShardRequest) GetExpiresAfter() *timestamppb.Timestamp { + if x != nil { + return x.ExpiresAfter + } + return nil +} + +func (x *GetRevokedCertsByShardRequest) GetShardIdx() int64 { + if x != nil { + return x.ShardIdx } return 0 } -type GetOrderForNamesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RevocationStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status int64 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + RevokedReason int64 `protobuf:"varint,2,opt,name=revokedReason,proto3" json:"revokedReason,omitempty"` + RevokedDate *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=revokedDate,proto3" json:"revokedDate,omitempty"` unknownFields protoimpl.UnknownFields - - AcctID int64 `protobuf:"varint,1,opt,name=acctID,proto3" json:"acctID,omitempty"` - Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetOrderForNamesRequest) Reset() { - *x = GetOrderForNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RevocationStatus) Reset() { + *x = RevocationStatus{} + mi := &file_sa_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetOrderForNamesRequest) String() string { +func (x *RevocationStatus) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetOrderForNamesRequest) ProtoMessage() {} +func (*RevocationStatus) ProtoMessage() {} -func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RevocationStatus) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[36] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1566,52 +2156,58 @@ func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead. -func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{27} +// Deprecated: Use RevocationStatus.ProtoReflect.Descriptor instead.
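GetRevokedCertsByShardRequest bounds a query for one CRL shard: certificates of issuer issuerNameID revoked before revokedBefore, and still unexpired at expiresAfter, within shard shardIdx. A sketch with hypothetical values:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        now := time.Now()
        req := &sapb.GetRevokedCertsByShardRequest{
            IssuerNameID:  12345,                // hypothetical issuer name ID
            ShardIdx:      3,                    // which CRL shard to query
            RevokedBefore: timestamppb.New(now), // include revocations up to now
            ExpiresAfter:  timestamppb.New(now), // skip already-expired certs
        }
        fmt.Println(req.GetShardIdx(), req.GetRevokedBefore().AsTime())
    }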
+func (*RevocationStatus) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{36} } -func (x *GetOrderForNamesRequest) GetAcctID() int64 { +func (x *RevocationStatus) GetStatus() int64 { if x != nil { - return x.AcctID + return x.Status + } + return 0 +} + +func (x *RevocationStatus) GetRevokedReason() int64 { + if x != nil { + return x.RevokedReason } return 0 } -func (x *GetOrderForNamesRequest) GetNames() []string { +func (x *RevocationStatus) GetRevokedDate() *timestamppb.Timestamp { if x != nil { - return x.Names + return x.RevokedDate } return nil } -type FinalizeOrderRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type LeaseCRLShardRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + MinShardIdx int64 `protobuf:"varint,2,opt,name=minShardIdx,proto3" json:"minShardIdx,omitempty"` + MaxShardIdx int64 `protobuf:"varint,3,opt,name=maxShardIdx,proto3" json:"maxShardIdx,omitempty"` + Until *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=until,proto3" json:"until,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - CertificateSerial string `protobuf:"bytes,2,opt,name=certificateSerial,proto3" json:"certificateSerial,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *FinalizeOrderRequest) Reset() { - *x = FinalizeOrderRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *LeaseCRLShardRequest) Reset() { + *x = LeaseCRLShardRequest{} + mi := &file_sa_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FinalizeOrderRequest) String() string { +func (x *LeaseCRLShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FinalizeOrderRequest) ProtoMessage() {} +func (*LeaseCRLShardRequest) ProtoMessage() {} -func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { +func (x *LeaseCRLShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[37] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1621,53 +2217,63 @@ func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead. -func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{28} +// Deprecated: Use LeaseCRLShardRequest.ProtoReflect.Descriptor instead. 
+func (*LeaseCRLShardRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{37} } -func (x *FinalizeOrderRequest) GetId() int64 { +func (x *LeaseCRLShardRequest) GetIssuerNameID() int64 { if x != nil { - return x.Id + return x.IssuerNameID } return 0 } -func (x *FinalizeOrderRequest) GetCertificateSerial() string { +func (x *LeaseCRLShardRequest) GetMinShardIdx() int64 { if x != nil { - return x.CertificateSerial + return x.MinShardIdx } - return "" + return 0 } -type GetAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` - Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"` - Now int64 `protobuf:"varint,3,opt,name=now,proto3" json:"now,omitempty"` // Unix timestamp (nanoseconds) +func (x *LeaseCRLShardRequest) GetMaxShardIdx() int64 { + if x != nil { + return x.MaxShardIdx + } + return 0 } -func (x *GetAuthorizationsRequest) Reset() { - *x = GetAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *LeaseCRLShardRequest) GetUntil() *timestamppb.Timestamp { + if x != nil { + return x.Until } + return nil } -func (x *GetAuthorizationsRequest) String() string { +type LeaseCRLShardResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + ShardIdx int64 `protobuf:"varint,2,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LeaseCRLShardResponse) Reset() { + *x = LeaseCRLShardResponse{} + mi := &file_sa_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LeaseCRLShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetAuthorizationsRequest) ProtoMessage() {} +func (*LeaseCRLShardResponse) ProtoMessage() {} -func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { +func (x *LeaseCRLShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[38] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1677,58 +2283,51 @@ func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{29} +// Deprecated: Use LeaseCRLShardResponse.ProtoReflect.Descriptor instead. 
+func (*LeaseCRLShardResponse) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{38} } -func (x *GetAuthorizationsRequest) GetRegistrationID() int64 { +func (x *LeaseCRLShardResponse) GetIssuerNameID() int64 { if x != nil { - return x.RegistrationID + return x.IssuerNameID } return 0 } -func (x *GetAuthorizationsRequest) GetDomains() []string { +func (x *LeaseCRLShardResponse) GetShardIdx() int64 { if x != nil { - return x.Domains - } - return nil -} - -func (x *GetAuthorizationsRequest) GetNow() int64 { - if x != nil { - return x.Now + return x.ShardIdx } return 0 } -type Authorizations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type UpdateCRLShardRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + ShardIdx int64 `protobuf:"varint,2,opt,name=shardIdx,proto3" json:"shardIdx,omitempty"` + ThisUpdate *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=thisUpdate,proto3" json:"thisUpdate,omitempty"` + NextUpdate *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=nextUpdate,proto3" json:"nextUpdate,omitempty"` unknownFields protoimpl.UnknownFields - - Authz []*Authorizations_MapElement `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *Authorizations) Reset() { - *x = Authorizations{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateCRLShardRequest) Reset() { + *x = UpdateCRLShardRequest{} + mi := &file_sa_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *Authorizations) String() string { +func (x *UpdateCRLShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Authorizations) ProtoMessage() {} +func (*UpdateCRLShardRequest) ProtoMessage() {} -func (x *Authorizations) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateCRLShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[39] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1738,44 +2337,62 @@ func (x *Authorizations) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Authorizations.ProtoReflect.Descriptor instead. -func (*Authorizations) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{30} +// Deprecated: Use UpdateCRLShardRequest.ProtoReflect.Descriptor instead. 
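Taken together, the lease and update messages above suggest the crl-updater's flow: lease any shard in [minShardIdx, maxShardIdx] until a deadline, regenerate the shard named in the response, then record the shard's new thisUpdate/nextUpdate window. A sketch of the message plumbing only, with hypothetical values (the RPCs themselves are defined outside this hunk):

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        now := time.Now()
        lease := &sapb.LeaseCRLShardRequest{
            IssuerNameID: 12345,
            MinShardIdx:  0,
            MaxShardIdx:  127,
            Until:        timestamppb.New(now.Add(90 * time.Minute)),
        }
        // Pretend the SA granted shard 7 of this issuer's CRL.
        resp := &sapb.LeaseCRLShardResponse{IssuerNameID: 12345, ShardIdx: 7}
        // After the shard is regenerated and uploaded, record its window.
        update := &sapb.UpdateCRLShardRequest{
            IssuerNameID: resp.GetIssuerNameID(),
            ShardIdx:     resp.GetShardIdx(),
            ThisUpdate:   timestamppb.New(now),
            NextUpdate:   timestamppb.New(now.Add(10 * 24 * time.Hour)),
        }
        fmt.Println(lease.GetUntil().AsTime(), update.GetNextUpdate().AsTime())
    }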
+func (*UpdateCRLShardRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{39} } -func (x *Authorizations) GetAuthz() []*Authorizations_MapElement { +func (x *UpdateCRLShardRequest) GetIssuerNameID() int64 { if x != nil { - return x.Authz + return x.IssuerNameID } - return nil + return 0 } -type AddPendingAuthorizationsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateCRLShardRequest) GetShardIdx() int64 { + if x != nil { + return x.ShardIdx + } + return 0 +} - Authz []*proto.Authorization `protobuf:"bytes,1,rep,name=authz,proto3" json:"authz,omitempty"` +func (x *UpdateCRLShardRequest) GetThisUpdate() *timestamppb.Timestamp { + if x != nil { + return x.ThisUpdate + } + return nil } -func (x *AddPendingAuthorizationsRequest) Reset() { - *x = AddPendingAuthorizationsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *UpdateCRLShardRequest) GetNextUpdate() *timestamppb.Timestamp { + if x != nil { + return x.NextUpdate } + return nil +} + +type Identifiers struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identifiers []*proto.Identifier `protobuf:"bytes,1,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Identifiers) Reset() { + *x = Identifiers{} + mi := &file_sa_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddPendingAuthorizationsRequest) String() string { +func (x *Identifiers) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddPendingAuthorizationsRequest) ProtoMessage() {} +func (*Identifiers) ProtoMessage() {} -func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Identifiers) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[40] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1785,44 +2402,42 @@ func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead. -func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{31} +// Deprecated: Use Identifiers.ProtoReflect.Descriptor instead. 
+func (*Identifiers) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{40} } -func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization { +func (x *Identifiers) GetIdentifiers() []*proto.Identifier { if x != nil { - return x.Authz + return x.Identifiers } return nil } -type AuthorizationIDs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +type PauseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Identifiers []*proto.Identifier `protobuf:"bytes,2,rep,name=identifiers,proto3" json:"identifiers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *AuthorizationIDs) Reset() { - *x = AuthorizationIDs{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseRequest) Reset() { + *x = PauseRequest{} + mi := &file_sa_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AuthorizationIDs) String() string { +func (x *PauseRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AuthorizationIDs) ProtoMessage() {} +func (*PauseRequest) ProtoMessage() {} -func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[41] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1832,44 +2447,49 @@ func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead. -func (*AuthorizationIDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{32} +// Deprecated: Use PauseRequest.ProtoReflect.Descriptor instead. 
+func (*PauseRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{41} } -func (x *AuthorizationIDs) GetIds() []string { +func (x *PauseRequest) GetRegistrationID() int64 { if x != nil { - return x.Ids + return x.RegistrationID + } + return 0 +} + +func (x *PauseRequest) GetIdentifiers() []*proto.Identifier { + if x != nil { + return x.Identifiers } return nil } -type AuthorizationID2 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PauseIdentifiersResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Paused int64 `protobuf:"varint,1,opt,name=paused,proto3" json:"paused,omitempty"` + Repaused int64 `protobuf:"varint,2,opt,name=repaused,proto3" json:"repaused,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *AuthorizationID2) Reset() { - *x = AuthorizationID2{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseIdentifiersResponse) Reset() { + *x = PauseIdentifiersResponse{} + mi := &file_sa_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AuthorizationID2) String() string { +func (x *PauseIdentifiersResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AuthorizationID2) ProtoMessage() {} +func (*PauseIdentifiersResponse) ProtoMessage() {} -func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseIdentifiersResponse) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[42] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1879,44 +2499,49 @@ func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead. -func (*AuthorizationID2) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{33} +// Deprecated: Use PauseIdentifiersResponse.ProtoReflect.Descriptor instead. 
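PauseRequest pairs an account with the identifiers to pause, and PauseIdentifiersResponse distinguishes identifiers paused for the first time (paused) from ones re-paused after an unpause (repaused). A sketch assuming the Identifier type from boulder's core proto package; the account ID and identifier are hypothetical:

    package main

    import (
        "fmt"

        corepb "github.com/letsencrypt/boulder/core/proto"
        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        req := &sapb.PauseRequest{
            RegistrationID: 42, // hypothetical account ID
            Identifiers: []*corepb.Identifier{
                {Type: "dns", Value: "example.com"},
            },
        }
        // A response as the SA might return it.
        resp := &sapb.PauseIdentifiersResponse{Paused: 1, Repaused: 0}
        fmt.Println(len(req.GetIdentifiers()), resp.GetPaused(), resp.GetRepaused())
    }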
+func (*PauseIdentifiersResponse) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{42} } -func (x *AuthorizationID2) GetId() int64 { +func (x *PauseIdentifiersResponse) GetPaused() int64 { if x != nil { - return x.Id + return x.Paused } return 0 } -type Authorization2IDs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *PauseIdentifiersResponse) GetRepaused() int64 { + if x != nil { + return x.Repaused + } + return 0 +} - Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` +type UpdateRegistrationKeyRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + RegistrationID int64 `protobuf:"varint,1,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + Jwk []byte `protobuf:"bytes,2,opt,name=jwk,proto3" json:"jwk,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *Authorization2IDs) Reset() { - *x = Authorization2IDs{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateRegistrationKeyRequest) Reset() { + *x = UpdateRegistrationKeyRequest{} + mi := &file_sa_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *Authorization2IDs) String() string { +func (x *UpdateRegistrationKeyRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Authorization2IDs) ProtoMessage() {} +func (*UpdateRegistrationKeyRequest) ProtoMessage() {} -func (x *Authorization2IDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateRegistrationKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[43] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1926,49 +2551,53 @@ func (x *Authorization2IDs) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead. -func (*Authorization2IDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{34} +// Deprecated: Use UpdateRegistrationKeyRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateRegistrationKeyRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{43} } -func (x *Authorization2IDs) GetIds() []int64 { +func (x *UpdateRegistrationKeyRequest) GetRegistrationID() int64 { if x != nil { - return x.Ids + return x.RegistrationID } - return nil + return 0 } -type RevokeCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateRegistrationKeyRequest) GetJwk() []byte { + if x != nil { + return x.Jwk + } + return nil +} - Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` - Reason int64 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` - Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds) - Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds) - Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` - IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"` +type RateLimitOverride struct { + state protoimpl.MessageState `protogen:"open.v1"` + LimitEnum int64 `protobuf:"varint,1,opt,name=limitEnum,proto3" json:"limitEnum,omitempty"` + BucketKey string `protobuf:"bytes,2,opt,name=bucketKey,proto3" json:"bucketKey,omitempty"` + Comment string `protobuf:"bytes,3,opt,name=comment,proto3" json:"comment,omitempty"` + Period *durationpb.Duration `protobuf:"bytes,4,opt,name=period,proto3" json:"period,omitempty"` + Count int64 `protobuf:"varint,5,opt,name=count,proto3" json:"count,omitempty"` + Burst int64 `protobuf:"varint,6,opt,name=burst,proto3" json:"burst,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RevokeCertificateRequest) Reset() { - *x = RevokeCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RateLimitOverride) Reset() { + *x = RateLimitOverride{} + mi := &file_sa_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RevokeCertificateRequest) String() string { +func (x *RateLimitOverride) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RevokeCertificateRequest) ProtoMessage() {} +func (*RateLimitOverride) ProtoMessage() {} -func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RateLimitOverride) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[44] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1978,85 +2607,76 @@ func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead. -func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{35} +// Deprecated: Use RateLimitOverride.ProtoReflect.Descriptor instead. 
+func (*RateLimitOverride) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{44} } -func (x *RevokeCertificateRequest) GetSerial() string { +func (x *RateLimitOverride) GetLimitEnum() int64 { if x != nil { - return x.Serial + return x.LimitEnum } - return "" + return 0 } -func (x *RevokeCertificateRequest) GetReason() int64 { +func (x *RateLimitOverride) GetBucketKey() string { if x != nil { - return x.Reason + return x.BucketKey } - return 0 + return "" } -func (x *RevokeCertificateRequest) GetDate() int64 { +func (x *RateLimitOverride) GetComment() string { if x != nil { - return x.Date + return x.Comment } - return 0 + return "" } -func (x *RevokeCertificateRequest) GetBackdate() int64 { +func (x *RateLimitOverride) GetPeriod() *durationpb.Duration { if x != nil { - return x.Backdate + return x.Period } - return 0 + return nil } -func (x *RevokeCertificateRequest) GetResponse() []byte { +func (x *RateLimitOverride) GetCount() int64 { if x != nil { - return x.Response + return x.Count } - return nil + return 0 } -func (x *RevokeCertificateRequest) GetIssuerID() int64 { +func (x *RateLimitOverride) GetBurst() int64 { if x != nil { - return x.IssuerID + return x.Burst } return 0 } -type FinalizeAuthorizationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type AddRateLimitOverrideRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Override *RateLimitOverride `protobuf:"bytes,1,opt,name=override,proto3" json:"override,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Expires int64 `protobuf:"varint,3,opt,name=expires,proto3" json:"expires,omitempty"` // Unix timestamp (nanoseconds) - Attempted string `protobuf:"bytes,4,opt,name=attempted,proto3" json:"attempted,omitempty"` - ValidationRecords []*proto.ValidationRecord `protobuf:"bytes,5,rep,name=validationRecords,proto3" json:"validationRecords,omitempty"` - ValidationError *proto.ProblemDetails `protobuf:"bytes,6,opt,name=validationError,proto3" json:"validationError,omitempty"` - AttemptedAt int64 `protobuf:"varint,7,opt,name=attemptedAt,proto3" json:"attemptedAt,omitempty"` // Unix timestamp (nanoseconds) + sizeCache protoimpl.SizeCache } -func (x *FinalizeAuthorizationRequest) Reset() { - *x = FinalizeAuthorizationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *AddRateLimitOverrideRequest) Reset() { + *x = AddRateLimitOverrideRequest{} + mi := &file_sa_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FinalizeAuthorizationRequest) String() string { +func (x *AddRateLimitOverrideRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FinalizeAuthorizationRequest) ProtoMessage() {} +func (*AddRateLimitOverrideRequest) ProtoMessage() {} -func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddRateLimitOverrideRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[45] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2066,90 +2686,94 @@ func (x *FinalizeAuthorizationRequest) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead. -func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{36} +// Deprecated: Use AddRateLimitOverrideRequest.ProtoReflect.Descriptor instead. +func (*AddRateLimitOverrideRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{45} } -func (x *FinalizeAuthorizationRequest) GetId() int64 { +func (x *AddRateLimitOverrideRequest) GetOverride() *RateLimitOverride { if x != nil { - return x.Id + return x.Override } - return 0 + return nil } -func (x *FinalizeAuthorizationRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" +type AddRateLimitOverrideResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Inserted bool `protobuf:"varint,1,opt,name=inserted,proto3" json:"inserted,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *FinalizeAuthorizationRequest) GetExpires() int64 { - if x != nil { - return x.Expires - } - return 0 +func (x *AddRateLimitOverrideResponse) Reset() { + *x = AddRateLimitOverrideResponse{} + mi := &file_sa_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *FinalizeAuthorizationRequest) GetAttempted() string { - if x != nil { - return x.Attempted - } - return "" +func (x *AddRateLimitOverrideResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *FinalizeAuthorizationRequest) GetValidationRecords() []*proto.ValidationRecord { +func (*AddRateLimitOverrideResponse) ProtoMessage() {} + +func (x *AddRateLimitOverrideResponse) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[46] if x != nil { - return x.ValidationRecords + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *FinalizeAuthorizationRequest) GetValidationError() *proto.ProblemDetails { +// Deprecated: Use AddRateLimitOverrideResponse.ProtoReflect.Descriptor instead. 
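RateLimitOverride carries the override itself, with its window as a google.protobuf.Duration, and AddRateLimitOverrideRequest/Response wrap the insert; the Enable/Disable/Get requests that follow all key an override by the (limitEnum, bucketKey) pair. A sketch with hypothetical limit and bucket values:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func main() {
        override := &sapb.RateLimitOverride{
            LimitEnum: 1,               // hypothetical limit enum value
            BucketKey: "1:example.com", // hypothetical bucket key
            Comment:   "temporary increase",
            Period:    durationpb.New(7 * 24 * time.Hour),
            Count:     300,
            Burst:     300,
        }
        req := &sapb.AddRateLimitOverrideRequest{Override: override}
        // The response reports whether a new row was inserted and whether
        // the override is currently enabled.
        resp := &sapb.AddRateLimitOverrideResponse{Inserted: true, Enabled: true}
        fmt.Println(req.GetOverride().GetBucketKey(), resp.GetInserted(), resp.GetEnabled())
    }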
+func (*AddRateLimitOverrideResponse) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{46} +} + +func (x *AddRateLimitOverrideResponse) GetInserted() bool { if x != nil { - return x.ValidationError + return x.Inserted } - return nil + return false } -func (x *FinalizeAuthorizationRequest) GetAttemptedAt() int64 { +func (x *AddRateLimitOverrideResponse) GetEnabled() bool { if x != nil { - return x.AttemptedAt + return x.Enabled } - return 0 + return false } -type AddBlockedKeyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type EnableRateLimitOverrideRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LimitEnum int64 `protobuf:"varint,1,opt,name=limitEnum,proto3" json:"limitEnum,omitempty"` + BucketKey string `protobuf:"bytes,2,opt,name=bucketKey,proto3" json:"bucketKey,omitempty"` unknownFields protoimpl.UnknownFields - - KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` - Added int64 `protobuf:"varint,2,opt,name=added,proto3" json:"added,omitempty"` // Unix timestamp (nanoseconds) - Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` - Comment string `protobuf:"bytes,4,opt,name=comment,proto3" json:"comment,omitempty"` - RevokedBy int64 `protobuf:"varint,5,opt,name=revokedBy,proto3" json:"revokedBy,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *AddBlockedKeyRequest) Reset() { - *x = AddBlockedKeyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *EnableRateLimitOverrideRequest) Reset() { + *x = EnableRateLimitOverrideRequest{} + mi := &file_sa_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddBlockedKeyRequest) String() string { +func (x *EnableRateLimitOverrideRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddBlockedKeyRequest) ProtoMessage() {} +func (*EnableRateLimitOverrideRequest) ProtoMessage() {} -func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { +func (x *EnableRateLimitOverrideRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[47] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2159,72 +2783,49 @@ func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead. -func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{37} -} - -func (x *AddBlockedKeyRequest) GetKeyHash() []byte { - if x != nil { - return x.KeyHash - } - return nil +// Deprecated: Use EnableRateLimitOverrideRequest.ProtoReflect.Descriptor instead. 
+func (*EnableRateLimitOverrideRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{47} } -func (x *AddBlockedKeyRequest) GetAdded() int64 { +func (x *EnableRateLimitOverrideRequest) GetLimitEnum() int64 { if x != nil { - return x.Added + return x.LimitEnum } return 0 } -func (x *AddBlockedKeyRequest) GetSource() string { - if x != nil { - return x.Source - } - return "" -} - -func (x *AddBlockedKeyRequest) GetComment() string { +func (x *EnableRateLimitOverrideRequest) GetBucketKey() string { if x != nil { - return x.Comment + return x.BucketKey } return "" } -func (x *AddBlockedKeyRequest) GetRevokedBy() int64 { - if x != nil { - return x.RevokedBy - } - return 0 -} - -type KeyBlockedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DisableRateLimitOverrideRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LimitEnum int64 `protobuf:"varint,1,opt,name=limitEnum,proto3" json:"limitEnum,omitempty"` + BucketKey string `protobuf:"bytes,2,opt,name=bucketKey,proto3" json:"bucketKey,omitempty"` unknownFields protoimpl.UnknownFields - - KeyHash []byte `protobuf:"bytes,1,opt,name=keyHash,proto3" json:"keyHash,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *KeyBlockedRequest) Reset() { - *x = KeyBlockedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DisableRateLimitOverrideRequest) Reset() { + *x = DisableRateLimitOverrideRequest{} + mi := &file_sa_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *KeyBlockedRequest) String() string { +func (x *DisableRateLimitOverrideRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*KeyBlockedRequest) ProtoMessage() {} +func (*DisableRateLimitOverrideRequest) ProtoMessage() {} -func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DisableRateLimitOverrideRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[48] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2234,45 +2835,49 @@ func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead. -func (*KeyBlockedRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{38} +// Deprecated: Use DisableRateLimitOverrideRequest.ProtoReflect.Descriptor instead. 
+func (*DisableRateLimitOverrideRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{48} } -func (x *KeyBlockedRequest) GetKeyHash() []byte { +func (x *DisableRateLimitOverrideRequest) GetLimitEnum() int64 { if x != nil { - return x.KeyHash + return x.LimitEnum } - return nil + return 0 } -type ValidAuthorizations_MapElement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DisableRateLimitOverrideRequest) GetBucketKey() string { + if x != nil { + return x.BucketKey + } + return "" +} - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` +type GetRateLimitOverrideRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + LimitEnum int64 `protobuf:"varint,1,opt,name=limitEnum,proto3" json:"limitEnum,omitempty"` + BucketKey string `protobuf:"bytes,2,opt,name=bucketKey,proto3" json:"bucketKey,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ValidAuthorizations_MapElement) Reset() { - *x = ValidAuthorizations_MapElement{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetRateLimitOverrideRequest) Reset() { + *x = GetRateLimitOverrideRequest{} + mi := &file_sa_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ValidAuthorizations_MapElement) String() string { +func (x *GetRateLimitOverrideRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidAuthorizations_MapElement) ProtoMessage() {} +func (*GetRateLimitOverrideRequest) ProtoMessage() {} -func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetRateLimitOverrideRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[49] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2282,52 +2887,50 @@ func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead. -func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{5, 0} +// Deprecated: Use GetRateLimitOverrideRequest.ProtoReflect.Descriptor instead. 
+func (*GetRateLimitOverrideRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{49} } -func (x *ValidAuthorizations_MapElement) GetDomain() string { +func (x *GetRateLimitOverrideRequest) GetLimitEnum() int64 { if x != nil { - return x.Domain + return x.LimitEnum } - return "" + return 0 } -func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization { +func (x *GetRateLimitOverrideRequest) GetBucketKey() string { if x != nil { - return x.Authz + return x.BucketKey } - return nil + return "" } -type Authorizations_MapElement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RateLimitOverrideResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Override *RateLimitOverride `protobuf:"bytes,1,opt,name=override,proto3" json:"override,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` unknownFields protoimpl.UnknownFields - - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *Authorizations_MapElement) Reset() { - *x = Authorizations_MapElement{} - if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RateLimitOverrideResponse) Reset() { + *x = RateLimitOverrideResponse{} + mi := &file_sa_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *Authorizations_MapElement) String() string { +func (x *RateLimitOverrideResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Authorizations_MapElement) ProtoMessage() {} +func (*RateLimitOverrideResponse) ProtoMessage() {} -func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RateLimitOverrideResponse) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[50] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2337,606 +2940,1060 @@ func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead. -func (*Authorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{30, 0} +// Deprecated: Use RateLimitOverrideResponse.ProtoReflect.Descriptor instead. 
+func (*RateLimitOverrideResponse) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{50} } -func (x *Authorizations_MapElement) GetDomain() string { +func (x *RateLimitOverrideResponse) GetOverride() *RateLimitOverride { if x != nil { - return x.Domain + return x.Override } - return "" + return nil +} + +func (x *RateLimitOverrideResponse) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false } -func (x *Authorizations_MapElement) GetAuthz() *proto.Authorization { +func (x *RateLimitOverrideResponse) GetUpdatedAt() *timestamppb.Timestamp { if x != nil { - return x.Authz + return x.UpdatedAt } return nil } var File_sa_proto protoreflect.FileDescriptor -var file_sa_proto_rawDesc = []byte{ +var file_sa_proto_rawDesc = string([]byte{ 0x0a, 0x08, 0x73, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, 0x1a, 0x15, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, - 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, + 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x6a, 0x77, 0x6b, 0x22, 0x21, 0x0a, 0x0f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0xdd, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 
0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74, - 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, - 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x73, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, - 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, - 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x20, 0x0a, 0x06, - 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x84, - 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x49, 0x44, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, + 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74, + 0x69, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x66, 
0x69, 0x6c, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x20, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0xc8, 0x01, 0x0a, 0x0e, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x34, 0x0a, + 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x7f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x36, + 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x61, + 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4e, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xa4, 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, + 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 
0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x9f, 0x01, 0x0a, + 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x31, 0x0a, 0x06, 0x77, 0x69, 0x6e, + 0x64, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x50, + 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x22, 0x20, 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, + 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, + 0x74, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xa9, 0x01, + 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, + 0x32, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x06, 0x69, 0x73, 0x73, + 0x75, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0xd7, 0x02, 0x0a, 0x0f, 0x4e, 0x65, + 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, + 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, + 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x73, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, + 0x03, 0x10, 0x04, 0x22, 0x89, 0x02, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 
0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x44, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, + 0x65, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0e, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, + 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, + 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, + 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, + 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, + 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, + 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, + 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, + 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, + 0x44, 0x22, 0x6b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, + 0x63, 0x74, 0x49, 0x44, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x54, + 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x22, 0xd8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, + 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, + 0x3d, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2b, 0x0a, 0x06, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x24, + 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x92, 0x02, 0x0a, 0x18, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, + 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, + 0x75, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, + 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, + 0x78, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xea, 0x02, + 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, + 0x12, 0x3e, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, + 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x3c, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x4a, 0x04, + 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0xb8, 0x01, 0x0a, 0x14, 0x41, + 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, + 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x24, 0x0a, 0x08, 0x53, 0x50, 0x4b, 0x49, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x22, 0xa4, 0x01, 0x0a, 0x08, + 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x34, 0x0a, 0x07, + 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, + 0x42, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x2a, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x52, 0x09, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x19, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xb4, + 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 
0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x08, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, - 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7f, 0x0a, 0x0c, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, - 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, - 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, + 0x44, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x42, 0x0a, 0x0e, 0x6c, + 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xe1, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, 
0x69, 0x73, 0x73, 0x75, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x40, 0x0a, 0x0d, 0x72, + 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, + 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x3e, 0x0a, + 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x52, 0x65, + 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, + 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, + 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x0b, + 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x72, + 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x44, 0x61, 0x74, 0x65, 0x22, 0xb0, 0x01, 0x0a, 0x14, 0x4c, + 0x65, 0x61, 0x73, 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x69, 0x6e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x69, + 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x78, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, + 0x6d, 0x61, 0x78, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x12, 0x30, 0x0a, 0x05, 0x75, + 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x57, 0x0a, + 0x15, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 
0x68, + 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, 0x22, 0xcf, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x78, + 0x12, 0x3a, 0x0a, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x74, 0x68, 0x69, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x0a, + 0x6e, 0x65, 0x78, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6e, 0x65, + 0x78, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x41, 0x0a, 0x0b, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x6a, 0x0a, 0x0c, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x12, 0x32, 0x0a, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x4e, 0x0a, 0x18, 0x50, 0x61, 0x75, 0x73, 0x65, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x75, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x61, 0x75, 0x73, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x65, 0x70, 0x61, 0x75, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x72, + 0x65, 0x70, 0x61, 0x75, 0x73, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, + 0x10, 0x0a, 0x03, 0x6a, 0x77, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6a, 0x77, + 0x6b, 0x22, 0xc8, 
0x01, 0x0a, 0x11, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x45, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, + 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x62, 0x75, 0x72, 0x73, 0x74, 0x22, 0x50, 0x0a, 0x1b, + 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x08, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x73, 0x61, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x52, 0x08, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x22, 0x54, + 0x0a, 0x1c, 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x22, 0x5c, 0x0a, 0x1e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x45, + 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x22, 0x5d, 0x0a, 0x1f, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x74, + 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x6e, + 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x45, + 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x22, 0x59, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 
0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1c, 0x0a, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x1c, + 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x22, 0xa2, 0x01, 0x0a, + 0x19, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, + 0x61, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x52, 0x08, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x32, 0xf8, 0x0d, 0x0a, 0x18, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x51, + 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25, 0x2e, + 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, - 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, - 0x0a, 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, - 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 
0x20, 0x03, 0x28, 0x09, 0x52, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, - 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, - 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, - 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, - 0x0a, 0x16, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 
0x07, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, - 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, - 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, - 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, - 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, - 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, - 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, - 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, - 0x7a, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, - 0x68, 0x7a, 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, - 0x22, 0x24, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, - 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, - 0x73, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0xa6, 0x02, 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x3e, 0x0a, - 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, - 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, - 0x96, 0x01, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, - 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, - 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, - 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x22, 0x2d, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x32, 0xcd, 0x15, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0f, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 
0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, - 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, - 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x00, 0x12, 0x31, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, - 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, - 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 
0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, - 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, - 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, - 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x19, 0x50, 0x72, 0x65, - 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x72, 0x65, 0x76, - 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, + 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, + 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, + 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, + 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, - 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, - 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x32, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x32, 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3e, 0x0a, - 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, - 0x61, 0x2e, 0x52, 
0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, - 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x5c, 0x0a, + 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, + 0x44, 0x4e, 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, + 0x6f, 0x72, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, + 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x15, 0x47, 0x65, 0x74, + 0x4c, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, + 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, + 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, + 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, + 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, + 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, + 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x14, 0x2e, + 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, + 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, + 0x65, 0x72, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, 0x61, + 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x00, 0x12, 0x39, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x79, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x2f, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0c, 0x2e, 0x73, 0x61, + 0x2e, 0x53, 0x50, 0x4b, 0x49, 0x48, 0x61, 0x73, 0x68, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x52, 0x0a, 0x17, 0x47, 0x65, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x1b, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x25, 0x2e, 0x73, 0x61, 0x2e, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x52, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 
0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, - 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x00, 0x12, 0x42, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, - 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x49, + 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0d, 0x2e, + 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x00, 0x12, 0x28, + 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x0c, 0x2e, 0x73, + 0x61, 0x2e, 0x53, 0x50, 0x4b, 0x49, 0x48, 0x61, 0x73, 0x68, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, + 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x0a, + 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x12, + 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, + 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x16, 0x43, 0x68, 0x65, + 0x63, 
0x6b, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x50, 0x61, 0x75, + 0x73, 0x65, 0x64, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, + 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x0f, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, + 0x1f, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x59, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xbf, 0x1a, 0x0a, + 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x12, 0x51, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, + 0x12, 0x25, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x32, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, + 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, + 0x1a, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x46, 0x6f, 0x72, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, + 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, + 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x44, 0x32, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, + 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x15, + 0x47, 0x65, 0x74, 0x4c, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, + 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, + 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, + 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, + 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x39, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x1a, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 
0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, + 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x0a, + 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, + 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x00, + 0x12, 0x39, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x79, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x0a, 0x2e, 0x73, 0x61, + 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x2f, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0c, + 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x50, 0x4b, 0x49, 0x48, 0x61, 0x73, 0x68, 0x1a, 0x0a, 0x2e, 0x73, + 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x52, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, + 0x12, 0x5c, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, + 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x32, + 0x12, 0x26, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, + 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, + 0x0a, 0x12, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x1a, 0x0d, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x00, 0x12, 0x28, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, + 0x0c, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x50, 0x4b, 0x49, 0x48, 0x61, 0x73, 0x68, 0x1a, 0x0a, 0x2e, + 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x16, 0x52, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, + 0x4b, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 
0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x16, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, + 0x50, 0x61, 0x75, 0x73, 0x65, 0x64, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x61, 0x75, 0x73, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x50, 0x61, 0x75, 0x73, 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x73, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x0f, 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x14, 0x47, 0x65, + 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, + 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x73, + 0x61, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, + 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x41, - 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, - 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x11, 0x41, + 0x64, 0x64, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x19, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x18, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x14, + 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x42, + 0x0a, 0x16, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x00, 0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20, 0x2e, 0x73, + 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x16, 0x44, 0x65, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x46, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, + 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, + 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, + 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x11, + 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 
0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x53, 0x65, 0x74, + 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, + 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, + 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x2e, 0x0a, 0x08, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x13, 0x2e, 0x73, - 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, - 0x12, 0x40, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, - 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x22, 0x00, 0x12, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x50, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x12, 0x4f, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x2e, 0x73, 0x61, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x00, 0x12, 0x52, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, + 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, + 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x0d, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x52, + 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x73, 0x61, 0x2e, 0x4c, 0x65, 0x61, 0x73, 
0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, + 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x19, 0x2e, 0x73, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x52, 0x4c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x46, 0x69, 0x6e, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, - 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x10, 0x50, 0x61, 0x75, 0x73, 0x65, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x50, 0x61, + 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x61, 0x2e, + 0x50, 0x61, 0x75, 0x73, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x55, 0x6e, + 0x70, 0x61, 0x75, 0x73, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x2e, 0x73, + 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, + 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x5b, 0x0a, + 0x14, 0x41, 0x64, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x18, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x44, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x17, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x12, 0x22, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 
0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x2b, - 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x10, 0x2e, 0x73, 0x61, 0x2e, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x3e, 0x0a, 0x10, 0x47, - 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, - 0x1b, 0x2e, 0x73, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0b, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x11, 0x52, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x12, - 0x4e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x32, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x22, 0x00, - 0x12, 0x54, 0x0a, 0x16, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x12, 0x20, 0x2e, 0x73, 0x61, 0x2e, - 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x18, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x32, 0x12, 0x14, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, - 0x4b, 0x65, 0x79, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x29, + 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, + 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, + 0x2f, 0x73, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +}) var ( file_sa_proto_rawDescOnce sync.Once - file_sa_proto_rawDescData = file_sa_proto_rawDesc + file_sa_proto_rawDescData []byte ) func file_sa_proto_rawDescGZIP() []byte { file_sa_proto_rawDescOnce.Do(func() { - file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(file_sa_proto_rawDescData) + file_sa_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sa_proto_rawDesc), len(file_sa_proto_rawDesc))) }) return file_sa_proto_rawDescData } -var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 42) -var file_sa_proto_goTypes = []interface{}{ +var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 51) +var file_sa_proto_goTypes = []any{ (*RegistrationID)(nil), // 0: sa.RegistrationID (*JSONWebKey)(nil), // 1: sa.JSONWebKey (*AuthorizationID)(nil), // 2: sa.AuthorizationID - (*GetPendingAuthorizationRequest)(nil), // 3: sa.GetPendingAuthorizationRequest - (*GetValidAuthorizationsRequest)(nil), // 4: sa.GetValidAuthorizationsRequest - (*ValidAuthorizations)(nil), // 5: sa.ValidAuthorizations - (*Serial)(nil), // 6: sa.Serial - (*SerialMetadata)(nil), // 7: sa.SerialMetadata - (*Range)(nil), // 8: sa.Range - (*Count)(nil), // 9: sa.Count - (*CountCertificatesByNamesRequest)(nil), // 10: sa.CountCertificatesByNamesRequest - (*CountByNames)(nil), // 11: sa.CountByNames - (*CountRegistrationsByIPRequest)(nil), // 12: sa.CountRegistrationsByIPRequest - (*CountInvalidAuthorizationsRequest)(nil), // 13: sa.CountInvalidAuthorizationsRequest - (*CountOrdersRequest)(nil), // 14: sa.CountOrdersRequest - (*CountFQDNSetsRequest)(nil), // 15: sa.CountFQDNSetsRequest - (*FQDNSetExistsRequest)(nil), // 16: sa.FQDNSetExistsRequest - (*PreviousCertificateExistsRequest)(nil), // 17: sa.PreviousCertificateExistsRequest - (*Exists)(nil), // 18: sa.Exists - (*AddSerialRequest)(nil), // 19: sa.AddSerialRequest - (*AddCertificateRequest)(nil), // 20: sa.AddCertificateRequest - (*AddCertificateResponse)(nil), // 21: sa.AddCertificateResponse - (*OrderRequest)(nil), // 22: sa.OrderRequest - (*NewOrderRequest)(nil), // 23: sa.NewOrderRequest - (*NewOrderAndAuthzsRequest)(nil), // 24: sa.NewOrderAndAuthzsRequest - (*SetOrderErrorRequest)(nil), // 25: sa.SetOrderErrorRequest - (*GetValidOrderAuthorizationsRequest)(nil), // 26: sa.GetValidOrderAuthorizationsRequest - (*GetOrderForNamesRequest)(nil), // 27: sa.GetOrderForNamesRequest - (*FinalizeOrderRequest)(nil), // 28: sa.FinalizeOrderRequest - (*GetAuthorizationsRequest)(nil), // 29: sa.GetAuthorizationsRequest - (*Authorizations)(nil), // 30: sa.Authorizations - (*AddPendingAuthorizationsRequest)(nil), // 31: sa.AddPendingAuthorizationsRequest - (*AuthorizationIDs)(nil), // 32: sa.AuthorizationIDs - 
(*AuthorizationID2)(nil), // 33: sa.AuthorizationID2 - (*Authorization2IDs)(nil), // 34: sa.Authorization2IDs - (*RevokeCertificateRequest)(nil), // 35: sa.RevokeCertificateRequest - (*FinalizeAuthorizationRequest)(nil), // 36: sa.FinalizeAuthorizationRequest - (*AddBlockedKeyRequest)(nil), // 37: sa.AddBlockedKeyRequest - (*KeyBlockedRequest)(nil), // 38: sa.KeyBlockedRequest - (*ValidAuthorizations_MapElement)(nil), // 39: sa.ValidAuthorizations.MapElement - nil, // 40: sa.CountByNames.CountsEntry - (*Authorizations_MapElement)(nil), // 41: sa.Authorizations.MapElement - (*proto.Authorization)(nil), // 42: core.Authorization - (*proto.ProblemDetails)(nil), // 43: core.ProblemDetails - (*proto.ValidationRecord)(nil), // 44: core.ValidationRecord - (*proto.Registration)(nil), // 45: core.Registration - (*proto.Certificate)(nil), // 46: core.Certificate - (*proto.CertificateStatus)(nil), // 47: core.CertificateStatus - (*emptypb.Empty)(nil), // 48: google.protobuf.Empty - (*proto.Order)(nil), // 49: core.Order + (*GetValidAuthorizationsRequest)(nil), // 3: sa.GetValidAuthorizationsRequest + (*Serial)(nil), // 4: sa.Serial + (*SerialMetadata)(nil), // 5: sa.SerialMetadata + (*Range)(nil), // 6: sa.Range + (*Count)(nil), // 7: sa.Count + (*Timestamps)(nil), // 8: sa.Timestamps + (*CountInvalidAuthorizationsRequest)(nil), // 9: sa.CountInvalidAuthorizationsRequest + (*CountFQDNSetsRequest)(nil), // 10: sa.CountFQDNSetsRequest + (*FQDNSetExistsRequest)(nil), // 11: sa.FQDNSetExistsRequest + (*Exists)(nil), // 12: sa.Exists + (*AddSerialRequest)(nil), // 13: sa.AddSerialRequest + (*AddCertificateRequest)(nil), // 14: sa.AddCertificateRequest + (*OrderRequest)(nil), // 15: sa.OrderRequest + (*NewOrderRequest)(nil), // 16: sa.NewOrderRequest + (*NewAuthzRequest)(nil), // 17: sa.NewAuthzRequest + (*NewOrderAndAuthzsRequest)(nil), // 18: sa.NewOrderAndAuthzsRequest + (*SetOrderErrorRequest)(nil), // 19: sa.SetOrderErrorRequest + (*GetValidOrderAuthorizationsRequest)(nil), // 20: sa.GetValidOrderAuthorizationsRequest + (*GetOrderForNamesRequest)(nil), // 21: sa.GetOrderForNamesRequest + (*FinalizeOrderRequest)(nil), // 22: sa.FinalizeOrderRequest + (*GetAuthorizationsRequest)(nil), // 23: sa.GetAuthorizationsRequest + (*Authorizations)(nil), // 24: sa.Authorizations + (*AuthorizationIDs)(nil), // 25: sa.AuthorizationIDs + (*AuthorizationID2)(nil), // 26: sa.AuthorizationID2 + (*RevokeCertificateRequest)(nil), // 27: sa.RevokeCertificateRequest + (*FinalizeAuthorizationRequest)(nil), // 28: sa.FinalizeAuthorizationRequest + (*AddBlockedKeyRequest)(nil), // 29: sa.AddBlockedKeyRequest + (*SPKIHash)(nil), // 30: sa.SPKIHash + (*Incident)(nil), // 31: sa.Incident + (*Incidents)(nil), // 32: sa.Incidents + (*SerialsForIncidentRequest)(nil), // 33: sa.SerialsForIncidentRequest + (*IncidentSerial)(nil), // 34: sa.IncidentSerial + (*GetRevokedCertsByShardRequest)(nil), // 35: sa.GetRevokedCertsByShardRequest + (*RevocationStatus)(nil), // 36: sa.RevocationStatus + (*LeaseCRLShardRequest)(nil), // 37: sa.LeaseCRLShardRequest + (*LeaseCRLShardResponse)(nil), // 38: sa.LeaseCRLShardResponse + (*UpdateCRLShardRequest)(nil), // 39: sa.UpdateCRLShardRequest + (*Identifiers)(nil), // 40: sa.Identifiers + (*PauseRequest)(nil), // 41: sa.PauseRequest + (*PauseIdentifiersResponse)(nil), // 42: sa.PauseIdentifiersResponse + (*UpdateRegistrationKeyRequest)(nil), // 43: sa.UpdateRegistrationKeyRequest + (*RateLimitOverride)(nil), // 44: sa.RateLimitOverride + (*AddRateLimitOverrideRequest)(nil), // 45: 
sa.AddRateLimitOverrideRequest + (*AddRateLimitOverrideResponse)(nil), // 46: sa.AddRateLimitOverrideResponse + (*EnableRateLimitOverrideRequest)(nil), // 47: sa.EnableRateLimitOverrideRequest + (*DisableRateLimitOverrideRequest)(nil), // 48: sa.DisableRateLimitOverrideRequest + (*GetRateLimitOverrideRequest)(nil), // 49: sa.GetRateLimitOverrideRequest + (*RateLimitOverrideResponse)(nil), // 50: sa.RateLimitOverrideResponse + (*proto.Identifier)(nil), // 51: core.Identifier + (*timestamppb.Timestamp)(nil), // 52: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 53: google.protobuf.Duration + (*proto.ProblemDetails)(nil), // 54: core.ProblemDetails + (*proto.Authorization)(nil), // 55: core.Authorization + (*proto.ValidationRecord)(nil), // 56: core.ValidationRecord + (*emptypb.Empty)(nil), // 57: google.protobuf.Empty + (*proto.Registration)(nil), // 58: core.Registration + (*proto.Certificate)(nil), // 59: core.Certificate + (*proto.CertificateStatus)(nil), // 60: core.CertificateStatus + (*proto.Order)(nil), // 61: core.Order + (*proto.CRLEntry)(nil), // 62: core.CRLEntry } var file_sa_proto_depIdxs = []int32{ - 39, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement - 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range - 40, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry - 8, // 3: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range - 8, // 4: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range - 8, // 5: sa.CountOrdersRequest.range:type_name -> sa.Range - 23, // 6: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest - 42, // 7: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization - 43, // 8: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails - 41, // 9: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement - 42, // 10: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization - 44, // 11: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord - 43, // 12: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails - 42, // 13: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization - 42, // 14: sa.Authorizations.MapElement.authz:type_name -> core.Authorization - 0, // 15: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID - 1, // 16: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey - 6, // 17: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial - 6, // 18: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial - 6, // 19: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial - 6, // 20: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial - 10, // 21: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest - 12, // 22: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest - 12, // 23: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest - 14, // 24: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest - 15, // 25: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest - 16, // 26: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest - 17, // 27: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest - 33, // 28: sa.StorageAuthority.GetAuthorization2:input_type -> 
sa.AuthorizationID2 - 29, // 29: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest - 3, // 30: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest - 0, // 31: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID - 26, // 32: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest - 13, // 33: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest - 4, // 34: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest - 38, // 35: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest - 45, // 36: sa.StorageAuthority.NewRegistration:input_type -> core.Registration - 45, // 37: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration - 20, // 38: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest - 20, // 39: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest - 19, // 40: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest - 0, // 41: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID - 23, // 42: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest - 24, // 43: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest - 22, // 44: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest - 25, // 45: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest - 28, // 46: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest - 22, // 47: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest - 27, // 48: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest - 35, // 49: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest - 35, // 50: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest - 31, // 51: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest - 36, // 52: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest - 33, // 53: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2 - 37, // 54: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest - 45, // 55: sa.StorageAuthority.GetRegistration:output_type -> core.Registration - 45, // 56: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration - 7, // 57: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata - 46, // 58: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate - 46, // 59: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate - 47, // 60: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus - 11, // 61: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames - 9, // 62: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count - 9, // 63: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count - 9, // 64: sa.StorageAuthority.CountOrders:output_type -> sa.Count - 9, // 65: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count - 18, // 66: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists - 18, // 67: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists - 42, // 68: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization - 30, // 69: 
sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations - 42, // 70: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization - 9, // 71: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count - 30, // 72: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations - 9, // 73: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count - 30, // 74: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations - 18, // 75: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists - 45, // 76: sa.StorageAuthority.NewRegistration:output_type -> core.Registration - 48, // 77: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty - 21, // 78: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse - 48, // 79: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty - 48, // 80: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty - 48, // 81: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty - 49, // 82: sa.StorageAuthority.NewOrder:output_type -> core.Order - 49, // 83: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order - 48, // 84: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty - 48, // 85: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty - 48, // 86: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty - 49, // 87: sa.StorageAuthority.GetOrder:output_type -> core.Order - 49, // 88: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order - 48, // 89: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty - 48, // 90: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty - 34, // 91: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs - 48, // 92: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty - 48, // 93: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty - 48, // 94: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty - 55, // [55:95] is the sub-list for method output_type - 15, // [15:55] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 51, // 0: sa.GetValidAuthorizationsRequest.identifiers:type_name -> core.Identifier + 52, // 1: sa.GetValidAuthorizationsRequest.validUntil:type_name -> google.protobuf.Timestamp + 52, // 2: sa.SerialMetadata.created:type_name -> google.protobuf.Timestamp + 52, // 3: sa.SerialMetadata.expires:type_name -> google.protobuf.Timestamp + 52, // 4: sa.Range.earliest:type_name -> google.protobuf.Timestamp + 52, // 5: sa.Range.latest:type_name -> google.protobuf.Timestamp + 52, // 6: sa.Timestamps.timestamps:type_name -> google.protobuf.Timestamp + 51, // 7: sa.CountInvalidAuthorizationsRequest.identifier:type_name -> core.Identifier + 6, // 8: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range + 51, // 9: sa.CountFQDNSetsRequest.identifiers:type_name -> core.Identifier + 53, // 10: sa.CountFQDNSetsRequest.window:type_name -> google.protobuf.Duration + 51, // 11: sa.FQDNSetExistsRequest.identifiers:type_name -> core.Identifier + 52, // 12: sa.AddSerialRequest.created:type_name -> google.protobuf.Timestamp + 52, // 13: sa.AddSerialRequest.expires:type_name -> 
google.protobuf.Timestamp + 52, // 14: sa.AddCertificateRequest.issued:type_name -> google.protobuf.Timestamp + 52, // 15: sa.NewOrderRequest.expires:type_name -> google.protobuf.Timestamp + 51, // 16: sa.NewOrderRequest.identifiers:type_name -> core.Identifier + 51, // 17: sa.NewAuthzRequest.identifier:type_name -> core.Identifier + 52, // 18: sa.NewAuthzRequest.expires:type_name -> google.protobuf.Timestamp + 16, // 19: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest + 17, // 20: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> sa.NewAuthzRequest + 54, // 21: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails + 51, // 22: sa.GetOrderForNamesRequest.identifiers:type_name -> core.Identifier + 51, // 23: sa.GetAuthorizationsRequest.identifiers:type_name -> core.Identifier + 52, // 24: sa.GetAuthorizationsRequest.validUntil:type_name -> google.protobuf.Timestamp + 55, // 25: sa.Authorizations.authzs:type_name -> core.Authorization + 52, // 26: sa.RevokeCertificateRequest.date:type_name -> google.protobuf.Timestamp + 52, // 27: sa.RevokeCertificateRequest.backdate:type_name -> google.protobuf.Timestamp + 52, // 28: sa.FinalizeAuthorizationRequest.expires:type_name -> google.protobuf.Timestamp + 56, // 29: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord + 54, // 30: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails + 52, // 31: sa.FinalizeAuthorizationRequest.attemptedAt:type_name -> google.protobuf.Timestamp + 52, // 32: sa.AddBlockedKeyRequest.added:type_name -> google.protobuf.Timestamp + 52, // 33: sa.Incident.renewBy:type_name -> google.protobuf.Timestamp + 31, // 34: sa.Incidents.incidents:type_name -> sa.Incident + 52, // 35: sa.IncidentSerial.lastNoticeSent:type_name -> google.protobuf.Timestamp + 52, // 36: sa.GetRevokedCertsByShardRequest.revokedBefore:type_name -> google.protobuf.Timestamp + 52, // 37: sa.GetRevokedCertsByShardRequest.expiresAfter:type_name -> google.protobuf.Timestamp + 52, // 38: sa.RevocationStatus.revokedDate:type_name -> google.protobuf.Timestamp + 52, // 39: sa.LeaseCRLShardRequest.until:type_name -> google.protobuf.Timestamp + 52, // 40: sa.UpdateCRLShardRequest.thisUpdate:type_name -> google.protobuf.Timestamp + 52, // 41: sa.UpdateCRLShardRequest.nextUpdate:type_name -> google.protobuf.Timestamp + 51, // 42: sa.Identifiers.identifiers:type_name -> core.Identifier + 51, // 43: sa.PauseRequest.identifiers:type_name -> core.Identifier + 53, // 44: sa.RateLimitOverride.period:type_name -> google.protobuf.Duration + 44, // 45: sa.AddRateLimitOverrideRequest.override:type_name -> sa.RateLimitOverride + 44, // 46: sa.RateLimitOverrideResponse.override:type_name -> sa.RateLimitOverride + 52, // 47: sa.RateLimitOverrideResponse.updatedAt:type_name -> google.protobuf.Timestamp + 9, // 48: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest + 0, // 49: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:input_type -> sa.RegistrationID + 11, // 50: sa.StorageAuthorityReadOnly.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest + 10, // 51: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest + 26, // 52: sa.StorageAuthorityReadOnly.GetAuthorization2:input_type -> sa.AuthorizationID2 + 4, // 53: sa.StorageAuthorityReadOnly.GetCertificate:input_type -> sa.Serial + 4, // 54: sa.StorageAuthorityReadOnly.GetLintPrecertificate:input_type -> sa.Serial + 4, // 55: 
sa.StorageAuthorityReadOnly.GetCertificateStatus:input_type -> sa.Serial + 15, // 56: sa.StorageAuthorityReadOnly.GetOrder:input_type -> sa.OrderRequest + 21, // 57: sa.StorageAuthorityReadOnly.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest + 0, // 58: sa.StorageAuthorityReadOnly.GetRegistration:input_type -> sa.RegistrationID + 1, // 59: sa.StorageAuthorityReadOnly.GetRegistrationByKey:input_type -> sa.JSONWebKey + 4, // 60: sa.StorageAuthorityReadOnly.GetRevocationStatus:input_type -> sa.Serial + 35, // 61: sa.StorageAuthorityReadOnly.GetRevokedCertsByShard:input_type -> sa.GetRevokedCertsByShardRequest + 4, // 62: sa.StorageAuthorityReadOnly.GetSerialMetadata:input_type -> sa.Serial + 0, // 63: sa.StorageAuthorityReadOnly.GetSerialsByAccount:input_type -> sa.RegistrationID + 30, // 64: sa.StorageAuthorityReadOnly.GetSerialsByKey:input_type -> sa.SPKIHash + 3, // 65: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest + 20, // 66: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest + 4, // 67: sa.StorageAuthorityReadOnly.IncidentsForSerial:input_type -> sa.Serial + 30, // 68: sa.StorageAuthorityReadOnly.KeyBlocked:input_type -> sa.SPKIHash + 4, // 69: sa.StorageAuthorityReadOnly.ReplacementOrderExists:input_type -> sa.Serial + 33, // 70: sa.StorageAuthorityReadOnly.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest + 41, // 71: sa.StorageAuthorityReadOnly.CheckIdentifiersPaused:input_type -> sa.PauseRequest + 0, // 72: sa.StorageAuthorityReadOnly.GetPausedIdentifiers:input_type -> sa.RegistrationID + 49, // 73: sa.StorageAuthorityReadOnly.GetRateLimitOverride:input_type -> sa.GetRateLimitOverrideRequest + 57, // 74: sa.StorageAuthorityReadOnly.GetEnabledRateLimitOverrides:input_type -> google.protobuf.Empty + 9, // 75: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest + 0, // 76: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID + 11, // 77: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest + 10, // 78: sa.StorageAuthority.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest + 26, // 79: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2 + 4, // 80: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial + 4, // 81: sa.StorageAuthority.GetLintPrecertificate:input_type -> sa.Serial + 4, // 82: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial + 15, // 83: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest + 21, // 84: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest + 0, // 85: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID + 1, // 86: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey + 4, // 87: sa.StorageAuthority.GetRevocationStatus:input_type -> sa.Serial + 35, // 88: sa.StorageAuthority.GetRevokedCertsByShard:input_type -> sa.GetRevokedCertsByShardRequest + 4, // 89: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial + 0, // 90: sa.StorageAuthority.GetSerialsByAccount:input_type -> sa.RegistrationID + 30, // 91: sa.StorageAuthority.GetSerialsByKey:input_type -> sa.SPKIHash + 3, // 92: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest + 20, // 93: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest + 4, // 94: 
sa.StorageAuthority.IncidentsForSerial:input_type -> sa.Serial + 30, // 95: sa.StorageAuthority.KeyBlocked:input_type -> sa.SPKIHash + 4, // 96: sa.StorageAuthority.ReplacementOrderExists:input_type -> sa.Serial + 33, // 97: sa.StorageAuthority.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest + 41, // 98: sa.StorageAuthority.CheckIdentifiersPaused:input_type -> sa.PauseRequest + 0, // 99: sa.StorageAuthority.GetPausedIdentifiers:input_type -> sa.RegistrationID + 49, // 100: sa.StorageAuthority.GetRateLimitOverride:input_type -> sa.GetRateLimitOverrideRequest + 57, // 101: sa.StorageAuthority.GetEnabledRateLimitOverrides:input_type -> google.protobuf.Empty + 29, // 102: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest + 14, // 103: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest + 14, // 104: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest + 13, // 105: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest + 26, // 106: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2 + 0, // 107: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID + 28, // 108: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest + 22, // 109: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest + 18, // 110: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest + 58, // 111: sa.StorageAuthority.NewRegistration:input_type -> core.Registration + 27, // 112: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest + 19, // 113: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest + 15, // 114: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest + 43, // 115: sa.StorageAuthority.UpdateRegistrationKey:input_type -> sa.UpdateRegistrationKeyRequest + 27, // 116: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest + 37, // 117: sa.StorageAuthority.LeaseCRLShard:input_type -> sa.LeaseCRLShardRequest + 39, // 118: sa.StorageAuthority.UpdateCRLShard:input_type -> sa.UpdateCRLShardRequest + 41, // 119: sa.StorageAuthority.PauseIdentifiers:input_type -> sa.PauseRequest + 0, // 120: sa.StorageAuthority.UnpauseAccount:input_type -> sa.RegistrationID + 45, // 121: sa.StorageAuthority.AddRateLimitOverride:input_type -> sa.AddRateLimitOverrideRequest + 48, // 122: sa.StorageAuthority.DisableRateLimitOverride:input_type -> sa.DisableRateLimitOverrideRequest + 47, // 123: sa.StorageAuthority.EnableRateLimitOverride:input_type -> sa.EnableRateLimitOverrideRequest + 7, // 124: sa.StorageAuthorityReadOnly.CountInvalidAuthorizations2:output_type -> sa.Count + 7, // 125: sa.StorageAuthorityReadOnly.CountPendingAuthorizations2:output_type -> sa.Count + 12, // 126: sa.StorageAuthorityReadOnly.FQDNSetExists:output_type -> sa.Exists + 8, // 127: sa.StorageAuthorityReadOnly.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps + 55, // 128: sa.StorageAuthorityReadOnly.GetAuthorization2:output_type -> core.Authorization + 59, // 129: sa.StorageAuthorityReadOnly.GetCertificate:output_type -> core.Certificate + 59, // 130: sa.StorageAuthorityReadOnly.GetLintPrecertificate:output_type -> core.Certificate + 60, // 131: sa.StorageAuthorityReadOnly.GetCertificateStatus:output_type -> core.CertificateStatus + 61, // 132: sa.StorageAuthorityReadOnly.GetOrder:output_type -> core.Order + 61, // 133: 
sa.StorageAuthorityReadOnly.GetOrderForNames:output_type -> core.Order + 58, // 134: sa.StorageAuthorityReadOnly.GetRegistration:output_type -> core.Registration + 58, // 135: sa.StorageAuthorityReadOnly.GetRegistrationByKey:output_type -> core.Registration + 36, // 136: sa.StorageAuthorityReadOnly.GetRevocationStatus:output_type -> sa.RevocationStatus + 62, // 137: sa.StorageAuthorityReadOnly.GetRevokedCertsByShard:output_type -> core.CRLEntry + 5, // 138: sa.StorageAuthorityReadOnly.GetSerialMetadata:output_type -> sa.SerialMetadata + 4, // 139: sa.StorageAuthorityReadOnly.GetSerialsByAccount:output_type -> sa.Serial + 4, // 140: sa.StorageAuthorityReadOnly.GetSerialsByKey:output_type -> sa.Serial + 24, // 141: sa.StorageAuthorityReadOnly.GetValidAuthorizations2:output_type -> sa.Authorizations + 24, // 142: sa.StorageAuthorityReadOnly.GetValidOrderAuthorizations2:output_type -> sa.Authorizations + 32, // 143: sa.StorageAuthorityReadOnly.IncidentsForSerial:output_type -> sa.Incidents + 12, // 144: sa.StorageAuthorityReadOnly.KeyBlocked:output_type -> sa.Exists + 12, // 145: sa.StorageAuthorityReadOnly.ReplacementOrderExists:output_type -> sa.Exists + 34, // 146: sa.StorageAuthorityReadOnly.SerialsForIncident:output_type -> sa.IncidentSerial + 40, // 147: sa.StorageAuthorityReadOnly.CheckIdentifiersPaused:output_type -> sa.Identifiers + 40, // 148: sa.StorageAuthorityReadOnly.GetPausedIdentifiers:output_type -> sa.Identifiers + 50, // 149: sa.StorageAuthorityReadOnly.GetRateLimitOverride:output_type -> sa.RateLimitOverrideResponse + 50, // 150: sa.StorageAuthorityReadOnly.GetEnabledRateLimitOverrides:output_type -> sa.RateLimitOverrideResponse + 7, // 151: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count + 7, // 152: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count + 12, // 153: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists + 8, // 154: sa.StorageAuthority.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps + 55, // 155: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization + 59, // 156: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate + 59, // 157: sa.StorageAuthority.GetLintPrecertificate:output_type -> core.Certificate + 60, // 158: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus + 61, // 159: sa.StorageAuthority.GetOrder:output_type -> core.Order + 61, // 160: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order + 58, // 161: sa.StorageAuthority.GetRegistration:output_type -> core.Registration + 58, // 162: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration + 36, // 163: sa.StorageAuthority.GetRevocationStatus:output_type -> sa.RevocationStatus + 62, // 164: sa.StorageAuthority.GetRevokedCertsByShard:output_type -> core.CRLEntry + 5, // 165: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata + 4, // 166: sa.StorageAuthority.GetSerialsByAccount:output_type -> sa.Serial + 4, // 167: sa.StorageAuthority.GetSerialsByKey:output_type -> sa.Serial + 24, // 168: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations + 24, // 169: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations + 32, // 170: sa.StorageAuthority.IncidentsForSerial:output_type -> sa.Incidents + 12, // 171: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists + 12, // 172: sa.StorageAuthority.ReplacementOrderExists:output_type -> sa.Exists + 34, // 173: 
sa.StorageAuthority.SerialsForIncident:output_type -> sa.IncidentSerial + 40, // 174: sa.StorageAuthority.CheckIdentifiersPaused:output_type -> sa.Identifiers + 40, // 175: sa.StorageAuthority.GetPausedIdentifiers:output_type -> sa.Identifiers + 50, // 176: sa.StorageAuthority.GetRateLimitOverride:output_type -> sa.RateLimitOverrideResponse + 50, // 177: sa.StorageAuthority.GetEnabledRateLimitOverrides:output_type -> sa.RateLimitOverrideResponse + 57, // 178: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty + 57, // 179: sa.StorageAuthority.AddCertificate:output_type -> google.protobuf.Empty + 57, // 180: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty + 57, // 181: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty + 57, // 182: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty + 58, // 183: sa.StorageAuthority.DeactivateRegistration:output_type -> core.Registration + 57, // 184: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty + 57, // 185: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty + 61, // 186: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order + 58, // 187: sa.StorageAuthority.NewRegistration:output_type -> core.Registration + 57, // 188: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty + 57, // 189: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty + 57, // 190: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty + 58, // 191: sa.StorageAuthority.UpdateRegistrationKey:output_type -> core.Registration + 57, // 192: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty + 38, // 193: sa.StorageAuthority.LeaseCRLShard:output_type -> sa.LeaseCRLShardResponse + 57, // 194: sa.StorageAuthority.UpdateCRLShard:output_type -> google.protobuf.Empty + 42, // 195: sa.StorageAuthority.PauseIdentifiers:output_type -> sa.PauseIdentifiersResponse + 7, // 196: sa.StorageAuthority.UnpauseAccount:output_type -> sa.Count + 46, // 197: sa.StorageAuthority.AddRateLimitOverride:output_type -> sa.AddRateLimitOverrideResponse + 57, // 198: sa.StorageAuthority.DisableRateLimitOverride:output_type -> google.protobuf.Empty + 57, // 199: sa.StorageAuthority.EnableRateLimitOverride:output_type -> google.protobuf.Empty + 124, // [124:200] is the sub-list for method output_type + 48, // [48:124] is the sub-list for method input_type + 48, // [48:48] is the sub-list for extension type_name + 48, // [48:48] is the sub-list for extension extendee + 0, // [0:48] is the sub-list for field type_name } func init() { file_sa_proto_init() } @@ -2944,516 +4001,21 @@ func file_sa_proto_init() { if File_sa_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_sa_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegistrationID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JSONWebKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationID); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: 
- return nil - } - } - file_sa_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPendingAuthorizationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetValidAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidAuthorizations); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Serial); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SerialMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Range); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Count); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountCertificatesByNamesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountByNames); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountRegistrationsByIPRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountInvalidAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountOrdersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountFQDNSetsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FQDNSetExistsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - 
} - } - file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PreviousCertificateExistsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Exists); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSerialRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderAndAuthzsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetOrderErrorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetValidOrderAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOrderForNamesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeOrderRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorizations); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
nil - } - } - file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddPendingAuthorizationsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationIDs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationID2); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorization2IDs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeAuthorizationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddBlockedKeyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyBlockedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidAuthorizations_MapElement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorizations_MapElement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_sa_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sa_proto_rawDesc), len(file_sa_proto_rawDesc)), NumEnums: 0, - NumMessages: 42, + NumMessages: 51, NumExtensions: 0, - NumServices: 1, + NumServices: 2, }, GoTypes: file_sa_proto_goTypes, DependencyIndexes: file_sa_proto_depIdxs, MessageInfos: file_sa_proto_msgTypes, }.Build() File_sa_proto = out.File - file_sa_proto_rawDesc = nil file_sa_proto_goTypes = nil file_sa_proto_depIdxs = nil } diff --git a/sa/proto/sa.proto b/sa/proto/sa.proto index c69bece3992..0baeadcd69e 100644 --- a/sa/proto/sa.proto +++ b/sa/proto/sa.proto @@ -5,52 +5,94 @@ option go_package = "github.com/letsencrypt/boulder/sa/proto"; import "core/proto/core.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import 
"google/protobuf/duration.proto"; -service StorageAuthority { - // Getters +// StorageAuthorityReadOnly exposes only those SA methods which are read-only. +service StorageAuthorityReadOnly { + rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {} + rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {} + rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} + rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {} + rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} + rpc GetCertificate(Serial) returns (core.Certificate) {} + rpc GetLintPrecertificate(Serial) returns (core.Certificate) {} + rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {} + rpc GetOrder(OrderRequest) returns (core.Order) {} + rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {} rpc GetRegistration(RegistrationID) returns (core.Registration) {} rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {} + rpc GetRevocationStatus(Serial) returns (RevocationStatus) {} + rpc GetRevokedCertsByShard(GetRevokedCertsByShardRequest) returns (stream core.CRLEntry) {} rpc GetSerialMetadata(Serial) returns (SerialMetadata) {} - rpc GetCertificate(Serial) returns (core.Certificate) {} - rpc GetPrecertificate(Serial) returns (core.Certificate) {} - rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {} - rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {} - rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {} - rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {} - rpc CountOrders(CountOrdersRequest) returns (Count) {} - // Return a count of authorizations with status "invalid" that belong to - // a given registration ID and expire in the given time range. - rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {} - rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} - rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {} - rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} - rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {} - rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {} - rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {} + rpc GetSerialsByAccount(RegistrationID) returns (stream Serial) {} + rpc GetSerialsByKey(SPKIHash) returns (stream Serial) {} + rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {} rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {} + rpc IncidentsForSerial(Serial) returns (Incidents) {} + rpc KeyBlocked(SPKIHash) returns (Exists) {} + rpc ReplacementOrderExists(Serial) returns (Exists) {} + rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {} + rpc CheckIdentifiersPaused (PauseRequest) returns (Identifiers) {} + rpc GetPausedIdentifiers (RegistrationID) returns (Identifiers) {} + rpc GetRateLimitOverride(GetRateLimitOverrideRequest) returns (RateLimitOverrideResponse) {} + rpc GetEnabledRateLimitOverrides(google.protobuf.Empty) returns (stream RateLimitOverrideResponse) {} +} + +// StorageAuthority provides full read/write access to the database. +service StorageAuthority { + // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs. 
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {} + rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {} + rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} + rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {} + rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} + rpc GetCertificate(Serial) returns (core.Certificate) {} + rpc GetLintPrecertificate(Serial) returns (core.Certificate) {} + rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {} + rpc GetOrder(OrderRequest) returns (core.Order) {} + rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {} + rpc GetRegistration(RegistrationID) returns (core.Registration) {} + rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {} + rpc GetRevocationStatus(Serial) returns (RevocationStatus) {} + rpc GetRevokedCertsByShard(GetRevokedCertsByShardRequest) returns (stream core.CRLEntry) {} + rpc GetSerialMetadata(Serial) returns (SerialMetadata) {} + rpc GetSerialsByAccount(RegistrationID) returns (stream Serial) {} + rpc GetSerialsByKey(SPKIHash) returns (stream Serial) {} rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {} - rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {} + rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {} + rpc IncidentsForSerial(Serial) returns (Incidents) {} + rpc KeyBlocked(SPKIHash) returns (Exists) {} + rpc ReplacementOrderExists(Serial) returns (Exists) {} + rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {} + rpc CheckIdentifiersPaused (PauseRequest) returns (Identifiers) {} + rpc GetPausedIdentifiers (RegistrationID) returns (Identifiers) {} + rpc GetRateLimitOverride(GetRateLimitOverrideRequest) returns (RateLimitOverrideResponse) {} + rpc GetEnabledRateLimitOverrides(google.protobuf.Empty) returns (stream RateLimitOverrideResponse) {} + // Adders - rpc NewRegistration(core.Registration) returns (core.Registration) {} - rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {} - rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {} + rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {} + rpc AddCertificate(AddCertificateRequest) returns (google.protobuf.Empty) {} rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {} rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {} - rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {} - rpc NewOrder(NewOrderRequest) returns (core.Order) {} - rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {} - rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {} - rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {} + rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {} + rpc DeactivateRegistration(RegistrationID) returns (core.Registration) {} + rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {} rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {} - rpc GetOrder(OrderRequest) returns (core.Order) {} - rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {} + rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {} + rpc NewRegistration(core.Registration) returns (core.Registration) {} rpc 
RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {} + rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {} + rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {} + rpc UpdateRegistrationKey(UpdateRegistrationKeyRequest) returns (core.Registration) {} rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {} - rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {} - rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {} - rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {} - rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {} + rpc LeaseCRLShard(LeaseCRLShardRequest) returns (LeaseCRLShardResponse) {} + rpc UpdateCRLShard(UpdateCRLShardRequest) returns (google.protobuf.Empty) {} + rpc PauseIdentifiers(PauseRequest) returns (PauseIdentifiersResponse) {} + rpc UnpauseAccount(RegistrationID) returns (Count) {} + rpc AddRateLimitOverride(AddRateLimitOverrideRequest) returns (AddRateLimitOverrideResponse) {} + rpc DisableRateLimitOverride(DisableRateLimitOverrideRequest) returns (google.protobuf.Empty) {} + rpc EnableRateLimitOverride(EnableRateLimitOverrideRequest) returns (google.protobuf.Empty) {} } message RegistrationID { @@ -58,33 +100,21 @@ message RegistrationID { } message JSONWebKey { - bytes jwk = 1; + bytes jwk = 1; } message AuthorizationID { string id = 1; } -message GetPendingAuthorizationRequest { - int64 registrationID = 1; - string identifierType = 2; - string identifierValue = 3; - // Result must be valid until at least this Unix timestamp (nanos) - int64 validUntil = 4; -} - message GetValidAuthorizationsRequest { + // Next unused field number: 7 int64 registrationID = 1; - repeated string domains = 2; - int64 now = 3; // Unix timestamp (nanoseconds) -} - -message ValidAuthorizations { - message MapElement { - string domain = 1; - core.Authorization authz = 2; - } - repeated MapElement valid = 1; + reserved 2; // Previously dnsNames + repeated core.Identifier identifiers = 6; + reserved 3; // Previously nowNS + google.protobuf.Timestamp validUntil = 4; + string profile = 5; } message Serial { @@ -92,59 +122,55 @@ message Serial { } message SerialMetadata { + // Next unused field number: 7 string serial = 1; int64 registrationID = 2; - int64 created = 3; // Unix timestamp (nanoseconds) - int64 expires = 4; // Unix timestamp (nanoseconds) + reserved 3; // Previously createdNS + google.protobuf.Timestamp created = 5; + reserved 4; // Previously expiresNS + google.protobuf.Timestamp expires = 6; } message Range { - int64 earliest = 1; // Unix timestamp (nanoseconds) - int64 latest = 2; // Unix timestamp (nanoseconds) + // Next unused field number: 5 + reserved 1; // Previously earliestNS + google.protobuf.Timestamp earliest = 3; + reserved 2; // Previously latestNS + google.protobuf.Timestamp latest = 4; } message Count { int64 count = 1; } -message CountCertificatesByNamesRequest { - Range range = 1; - repeated string names = 2; -} - -message CountByNames { - map counts = 1; -} - -message CountRegistrationsByIPRequest { - bytes ip = 1; - Range range = 2; +message Timestamps { + // Next unused field number: 3 + reserved 1; // Previously repeated timestampsNS + repeated google.protobuf.Timestamp timestamps = 2; } message CountInvalidAuthorizationsRequest { + // Next unused field number: 5 int64 registrationID = 1; - string hostname = 2; + reserved 2; // 
Previously dnsName + core.Identifier identifier = 4; // Count authorizations that expire in this range. Range range = 3; } -message CountOrdersRequest { - int64 accountID = 1; - Range range = 2; -} - message CountFQDNSetsRequest { - int64 window = 1; - repeated string domains = 2; + // Next unused field number: 6 + reserved 1; // Previously windowNS + reserved 2; // Previously dnsNames + repeated core.Identifier identifiers = 5; + google.protobuf.Duration window = 3; + int64 limit = 4; } message FQDNSetExistsRequest { - repeated string domains = 1; -} - -message PreviousCertificateExistsRequest { - string domain = 1; - int64 regID = 2; + // Next unused field number: 3 + reserved 1; // Previously dnsNames + repeated core.Identifier identifiers = 2; } message Exists { @@ -152,27 +178,26 @@ } message AddSerialRequest { + // Next unused field number: 7 int64 regID = 1; string serial = 2; - int64 created = 3; // Unix timestamp (nanoseconds) - int64 expires = 4; // Unix timestamp (nanoseconds) + reserved 3; // Previously createdNS + google.protobuf.Timestamp created = 5; + reserved 4; // Previously expiresNS + google.protobuf.Timestamp expires = 6; } message AddCertificateRequest { + // Next unused field number: 8 bytes der = 1; int64 regID = 2; - // A signed OCSP response for the certificate contained in "der". - // Note: The certificate status in the OCSP response is assumed to be 0 (good). - bytes ocsp = 3; + reserved 3; // previously ocsp // An issued time. When not present the SA defaults to using - // the current time. The orphan-finder uses this parameter to add - // certificates with the correct historic issued date - int64 issued = 4; - int64 issuerID = 5; -} - -message AddCertificateResponse { - string digest = 1; + // the current time. + reserved 4; // Previously issuedNS + google.protobuf.Timestamp issued = 7; + int64 issuerNameID = 5; // https://pkg.go.dev/github.com/letsencrypt/boulder/issuance#IssuerNameID + reserved 6; // Previously ocspNotReady } message OrderRequest { @@ -180,15 +205,49 @@ } message NewOrderRequest { + // Next unused field number: 10 int64 registrationID = 1; - int64 expires = 2; - repeated string names = 3; + reserved 2; // Previously expiresNS + google.protobuf.Timestamp expires = 5; + reserved 3; // Previously dnsNames + repeated core.Identifier identifiers = 9; + // A list of already-existing authorization IDs that should be associated with + // the new Order object. This is for authorization reuse. repeated int64 v2Authorizations = 4; + string certificateProfileName = 7; + // Replaces is the ARI certificate ID that this order replaces. + string replaces = 8; + // ReplacesSerial is the serial number of the certificate that this order + // replaces. + string replacesSerial = 6; +} + +// NewAuthzRequest represents a request to create an authorization. +message NewAuthzRequest { + // Next unused field number: 13 + reserved 1; // previously id + reserved 2; // previously dnsName + core.Identifier identifier = 12; + int64 registrationID = 3; + reserved 4; // previously status + reserved 5; // previously expiresNS + google.protobuf.Timestamp expires = 9; + reserved 6; // previously challenges + reserved 7; // previously ACMEv1 combinations + reserved 8; // previously v2 + repeated string challengeTypes = 10; + string token = 11; } message NewOrderAndAuthzsRequest { NewOrderRequest newOrder = 1; - repeated core.Authorization newAuthzs = 2; + // Authorizations to be newly created alongside the order, and associated with it.
+ // These will be combined with any reused authorizations (newOrder.v2Authorizations) + // to make the overall set of authorizations for the order. This field and + // newOrder.v2Authorizations may both be present, or only one of the two may be + // present, but they may not both be absent. + repeated NewAuthzRequest newAuthzs = 2; } message SetOrderErrorRequest { @@ -202,8 +261,10 @@ message GetValidOrderAuthorizationsRequest { } message GetOrderForNamesRequest { + // Next unused field number: 4 int64 acctID = 1; - repeated string names = 2; + reserved 2; // Previously dnsNames + repeated core.Identifier identifiers = 3; } message FinalizeOrderRequest { @@ -212,21 +273,17 @@ message FinalizeOrderRequest { } message GetAuthorizationsRequest { + // Next unused field number: 7 int64 registrationID = 1; - repeated string domains = 2; - int64 now = 3; // Unix timestamp (nanoseconds) + reserved 2; // Previously dnsNames + repeated core.Identifier identifiers = 6; + reserved 3; // Previously nowNS + google.protobuf.Timestamp validUntil = 4; + string profile = 5; } message Authorizations { - message MapElement { - string domain = 1; - core.Authorization authz = 2; - } - repeated MapElement authz = 1; -} - -message AddPendingAuthorizationsRequest { - repeated core.Authorization authz = 1; + repeated core.Authorization authzs = 2; } message AuthorizationIDs { @@ -237,37 +294,159 @@ message AuthorizationID2 { int64 id = 1; } -message Authorization2IDs { - repeated int64 ids = 1; -} - message RevokeCertificateRequest { + // Next unused field number: 10 string serial = 1; int64 reason = 2; - int64 date = 3; // Unix timestamp (nanoseconds) - int64 backdate = 5; // Unix timestamp (nanoseconds) + reserved 3; // Previously dateNS + google.protobuf.Timestamp date = 8; + reserved 5; // Previously backdateNS + google.protobuf.Timestamp backdate = 9; bytes response = 4; int64 issuerID = 6; + int64 shardIdx = 7; } message FinalizeAuthorizationRequest { + // Next unused field number: 10 int64 id = 1; string status = 2; - int64 expires = 3; // Unix timestamp (nanoseconds) + reserved 3; // Previously expiresNS + google.protobuf.Timestamp expires = 8; string attempted = 4; repeated core.ValidationRecord validationRecords = 5; core.ProblemDetails validationError = 6; - int64 attemptedAt = 7; // Unix timestamp (nanoseconds) + reserved 7; // Previously attemptedAtNS + google.protobuf.Timestamp attemptedAt = 9; } message AddBlockedKeyRequest { + // Next unused field number: 7 bytes keyHash = 1; - int64 added = 2; // Unix timestamp (nanoseconds) + reserved 2; // Previously addedNS + google.protobuf.Timestamp added = 6; string source = 3; string comment = 4; int64 revokedBy = 5; } -message KeyBlockedRequest { +message SPKIHash { bytes keyHash = 1; } + +message Incident { + // Next unused field number: 7 + int64 id = 1; + string serialTable = 2; + string url = 3; + reserved 4; // Previously renewByNS + google.protobuf.Timestamp renewBy = 6; + bool enabled = 5; +} + +message Incidents { + repeated Incident incidents = 1; +} + +message SerialsForIncidentRequest { + string incidentTable = 1; +} + +message IncidentSerial { + // Next unused field number: 6 + string serial = 1; + int64 registrationID = 2; // May be 0 (NULL) + int64 orderID = 3; // May be 0 (NULL) + reserved 4; // Previously lastNoticeSentNS + google.protobuf.Timestamp lastNoticeSent = 5; +} + +message GetRevokedCertsByShardRequest { + int64 issuerNameID = 1; + google.protobuf.Timestamp revokedBefore = 2; + google.protobuf.Timestamp expiresAfter = 3; + int64 shardIdx =
4; +} + +message RevocationStatus { + int64 status = 1; + int64 revokedReason = 2; + google.protobuf.Timestamp revokedDate = 3; // Unix timestamp (nanoseconds) +} + +message LeaseCRLShardRequest { + int64 issuerNameID = 1; + int64 minShardIdx = 2; + int64 maxShardIdx = 3; + google.protobuf.Timestamp until = 4; +} + +message LeaseCRLShardResponse { + int64 issuerNameID = 1; + int64 shardIdx = 2; +} + +message UpdateCRLShardRequest { + int64 issuerNameID = 1; + int64 shardIdx = 2; + google.protobuf.Timestamp thisUpdate = 3; + google.protobuf.Timestamp nextUpdate = 4; +} + +message Identifiers { + repeated core.Identifier identifiers = 1; +} + +message PauseRequest { + int64 registrationID = 1; + repeated core.Identifier identifiers = 2; +} + +message PauseIdentifiersResponse { + int64 paused = 1; + int64 repaused = 2; +} + +message UpdateRegistrationKeyRequest { + int64 registrationID = 1; + bytes jwk = 2; +} + +message RateLimitOverride { + int64 limitEnum = 1; + string bucketKey = 2; + string comment = 3; + google.protobuf.Duration period = 4; + int64 count = 5; + int64 burst = 6; +} + +message AddRateLimitOverrideRequest { + RateLimitOverride override = 1; +} + +message AddRateLimitOverrideResponse { + bool inserted = 1; + bool enabled = 2; +} + +message EnableRateLimitOverrideRequest { + int64 limitEnum = 1; + string bucketKey = 2; +} + +message DisableRateLimitOverrideRequest { + int64 limitEnum = 1; + string bucketKey = 2; +} + +message GetRateLimitOverrideRequest { + int64 limitEnum = 1; + string bucketKey = 2; +} + +message RateLimitOverrideResponse { + RateLimitOverride override = 1; + bool enabled = 2; + google.protobuf.Timestamp updatedAt = 3; +} diff --git a/sa/proto/sa_grpc.pb.go b/sa/proto/sa_grpc.pb.go index 3aae5354b3a..733691dcd9d 100644 --- a/sa/proto/sa_grpc.pb.go +++ b/sa/proto/sa_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: sa.proto package proto @@ -13,57 +17,1229 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.64.0 or later. 
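// Illustrative aside (editor's sketch, not part of this change): the sa.proto
// edits above swap int64 nanosecond fields for google.protobuf.Timestamp and
// reserve the old field numbers so they can never be redefined on the wire. A
// caller then populates the new fields with timestamppb; the "sapb" alias and
// the surrounding variables are assumptions for illustration.
//
//	import (
//		"time"
//
//		"google.golang.org/protobuf/types/known/timestamppb"
//
//		sapb "github.com/letsencrypt/boulder/sa/proto"
//	)
//
//	req := &sapb.AddBlockedKeyRequest{
//		KeyHash: keyHash,                     // SPKI hash of the blocked key
//		Added:   timestamppb.New(time.Now()), // field 6; field 2 (addedNS) is reserved
//		Source:  "admin",
//	}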
+const _ = grpc.SupportPackageIsVersion9 + +const ( + StorageAuthorityReadOnly_CountInvalidAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/CountInvalidAuthorizations2" + StorageAuthorityReadOnly_CountPendingAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/CountPendingAuthorizations2" + StorageAuthorityReadOnly_FQDNSetExists_FullMethodName = "/sa.StorageAuthorityReadOnly/FQDNSetExists" + StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_FullMethodName = "/sa.StorageAuthorityReadOnly/FQDNSetTimestampsForWindow" + StorageAuthorityReadOnly_GetAuthorization2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetAuthorization2" + StorageAuthorityReadOnly_GetCertificate_FullMethodName = "/sa.StorageAuthorityReadOnly/GetCertificate" + StorageAuthorityReadOnly_GetLintPrecertificate_FullMethodName = "/sa.StorageAuthorityReadOnly/GetLintPrecertificate" + StorageAuthorityReadOnly_GetCertificateStatus_FullMethodName = "/sa.StorageAuthorityReadOnly/GetCertificateStatus" + StorageAuthorityReadOnly_GetOrder_FullMethodName = "/sa.StorageAuthorityReadOnly/GetOrder" + StorageAuthorityReadOnly_GetOrderForNames_FullMethodName = "/sa.StorageAuthorityReadOnly/GetOrderForNames" + StorageAuthorityReadOnly_GetRegistration_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRegistration" + StorageAuthorityReadOnly_GetRegistrationByKey_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRegistrationByKey" + StorageAuthorityReadOnly_GetRevocationStatus_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRevocationStatus" + StorageAuthorityReadOnly_GetRevokedCertsByShard_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRevokedCertsByShard" + StorageAuthorityReadOnly_GetSerialMetadata_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialMetadata" + StorageAuthorityReadOnly_GetSerialsByAccount_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialsByAccount" + StorageAuthorityReadOnly_GetSerialsByKey_FullMethodName = "/sa.StorageAuthorityReadOnly/GetSerialsByKey" + StorageAuthorityReadOnly_GetValidAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetValidAuthorizations2" + StorageAuthorityReadOnly_GetValidOrderAuthorizations2_FullMethodName = "/sa.StorageAuthorityReadOnly/GetValidOrderAuthorizations2" + StorageAuthorityReadOnly_IncidentsForSerial_FullMethodName = "/sa.StorageAuthorityReadOnly/IncidentsForSerial" + StorageAuthorityReadOnly_KeyBlocked_FullMethodName = "/sa.StorageAuthorityReadOnly/KeyBlocked" + StorageAuthorityReadOnly_ReplacementOrderExists_FullMethodName = "/sa.StorageAuthorityReadOnly/ReplacementOrderExists" + StorageAuthorityReadOnly_SerialsForIncident_FullMethodName = "/sa.StorageAuthorityReadOnly/SerialsForIncident" + StorageAuthorityReadOnly_CheckIdentifiersPaused_FullMethodName = "/sa.StorageAuthorityReadOnly/CheckIdentifiersPaused" + StorageAuthorityReadOnly_GetPausedIdentifiers_FullMethodName = "/sa.StorageAuthorityReadOnly/GetPausedIdentifiers" + StorageAuthorityReadOnly_GetRateLimitOverride_FullMethodName = "/sa.StorageAuthorityReadOnly/GetRateLimitOverride" + StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_FullMethodName = "/sa.StorageAuthorityReadOnly/GetEnabledRateLimitOverrides" +) + +// StorageAuthorityReadOnlyClient is the client API for StorageAuthorityReadOnly service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// StorageAuthorityReadOnly exposes only those SA methods which are read-only. 
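// Illustrative aside (editor's sketch, not generated output): the read-only
// client is constructed from an existing *grpc.ClientConn and then used like
// any unary gRPC client; conn, ctx, and the account ID are assumptions.
//
//	client := NewStorageAuthorityReadOnlyClient(conn)
//	reg, err := client.GetRegistration(ctx, &RegistrationID{Id: 12345})
//	if err != nil {
//		return err
//	}
//	fmt.Println(reg.Id)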
+type StorageAuthorityReadOnlyClient interface { + CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) + CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) + FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) + FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) + GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) + GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) + GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) + GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) + GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) + GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) + GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) + GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) + GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) + GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) + GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) + GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) + GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) + GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) + GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) + IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) + KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error) + ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error) + SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error) + CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error) + GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error) + GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error) + GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverrideResponse], error) +} + +type storageAuthorityReadOnlyClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageAuthorityReadOnlyClient(cc grpc.ClientConnInterface) StorageAuthorityReadOnlyClient { + return &storageAuthorityReadOnlyClient{cc} +} + +func (c *storageAuthorityReadOnlyClient) 
CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Count) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CountInvalidAuthorizations2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Count) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CountPendingAuthorizations2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_FQDNSetExists_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Timestamps) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Authorization) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetAuthorization2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Certificate) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetCertificate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Certificate) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetLintPrecertificate_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.CertificateStatus) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetCertificateStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(proto.Order) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetOrder_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Order) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetOrderForNames_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRegistration_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRegistrationByKey_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RevocationStatus) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRevocationStatus_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[0], StorageAuthorityReadOnly_GetRevokedCertsByShard_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[GetRevokedCertsByShardRequest, proto.CRLEntry]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetRevokedCertsByShardClient = grpc.ServerStreamingClient[proto.CRLEntry] + +func (c *storageAuthorityReadOnlyClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SerialMetadata) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetSerialMetadata_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[1], StorageAuthorityReadOnly_GetSerialsByAccount_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[RegistrationID, Serial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetSerialsByAccountClient = grpc.ServerStreamingClient[Serial] + +func (c *storageAuthorityReadOnlyClient) GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[2], StorageAuthorityReadOnly_GetSerialsByKey_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SPKIHash, Serial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetSerialsByKeyClient = grpc.ServerStreamingClient[Serial] + +func (c *storageAuthorityReadOnlyClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Authorizations) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetValidAuthorizations2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Authorizations) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetValidOrderAuthorizations2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Incidents) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_IncidentsForSerial_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_KeyBlocked_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_ReplacementOrderExists_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[3], StorageAuthorityReadOnly_SerialsForIncident_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SerialsForIncidentRequest, IncidentSerial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_SerialsForIncidentClient = grpc.ServerStreamingClient[IncidentSerial] + +func (c *storageAuthorityReadOnlyClient) CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Identifiers) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_CheckIdentifiersPaused_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Identifiers) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetPausedIdentifiers_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RateLimitOverrideResponse) + err := c.cc.Invoke(ctx, StorageAuthorityReadOnly_GetRateLimitOverride_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityReadOnlyClient) GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverrideResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthorityReadOnly_ServiceDesc.Streams[4], StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[emptypb.Empty, RateLimitOverrideResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetEnabledRateLimitOverridesClient = grpc.ServerStreamingClient[RateLimitOverrideResponse] + +// StorageAuthorityReadOnlyServer is the server API for StorageAuthorityReadOnly service. 
+// All implementations must embed UnimplementedStorageAuthorityReadOnlyServer +// for forward compatibility. +// +// StorageAuthorityReadOnly exposes only those SA methods which are read-only. +type StorageAuthorityReadOnlyServer interface { + CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) + CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) + FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) + FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) + GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) + GetCertificate(context.Context, *Serial) (*proto.Certificate, error) + GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error) + GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) + GetOrder(context.Context, *OrderRequest) (*proto.Order, error) + GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) + GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) + GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) + GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) + GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error + GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) + GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error + GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error + GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) + GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) + IncidentsForSerial(context.Context, *Serial) (*Incidents, error) + KeyBlocked(context.Context, *SPKIHash) (*Exists, error) + ReplacementOrderExists(context.Context, *Serial) (*Exists, error) + SerialsForIncident(*SerialsForIncidentRequest, grpc.ServerStreamingServer[IncidentSerial]) error + CheckIdentifiersPaused(context.Context, *PauseRequest) (*Identifiers, error) + GetPausedIdentifiers(context.Context, *RegistrationID) (*Identifiers, error) + GetRateLimitOverride(context.Context, *GetRateLimitOverrideRequest) (*RateLimitOverrideResponse, error) + GetEnabledRateLimitOverrides(*emptypb.Empty, grpc.ServerStreamingServer[RateLimitOverrideResponse]) error + mustEmbedUnimplementedStorageAuthorityReadOnlyServer() +} + +// UnimplementedStorageAuthorityReadOnlyServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedStorageAuthorityReadOnlyServer struct{} + +func (UnimplementedStorageAuthorityReadOnlyServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { + return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) { + return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLintPrecertificate not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error { + return status.Errorf(codes.Unimplemented, "method GetRevokedCertsByShard not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented") +} +func 
(UnimplementedStorageAuthorityReadOnlyServer) GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error { + return status.Errorf(codes.Unimplemented, "method GetSerialsByAccount not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error { + return status.Errorf(codes.Unimplemented, "method GetSerialsByKey not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) KeyBlocked(context.Context, *SPKIHash) (*Exists, error) { + return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) ReplacementOrderExists(context.Context, *Serial) (*Exists, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplacementOrderExists not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) SerialsForIncident(*SerialsForIncidentRequest, grpc.ServerStreamingServer[IncidentSerial]) error { + return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) CheckIdentifiersPaused(context.Context, *PauseRequest) (*Identifiers, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckIdentifiersPaused not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetPausedIdentifiers(context.Context, *RegistrationID) (*Identifiers, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPausedIdentifiers not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetRateLimitOverride(context.Context, *GetRateLimitOverrideRequest) (*RateLimitOverrideResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRateLimitOverride not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) GetEnabledRateLimitOverrides(*emptypb.Empty, grpc.ServerStreamingServer[RateLimitOverrideResponse]) error { + return status.Errorf(codes.Unimplemented, "method GetEnabledRateLimitOverrides not implemented") +} +func (UnimplementedStorageAuthorityReadOnlyServer) mustEmbedUnimplementedStorageAuthorityReadOnlyServer() { +} +func (UnimplementedStorageAuthorityReadOnlyServer) testEmbeddedByValue() {} + +// UnsafeStorageAuthorityReadOnlyServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StorageAuthorityReadOnlyServer will +// result in compilation errors. 
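// Illustrative aside (editor's sketch, not generated output): per the
// embedded-by-value note on UnimplementedStorageAuthorityReadOnlyServer above,
// a concrete implementation embeds that struct directly, so methods added to
// the service later return codes.Unimplemented instead of breaking the build.
// The type name "sqlStorageAuthority" is hypothetical.
//
//	type sqlStorageAuthority struct {
//		UnimplementedStorageAuthorityReadOnlyServer // by value, never by pointer
//		// ... database handle, clock, etc.
//	}
//
//	// Compile-time check that the embedding satisfies the full interface.
//	var _ StorageAuthorityReadOnlyServer = (*sqlStorageAuthority)(nil)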
+type UnsafeStorageAuthorityReadOnlyServer interface {
+ mustEmbedUnimplementedStorageAuthorityReadOnlyServer()
+}
+
+func RegisterStorageAuthorityReadOnlyServer(s grpc.ServiceRegistrar, srv StorageAuthorityReadOnlyServer) {
+ // If the following call panics, it indicates UnimplementedStorageAuthorityReadOnlyServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&StorageAuthorityReadOnly_ServiceDesc, srv)
+}
+
+func _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountInvalidAuthorizationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: StorageAuthorityReadOnly_CountInvalidAuthorizations2_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityReadOnlyServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RegistrationID)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: StorageAuthorityReadOnly_CountPendingAuthorizations2_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityReadOnlyServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthorityReadOnly_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FQDNSetExistsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: StorageAuthorityReadOnly_FQDNSetExists_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(StorageAuthorityReadOnlyServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CountFQDNSetsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_FullMethodName,
+ }
+ handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthorizationID2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetAuthorization2_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetAuthorization2(ctx, req.(*AuthorizationID2)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetCertificate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetCertificate(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetLintPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetLintPrecertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetLintPrecertificate_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetLintPrecertificate(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetCertificateStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetCertificateStatus(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(OrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
StorageAuthorityReadOnly_GetOrder_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetOrder(ctx, req.(*OrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOrderForNamesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetOrderForNames_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetRegistration_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetRegistration(ctx, req.(*RegistrationID)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JSONWebKey) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetRegistrationByKey_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetRegistrationByKey(ctx, req.(*JSONWebKey)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetRevocationStatus_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetRevocationStatus(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetRevokedCertsByShard_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetRevokedCertsByShardRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityReadOnlyServer).GetRevokedCertsByShard(m, &grpc.GenericServerStream[GetRevokedCertsByShardRequest, proto.CRLEntry]{ServerStream: 
stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetRevokedCertsByShardServer = grpc.ServerStreamingServer[proto.CRLEntry] + +func _StorageAuthorityReadOnly_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetSerialMetadata_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetSerialMetadata(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetSerialsByAccount_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(RegistrationID) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityReadOnlyServer).GetSerialsByAccount(m, &grpc.GenericServerStream[RegistrationID, Serial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetSerialsByAccountServer = grpc.ServerStreamingServer[Serial] + +func _StorageAuthorityReadOnly_GetSerialsByKey_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SPKIHash) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityReadOnlyServer).GetSerialsByKey(m, &grpc.GenericServerStream[SPKIHash, Serial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
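// Illustrative aside (editor's sketch, not generated output): with the generic
// grpc.ServerStreamingServer used by these handlers, a server method pushes
// each message with Send and returns nil to end the stream. The receiver type
// "sqlStorageAuthority" and the helper serialsForKey are hypothetical.
//
//	func (sa *sqlStorageAuthority) GetSerialsByKey(req *SPKIHash, stream grpc.ServerStreamingServer[Serial]) error {
//		for _, s := range serialsForKey(req.KeyHash) { // hypothetical lookup
//			if err := stream.Send(&Serial{Serial: s}); err != nil {
//				return err // client went away or stream broke
//			}
//		}
//		return nil // returning nil closes the stream cleanly
//	}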
+type StorageAuthorityReadOnly_GetSerialsByKeyServer = grpc.ServerStreamingServer[Serial] + +func _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValidAuthorizationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetValidAuthorizations2_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValidOrderAuthorizationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetValidOrderAuthorizations2_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_IncidentsForSerial_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).IncidentsForSerial(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SPKIHash) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_KeyBlocked_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).KeyBlocked(ctx, req.(*SPKIHash)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_ReplacementOrderExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).ReplacementOrderExists(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
StorageAuthorityReadOnly_ReplacementOrderExists_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).ReplacementOrderExists(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SerialsForIncidentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityReadOnlyServer).SerialsForIncident(m, &grpc.GenericServerStream[SerialsForIncidentRequest, IncidentSerial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_SerialsForIncidentServer = grpc.ServerStreamingServer[IncidentSerial] + +func _StorageAuthorityReadOnly_CheckIdentifiersPaused_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).CheckIdentifiersPaused(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_CheckIdentifiersPaused_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).CheckIdentifiersPaused(ctx, req.(*PauseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetPausedIdentifiers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationID) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetPausedIdentifiers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetPausedIdentifiers_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetPausedIdentifiers(ctx, req.(*RegistrationID)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRateLimitOverrideRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityReadOnlyServer).GetRateLimitOverride(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthorityReadOnly_GetRateLimitOverride_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityReadOnlyServer).GetRateLimitOverride(ctx, req.(*GetRateLimitOverrideRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityReadOnlyServer).GetEnabledRateLimitOverrides(m, &grpc.GenericServerStream[emptypb.Empty, RateLimitOverrideResponse]{ServerStream: stream}) +} + +// This type alias is 
provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthorityReadOnly_GetEnabledRateLimitOverridesServer = grpc.ServerStreamingServer[RateLimitOverrideResponse] + +// StorageAuthorityReadOnly_ServiceDesc is the grpc.ServiceDesc for StorageAuthorityReadOnly service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var StorageAuthorityReadOnly_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "sa.StorageAuthorityReadOnly", + HandlerType: (*StorageAuthorityReadOnlyServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CountInvalidAuthorizations2", + Handler: _StorageAuthorityReadOnly_CountInvalidAuthorizations2_Handler, + }, + { + MethodName: "CountPendingAuthorizations2", + Handler: _StorageAuthorityReadOnly_CountPendingAuthorizations2_Handler, + }, + { + MethodName: "FQDNSetExists", + Handler: _StorageAuthorityReadOnly_FQDNSetExists_Handler, + }, + { + MethodName: "FQDNSetTimestampsForWindow", + Handler: _StorageAuthorityReadOnly_FQDNSetTimestampsForWindow_Handler, + }, + { + MethodName: "GetAuthorization2", + Handler: _StorageAuthorityReadOnly_GetAuthorization2_Handler, + }, + { + MethodName: "GetCertificate", + Handler: _StorageAuthorityReadOnly_GetCertificate_Handler, + }, + { + MethodName: "GetLintPrecertificate", + Handler: _StorageAuthorityReadOnly_GetLintPrecertificate_Handler, + }, + { + MethodName: "GetCertificateStatus", + Handler: _StorageAuthorityReadOnly_GetCertificateStatus_Handler, + }, + { + MethodName: "GetOrder", + Handler: _StorageAuthorityReadOnly_GetOrder_Handler, + }, + { + MethodName: "GetOrderForNames", + Handler: _StorageAuthorityReadOnly_GetOrderForNames_Handler, + }, + { + MethodName: "GetRegistration", + Handler: _StorageAuthorityReadOnly_GetRegistration_Handler, + }, + { + MethodName: "GetRegistrationByKey", + Handler: _StorageAuthorityReadOnly_GetRegistrationByKey_Handler, + }, + { + MethodName: "GetRevocationStatus", + Handler: _StorageAuthorityReadOnly_GetRevocationStatus_Handler, + }, + { + MethodName: "GetSerialMetadata", + Handler: _StorageAuthorityReadOnly_GetSerialMetadata_Handler, + }, + { + MethodName: "GetValidAuthorizations2", + Handler: _StorageAuthorityReadOnly_GetValidAuthorizations2_Handler, + }, + { + MethodName: "GetValidOrderAuthorizations2", + Handler: _StorageAuthorityReadOnly_GetValidOrderAuthorizations2_Handler, + }, + { + MethodName: "IncidentsForSerial", + Handler: _StorageAuthorityReadOnly_IncidentsForSerial_Handler, + }, + { + MethodName: "KeyBlocked", + Handler: _StorageAuthorityReadOnly_KeyBlocked_Handler, + }, + { + MethodName: "ReplacementOrderExists", + Handler: _StorageAuthorityReadOnly_ReplacementOrderExists_Handler, + }, + { + MethodName: "CheckIdentifiersPaused", + Handler: _StorageAuthorityReadOnly_CheckIdentifiersPaused_Handler, + }, + { + MethodName: "GetPausedIdentifiers", + Handler: _StorageAuthorityReadOnly_GetPausedIdentifiers_Handler, + }, + { + MethodName: "GetRateLimitOverride", + Handler: _StorageAuthorityReadOnly_GetRateLimitOverride_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetRevokedCertsByShard", + Handler: _StorageAuthorityReadOnly_GetRevokedCertsByShard_Handler, + ServerStreams: true, + }, + { + StreamName: "GetSerialsByAccount", + Handler: _StorageAuthorityReadOnly_GetSerialsByAccount_Handler, + ServerStreams: true, + }, + { + StreamName: "GetSerialsByKey", + Handler: _StorageAuthorityReadOnly_GetSerialsByKey_Handler, + 
ServerStreams: true, + }, + { + StreamName: "SerialsForIncident", + Handler: _StorageAuthorityReadOnly_SerialsForIncident_Handler, + ServerStreams: true, + }, + { + StreamName: "GetEnabledRateLimitOverrides", + Handler: _StorageAuthorityReadOnly_GetEnabledRateLimitOverrides_Handler, + ServerStreams: true, + }, + }, + Metadata: "sa.proto", +} + +const ( + StorageAuthority_CountInvalidAuthorizations2_FullMethodName = "/sa.StorageAuthority/CountInvalidAuthorizations2" + StorageAuthority_CountPendingAuthorizations2_FullMethodName = "/sa.StorageAuthority/CountPendingAuthorizations2" + StorageAuthority_FQDNSetExists_FullMethodName = "/sa.StorageAuthority/FQDNSetExists" + StorageAuthority_FQDNSetTimestampsForWindow_FullMethodName = "/sa.StorageAuthority/FQDNSetTimestampsForWindow" + StorageAuthority_GetAuthorization2_FullMethodName = "/sa.StorageAuthority/GetAuthorization2" + StorageAuthority_GetCertificate_FullMethodName = "/sa.StorageAuthority/GetCertificate" + StorageAuthority_GetLintPrecertificate_FullMethodName = "/sa.StorageAuthority/GetLintPrecertificate" + StorageAuthority_GetCertificateStatus_FullMethodName = "/sa.StorageAuthority/GetCertificateStatus" + StorageAuthority_GetOrder_FullMethodName = "/sa.StorageAuthority/GetOrder" + StorageAuthority_GetOrderForNames_FullMethodName = "/sa.StorageAuthority/GetOrderForNames" + StorageAuthority_GetRegistration_FullMethodName = "/sa.StorageAuthority/GetRegistration" + StorageAuthority_GetRegistrationByKey_FullMethodName = "/sa.StorageAuthority/GetRegistrationByKey" + StorageAuthority_GetRevocationStatus_FullMethodName = "/sa.StorageAuthority/GetRevocationStatus" + StorageAuthority_GetRevokedCertsByShard_FullMethodName = "/sa.StorageAuthority/GetRevokedCertsByShard" + StorageAuthority_GetSerialMetadata_FullMethodName = "/sa.StorageAuthority/GetSerialMetadata" + StorageAuthority_GetSerialsByAccount_FullMethodName = "/sa.StorageAuthority/GetSerialsByAccount" + StorageAuthority_GetSerialsByKey_FullMethodName = "/sa.StorageAuthority/GetSerialsByKey" + StorageAuthority_GetValidAuthorizations2_FullMethodName = "/sa.StorageAuthority/GetValidAuthorizations2" + StorageAuthority_GetValidOrderAuthorizations2_FullMethodName = "/sa.StorageAuthority/GetValidOrderAuthorizations2" + StorageAuthority_IncidentsForSerial_FullMethodName = "/sa.StorageAuthority/IncidentsForSerial" + StorageAuthority_KeyBlocked_FullMethodName = "/sa.StorageAuthority/KeyBlocked" + StorageAuthority_ReplacementOrderExists_FullMethodName = "/sa.StorageAuthority/ReplacementOrderExists" + StorageAuthority_SerialsForIncident_FullMethodName = "/sa.StorageAuthority/SerialsForIncident" + StorageAuthority_CheckIdentifiersPaused_FullMethodName = "/sa.StorageAuthority/CheckIdentifiersPaused" + StorageAuthority_GetPausedIdentifiers_FullMethodName = "/sa.StorageAuthority/GetPausedIdentifiers" + StorageAuthority_GetRateLimitOverride_FullMethodName = "/sa.StorageAuthority/GetRateLimitOverride" + StorageAuthority_GetEnabledRateLimitOverrides_FullMethodName = "/sa.StorageAuthority/GetEnabledRateLimitOverrides" + StorageAuthority_AddBlockedKey_FullMethodName = "/sa.StorageAuthority/AddBlockedKey" + StorageAuthority_AddCertificate_FullMethodName = "/sa.StorageAuthority/AddCertificate" + StorageAuthority_AddPrecertificate_FullMethodName = "/sa.StorageAuthority/AddPrecertificate" + StorageAuthority_AddSerial_FullMethodName = "/sa.StorageAuthority/AddSerial" + StorageAuthority_DeactivateAuthorization2_FullMethodName = "/sa.StorageAuthority/DeactivateAuthorization2" + 
StorageAuthority_DeactivateRegistration_FullMethodName = "/sa.StorageAuthority/DeactivateRegistration"
+	StorageAuthority_FinalizeAuthorization2_FullMethodName = "/sa.StorageAuthority/FinalizeAuthorization2"
+	StorageAuthority_FinalizeOrder_FullMethodName = "/sa.StorageAuthority/FinalizeOrder"
+	StorageAuthority_NewOrderAndAuthzs_FullMethodName = "/sa.StorageAuthority/NewOrderAndAuthzs"
+	StorageAuthority_NewRegistration_FullMethodName = "/sa.StorageAuthority/NewRegistration"
+	StorageAuthority_RevokeCertificate_FullMethodName = "/sa.StorageAuthority/RevokeCertificate"
+	StorageAuthority_SetOrderError_FullMethodName = "/sa.StorageAuthority/SetOrderError"
+	StorageAuthority_SetOrderProcessing_FullMethodName = "/sa.StorageAuthority/SetOrderProcessing"
+	StorageAuthority_UpdateRegistrationKey_FullMethodName = "/sa.StorageAuthority/UpdateRegistrationKey"
+	StorageAuthority_UpdateRevokedCertificate_FullMethodName = "/sa.StorageAuthority/UpdateRevokedCertificate"
+	StorageAuthority_LeaseCRLShard_FullMethodName = "/sa.StorageAuthority/LeaseCRLShard"
+	StorageAuthority_UpdateCRLShard_FullMethodName = "/sa.StorageAuthority/UpdateCRLShard"
+	StorageAuthority_PauseIdentifiers_FullMethodName = "/sa.StorageAuthority/PauseIdentifiers"
+	StorageAuthority_UnpauseAccount_FullMethodName = "/sa.StorageAuthority/UnpauseAccount"
+	StorageAuthority_AddRateLimitOverride_FullMethodName = "/sa.StorageAuthority/AddRateLimitOverride"
+	StorageAuthority_DisableRateLimitOverride_FullMethodName = "/sa.StorageAuthority/DisableRateLimitOverride"
+	StorageAuthority_EnableRateLimitOverride_FullMethodName = "/sa.StorageAuthority/EnableRateLimitOverride"
+)
 
 // StorageAuthorityClient is the client API for StorageAuthority service.
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// StorageAuthority provides full read/write access to the database.
 type StorageAuthorityClient interface {
-	// Getters
+	// Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
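The exported *_FullMethodName constants just above let interceptors and middleware match RPCs against generated names rather than hard-coded path strings. A sketch of a unary client interceptor, assuming the same sapb alias as above (imports: context, log, google.golang.org/grpc):

func loggingInterceptor(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	// Compare against the generated constant, not the literal
	// "/sa.StorageAuthority/GetCertificate".
	if method == sapb.StorageAuthority_GetCertificate_FullMethodName {
		log.Printf("outbound %s", method)
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}

Such an interceptor would be installed at dial time with grpc.WithUnaryInterceptor(loggingInterceptor).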
+ CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) + CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) + FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) + FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) + GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) + GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) + GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) + GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) + GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) + GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) + GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) + GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) - CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) - CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) - // Return a count of authorizations with status "invalid" that belong to - // a given registration ID and expire in the given time range. 
- CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) - FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) - PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) - GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) - GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) - GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) + GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) + GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) + GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) + IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) + KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error) + ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error) + SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error) + CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error) + GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error) + GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error) + GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverrideResponse], error) // Adders - NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) - UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) - AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) + AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - DeactivateRegistration(ctx context.Context, in 
*RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
-	NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error)
-	SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
+	FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 	FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
-	GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
+	NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error)
+	NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error)
 	RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error)
 	UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error)
-	FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error)
-	AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	LeaseCRLShard(ctx context.Context, in *LeaseCRLShardRequest, opts ...grpc.CallOption) (*LeaseCRLShardResponse, error)
+	UpdateCRLShard(ctx context.Context, in *UpdateCRLShardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	PauseIdentifiers(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*PauseIdentifiersResponse, error)
+	UnpauseAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
+	AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error)
+	DisableRateLimitOverride(ctx context.Context, in *DisableRateLimitOverrideRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+	EnableRateLimitOverride(ctx context.Context, in *EnableRateLimitOverrideRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
 }
 
 type storageAuthorityClient struct {
@@ -74,27 +1250,50 @@ func NewStorageAuthorityClient(cc grpc.ClientConnInterface) StorageAuthorityClie
 	return &storageAuthorityClient{cc}
} -func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistration", in, out, opts...) +func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Count) + err := c.cc.Invoke(ctx, StorageAuthority_CountInvalidAuthorizations2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetRegistrationByKey", in, out, opts...) +func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Count) + err := c.cc.Invoke(ctx, StorageAuthority_CountPendingAuthorizations2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) { - out := new(SerialMetadata) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetSerialMetadata", in, out, opts...) +func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthority_FQDNSetExists_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Timestamps) + err := c.cc.Invoke(ctx, StorageAuthority_FQDNSetTimestampsForWindow_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Authorization) + err := c.cc.Invoke(ctx, StorageAuthority_GetAuthorization2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -102,17 +1301,19 @@ func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Seri } func (c *storageAuthorityClient) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificate", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_GetCertificate_FullMethodName, in, out, cOpts...) 
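A pattern worth noting in the regenerated stubs here: each one now builds cOpts by prepending grpc.StaticMethod() ahead of the caller's options. StaticMethod signals that the method name is a compile-time constant, so stats handlers can safely use it as a metrics label; caller-supplied options still take effect because they are appended afterwards. An illustrative call site (the helper name, serial value, timeout, and the Der field name are assumptions on my part):

func fetchCertDER(ctx context.Context, client sapb.StorageAuthorityClient) ([]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// grpc.WaitForReady(true) lands after grpc.StaticMethod() in cOpts.
	cert, err := client.GetCertificate(ctx, &sapb.Serial{Serial: "00deadbeef"}, grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cert.Der, nil
}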
if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { +func (c *storageAuthorityClient) GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Certificate) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPrecertificate", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_GetLintPrecertificate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -120,170 +1321,264 @@ func (c *storageAuthorityClient) GetPrecertificate(ctx context.Context, in *Seri } func (c *storageAuthorityClient) GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.CertificateStatus) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetCertificateStatus", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_GetCertificateStatus_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) { - out := new(CountByNames) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountCertificatesByNames", in, out, opts...) +func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Order) + err := c.cc.Invoke(ctx, StorageAuthority_GetOrder_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIP", in, out, opts...) +func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Order) + err := c.cc.Invoke(ctx, StorageAuthority_GetOrderForNames_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountRegistrationsByIPRange", in, out, opts...) +func (c *storageAuthorityClient) GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthority_GetRegistration_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountOrders", in, out, opts...) 
+func (c *storageAuthorityClient) GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthority_GetRegistrationByKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountFQDNSets", in, out, opts...) +func (c *storageAuthorityClient) GetRevocationStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*RevocationStatus, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RevocationStatus) + err := c.cc.Invoke(ctx, StorageAuthority_GetRevocationStatus_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...) +func (c *storageAuthorityClient) GetRevokedCertsByShard(ctx context.Context, in *GetRevokedCertsByShardRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[proto.CRLEntry], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[0], StorageAuthority_GetRevokedCertsByShard_FullMethodName, cOpts...) if err != nil { return nil, err } - return out, nil + x := &grpc.GenericClientStream[GetRevokedCertsByShardRequest, proto.CRLEntry]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (c *storageAuthorityClient) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/PreviousCertificateExists", in, out, opts...) +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetRevokedCertsByShardClient = grpc.ServerStreamingClient[proto.CRLEntry] + +func (c *storageAuthorityClient) GetSerialMetadata(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*SerialMetadata, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SerialMetadata) + err := c.cc.Invoke(ctx, StorageAuthority_GetSerialMetadata_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorization2", in, out, opts...) +func (c *storageAuthorityClient) GetSerialsByAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[1], StorageAuthority_GetSerialsByAccount_FullMethodName, cOpts...) 
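The streaming getters in this file now return the generic grpc.ServerStreamingClient[T]; the type aliases above exist so older code that named StorageAuthority_GetRevokedCertsByShardClient still compiles. Consuming such a stream is the usual Recv loop; a sketch with an empty request for brevity (imports: context, errors, io):

func drainCRLShard(ctx context.Context, client sapb.StorageAuthorityClient) error {
	stream, err := client.GetRevokedCertsByShard(ctx, &sapb.GetRevokedCertsByShardRequest{})
	if err != nil {
		return err
	}
	for {
		entry, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // the server ended the stream cleanly
		}
		if err != nil {
			return err
		}
		_ = entry // process one CRL entry
	}
}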
if err != nil { return nil, err } - return out, nil + x := &grpc.GenericClientStream[RegistrationID, Serial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetSerialsByAccountClient = grpc.ServerStreamingClient[Serial] + +func (c *storageAuthorityClient) GetSerialsByKey(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Serial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[2], StorageAuthority_GetSerialsByKey_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SPKIHash, Serial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil } -func (c *storageAuthorityClient) GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetSerialsByKeyClient = grpc.ServerStreamingClient[Serial] + +func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetAuthorizations2", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_GetValidAuthorizations2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) { - out := new(proto.Authorization) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetPendingAuthorization2", in, out, opts...) +func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Authorizations) + err := c.cc.Invoke(ctx, StorageAuthority_GetValidOrderAuthorizations2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountPendingAuthorizations2", in, out, opts...) +func (c *storageAuthorityClient) IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Incidents) + err := c.cc.Invoke(ctx, StorageAuthority_IncidentsForSerial_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidOrderAuthorizations2", in, out, opts...) +func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *SPKIHash, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthority_KeyBlocked_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) { - out := new(Count) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/CountInvalidAuthorizations2", in, out, opts...) +func (c *storageAuthorityClient) ReplacementOrderExists(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Exists, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Exists) + err := c.cc.Invoke(ctx, StorageAuthority_ReplacementOrderExists_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) { - out := new(Authorizations) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetValidAuthorizations2", in, out, opts...) +func (c *storageAuthorityClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[IncidentSerial], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[3], StorageAuthority_SerialsForIncident_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[SerialsForIncidentRequest, IncidentSerial]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_SerialsForIncidentClient = grpc.ServerStreamingClient[IncidentSerial] + +func (c *storageAuthorityClient) CheckIdentifiersPaused(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*Identifiers, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Identifiers) + err := c.cc.Invoke(ctx, StorageAuthority_CheckIdentifiersPaused_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) { - out := new(Exists) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/KeyBlocked", in, out, opts...) +func (c *storageAuthorityClient) GetPausedIdentifiers(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Identifiers, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Identifiers) + err := c.cc.Invoke(ctx, StorageAuthority_GetPausedIdentifiers_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) { - out := new(proto.Registration) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...) +func (c *storageAuthorityClient) GetRateLimitOverride(ctx context.Context, in *GetRateLimitOverrideRequest, opts ...grpc.CallOption) (*RateLimitOverrideResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RateLimitOverrideResponse) + err := c.cc.Invoke(ctx, StorageAuthority_GetRateLimitOverride_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) GetEnabledRateLimitOverrides(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (grpc.ServerStreamingClient[RateLimitOverrideResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[4], StorageAuthority_GetEnabledRateLimitOverrides_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[emptypb.Empty, RateLimitOverrideResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetEnabledRateLimitOverridesClient = grpc.ServerStreamingClient[RateLimitOverrideResponse] + +func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRegistration", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_AddBlockedKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) { - out := new(AddCertificateResponse) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddCertificate", in, out, opts...) +func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, StorageAuthority_AddCertificate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -291,8 +1586,9 @@ func (c *storageAuthorityClient) AddCertificate(ctx context.Context, in *AddCert } func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddPrecertificate", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_AddPrecertificate_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -300,26 +1596,49 @@ func (c *storageAuthorityClient) AddPrecertificate(ctx context.Context, in *AddC } func (c *storageAuthorityClient) AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddSerial", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_AddSerial_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateRegistration", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_DeactivateAuthorization2_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrder", in, out, opts...) +func (c *storageAuthorityClient) DeactivateRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthority_DeactivateRegistration_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, StorageAuthority_FinalizeAuthorization2_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, StorageAuthority_FinalizeOrder_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -327,17 +1646,29 @@ func (c *storageAuthorityClient) NewOrder(ctx context.Context, in *NewOrderReque } func (c *storageAuthorityClient) NewOrderAndAuthzs(ctx context.Context, in *NewOrderAndAuthzsRequest, opts ...grpc.CallOption) (*proto.Order, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewOrderAndAuthzs", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_NewOrderAndAuthzs_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthority_NewRegistration_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderProcessing", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_RevokeCertificate_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -345,89 +1676,109 @@ func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *Ord } func (c *storageAuthorityClient) SetOrderError(ctx context.Context, in *SetOrderErrorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/SetOrderError", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_SetOrderError_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) FinalizeOrder(ctx context.Context, in *FinalizeOrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) SetOrderProcessing(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeOrder", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_SetOrderProcessing_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrder", in, out, opts...) +func (c *storageAuthorityClient) UpdateRegistrationKey(ctx context.Context, in *UpdateRegistrationKeyRequest, opts ...grpc.CallOption) (*proto.Registration, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(proto.Registration) + err := c.cc.Invoke(ctx, StorageAuthority_UpdateRegistrationKey_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) { - out := new(proto.Order) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/GetOrderForNames", in, out, opts...) +func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, StorageAuthority_UpdateRevokedCertificate_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) RevokeCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/RevokeCertificate", in, out, opts...) +func (c *storageAuthorityClient) LeaseCRLShard(ctx context.Context, in *LeaseCRLShardRequest, opts ...grpc.CallOption) (*LeaseCRLShardResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(LeaseCRLShardResponse) + err := c.cc.Invoke(ctx, StorageAuthority_LeaseCRLShard_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) UpdateRevokedCertificate(ctx context.Context, in *RevokeCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) UpdateCRLShard(ctx context.Context, in *UpdateCRLShardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/UpdateRevokedCertificate", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_UpdateCRLShard_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageAuthorityClient) PauseIdentifiers(ctx context.Context, in *PauseRequest, opts ...grpc.CallOption) (*PauseIdentifiersResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(PauseIdentifiersResponse) + err := c.cc.Invoke(ctx, StorageAuthority_PauseIdentifiers_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) NewAuthorizations2(ctx context.Context, in *AddPendingAuthorizationsRequest, opts ...grpc.CallOption) (*Authorization2IDs, error) { - out := new(Authorization2IDs) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewAuthorizations2", in, out, opts...) +func (c *storageAuthorityClient) UnpauseAccount(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Count) + err := c.cc.Invoke(ctx, StorageAuthority_UnpauseAccount_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) FinalizeAuthorization2(ctx context.Context, in *FinalizeAuthorizationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FinalizeAuthorization2", in, out, opts...) +func (c *storageAuthorityClient) AddRateLimitOverride(ctx context.Context, in *AddRateLimitOverrideRequest, opts ...grpc.CallOption) (*AddRateLimitOverrideResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(AddRateLimitOverrideResponse) + err := c.cc.Invoke(ctx, StorageAuthority_AddRateLimitOverride_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) DeactivateAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) DisableRateLimitOverride(ctx context.Context, in *DisableRateLimitOverrideRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/DeactivateAuthorization2", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_DisableRateLimitOverride_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } return out, nil } -func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlockedKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageAuthorityClient) EnableRateLimitOverride(ctx context.Context, in *EnableRateLimitOverrideRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/sa.StorageAuthority/AddBlockedKey", in, out, opts...) + err := c.cc.Invoke(ctx, StorageAuthority_EnableRateLimitOverride_FullMethodName, in, out, cOpts...) if err != nil { return nil, err } @@ -436,129 +1787,156 @@ func (c *storageAuthorityClient) AddBlockedKey(ctx context.Context, in *AddBlock // StorageAuthorityServer is the server API for StorageAuthority service. // All implementations must embed UnimplementedStorageAuthorityServer -// for forward compatibility +// for forward compatibility. +// +// StorageAuthority provides full read/write access to the database. type StorageAuthorityServer interface { - // Getters + // Getters: this list must be identical to the StorageAuthorityReadOnly rpcs. + CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) + CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) + FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) + FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) + GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) + GetCertificate(context.Context, *Serial) (*proto.Certificate, error) + GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error) + GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) + GetOrder(context.Context, *OrderRequest) (*proto.Order, error) + GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) + GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) + GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) - GetCertificate(context.Context, *Serial) (*proto.Certificate, error) - GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) - GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) - CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) - CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) - CountOrders(context.Context, *CountOrdersRequest) (*Count, error) - // Return a count of authorizations with status "invalid" that belong to - // a given registration ID and expire in the given time range. 
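The StorageAuthorityServer interface opening above is the contract a concrete SA must satisfy. A skeletal sketch (the type name and method body are illustrative, not Boulder's real implementation; corepb stands for the core proto package this file imports as proto):

type skeletalSA struct {
	// Embedding by value satisfies mustEmbedUnimplementedStorageAuthorityServer
	// and answers every method not overridden with codes.Unimplemented.
	sapb.UnimplementedStorageAuthorityServer
}

func (s *skeletalSA) GetRegistration(ctx context.Context, id *sapb.RegistrationID) (*corepb.Registration, error) {
	// A real implementation would query the registrations table here.
	return nil, status.Errorf(codes.NotFound, "no registration with ID %d", id.Id)
}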
- CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) - FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) - PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) - GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) - GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) - GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) - CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) - GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) - CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) + GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error + GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) - KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) + GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) + IncidentsForSerial(context.Context, *Serial) (*Incidents, error) + KeyBlocked(context.Context, *SPKIHash) (*Exists, error) + ReplacementOrderExists(context.Context, *Serial) (*Exists, error) + SerialsForIncident(*SerialsForIncidentRequest, grpc.ServerStreamingServer[IncidentSerial]) error + CheckIdentifiersPaused(context.Context, *PauseRequest) (*Identifiers, error) + GetPausedIdentifiers(context.Context, *RegistrationID) (*Identifiers, error) + GetRateLimitOverride(context.Context, *GetRateLimitOverrideRequest) (*RateLimitOverrideResponse, error) + GetEnabledRateLimitOverrides(*emptypb.Empty, grpc.ServerStreamingServer[RateLimitOverrideResponse]) error // Adders - NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) - UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) - AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) + AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) + AddCertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) - DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) - NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) - NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) - SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) - SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) + DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) + DeactivateRegistration(context.Context, *RegistrationID) (*proto.Registration, error) + FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) - GetOrder(context.Context, *OrderRequest) (*proto.Order, error) - GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) + NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) + NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) 
RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
+	SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error)
+	SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error)
+	UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error)
 	UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error)
-	NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error)
-	FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error)
-	DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error)
-	AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error)
+	LeaseCRLShard(context.Context, *LeaseCRLShardRequest) (*LeaseCRLShardResponse, error)
+	UpdateCRLShard(context.Context, *UpdateCRLShardRequest) (*emptypb.Empty, error)
+	PauseIdentifiers(context.Context, *PauseRequest) (*PauseIdentifiersResponse, error)
+	UnpauseAccount(context.Context, *RegistrationID) (*Count, error)
+	AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error)
+	DisableRateLimitOverride(context.Context, *DisableRateLimitOverrideRequest) (*emptypb.Empty, error)
+	EnableRateLimitOverride(context.Context, *EnableRateLimitOverrideRequest) (*emptypb.Empty, error)
 	mustEmbedUnimplementedStorageAuthorityServer()
 }
 
-// UnimplementedStorageAuthorityServer must be embedded to have forward compatible implementations.
-type UnimplementedStorageAuthorityServer struct {
-}
+// UnimplementedStorageAuthorityServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
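The NOTE above is not hypothetical: every stub method has a value receiver, so promoting those methods through a nil pointer embed dereferences nil at call time. A small demonstration of the difference (type and function names are illustrative):

type pointerEmbedSA struct {
	*sapb.UnimplementedStorageAuthorityServer // nil unless explicitly set
}

type valueEmbedSA struct {
	sapb.UnimplementedStorageAuthorityServer // the zero value is usable
}

func embedDemo() {
	var good valueEmbedSA
	_, err := good.GetOrder(context.Background(), &sapb.OrderRequest{})
	_ = err // a codes.Unimplemented status, as intended

	var bad pointerEmbedSA
	_, _ = bad.GetOrder(context.Background(), &sapb.OrderRequest{}) // panics: nil pointer dereference
}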
+type UnimplementedStorageAuthorityServer struct{} -func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented") +func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented") } -func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented") +func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented") } -func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented") +func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { + return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") +} +func (UnimplementedStorageAuthorityServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) { + return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented") +} +func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented") } func (UnimplementedStorageAuthorityServer) GetCertificate(context.Context, *Serial) (*proto.Certificate, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCertificate not implemented") } -func (UnimplementedStorageAuthorityServer) GetPrecertificate(context.Context, *Serial) (*proto.Certificate, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPrecertificate not implemented") +func (UnimplementedStorageAuthorityServer) GetLintPrecertificate(context.Context, *Serial) (*proto.Certificate, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetLintPrecertificate not implemented") } func (UnimplementedStorageAuthorityServer) GetCertificateStatus(context.Context, *Serial) (*proto.CertificateStatus, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCertificateStatus not implemented") } -func (UnimplementedStorageAuthorityServer) CountCertificatesByNames(context.Context, *CountCertificatesByNamesRequest) (*CountByNames, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountCertificatesByNames not implemented") -} -func (UnimplementedStorageAuthorityServer) CountRegistrationsByIP(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountRegistrationsByIP not implemented") +func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrder not implemented") } -func (UnimplementedStorageAuthorityServer) CountRegistrationsByIPRange(context.Context, *CountRegistrationsByIPRequest) (*Count, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method CountRegistrationsByIPRange not implemented") +func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented") } -func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOrdersRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountOrders not implemented") +func (UnimplementedStorageAuthorityServer) GetRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRegistration not implemented") } -func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented") +func (UnimplementedStorageAuthorityServer) GetRegistrationByKey(context.Context, *JSONWebKey) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRegistrationByKey not implemented") } -func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") +func (UnimplementedStorageAuthorityServer) GetRevocationStatus(context.Context, *Serial) (*RevocationStatus, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRevocationStatus not implemented") } -func (UnimplementedStorageAuthorityServer) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) { - return nil, status.Errorf(codes.Unimplemented, "method PreviousCertificateExists not implemented") +func (UnimplementedStorageAuthorityServer) GetRevokedCertsByShard(*GetRevokedCertsByShardRequest, grpc.ServerStreamingServer[proto.CRLEntry]) error { + return status.Errorf(codes.Unimplemented, "method GetRevokedCertsByShard not implemented") } -func (UnimplementedStorageAuthorityServer) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorization2 not implemented") +func (UnimplementedStorageAuthorityServer) GetSerialMetadata(context.Context, *Serial) (*SerialMetadata, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSerialMetadata not implemented") } -func (UnimplementedStorageAuthorityServer) GetAuthorizations2(context.Context, *GetAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizations2 not implemented") +func (UnimplementedStorageAuthorityServer) GetSerialsByAccount(*RegistrationID, grpc.ServerStreamingServer[Serial]) error { + return status.Errorf(codes.Unimplemented, "method GetSerialsByAccount not implemented") } -func (UnimplementedStorageAuthorityServer) GetPendingAuthorization2(context.Context, *GetPendingAuthorizationRequest) (*proto.Authorization, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPendingAuthorization2 not implemented") +func (UnimplementedStorageAuthorityServer) GetSerialsByKey(*SPKIHash, grpc.ServerStreamingServer[Serial]) error { + return status.Errorf(codes.Unimplemented, "method GetSerialsByKey not implemented") } -func (UnimplementedStorageAuthorityServer) CountPendingAuthorizations2(context.Context, *RegistrationID) (*Count, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method CountPendingAuthorizations2 not implemented") +func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented") } func (UnimplementedStorageAuthorityServer) GetValidOrderAuthorizations2(context.Context, *GetValidOrderAuthorizationsRequest) (*Authorizations, error) { return nil, status.Errorf(codes.Unimplemented, "method GetValidOrderAuthorizations2 not implemented") } -func (UnimplementedStorageAuthorityServer) CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) { - return nil, status.Errorf(codes.Unimplemented, "method CountInvalidAuthorizations2 not implemented") -} -func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetValidAuthorizations2 not implemented") +func (UnimplementedStorageAuthorityServer) IncidentsForSerial(context.Context, *Serial) (*Incidents, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncidentsForSerial not implemented") } -func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) { +func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *SPKIHash) (*Exists, error) { return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented") } -func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented") +func (UnimplementedStorageAuthorityServer) ReplacementOrderExists(context.Context, *Serial) (*Exists, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplacementOrderExists not implemented") +} +func (UnimplementedStorageAuthorityServer) SerialsForIncident(*SerialsForIncidentRequest, grpc.ServerStreamingServer[IncidentSerial]) error { + return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented") +} +func (UnimplementedStorageAuthorityServer) CheckIdentifiersPaused(context.Context, *PauseRequest) (*Identifiers, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckIdentifiersPaused not implemented") +} +func (UnimplementedStorageAuthorityServer) GetPausedIdentifiers(context.Context, *RegistrationID) (*Identifiers, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPausedIdentifiers not implemented") } -func (UnimplementedStorageAuthorityServer) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistration not implemented") +func (UnimplementedStorageAuthorityServer) GetRateLimitOverride(context.Context, *GetRateLimitOverrideRequest) (*RateLimitOverrideResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRateLimitOverride not implemented") +} +func (UnimplementedStorageAuthorityServer) GetEnabledRateLimitOverrides(*emptypb.Empty, grpc.ServerStreamingServer[RateLimitOverrideResponse]) error { + return status.Errorf(codes.Unimplemented, "method GetEnabledRateLimitOverrides not implemented") +} +func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) { + 
return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented") } -func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*AddCertificateResponse, error) { +func (UnimplementedStorageAuthorityServer) AddCertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method AddCertificate not implemented") } func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *AddCertificateRequest) (*emptypb.Empty, error) { @@ -567,49 +1945,62 @@ func (UnimplementedStorageAuthorityServer) AddPrecertificate(context.Context, *A func (UnimplementedStorageAuthorityServer) AddSerial(context.Context, *AddSerialRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method AddSerial not implemented") } -func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*emptypb.Empty, error) { +func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented") +} +func (UnimplementedStorageAuthorityServer) DeactivateRegistration(context.Context, *RegistrationID) (*proto.Registration, error) { return nil, status.Errorf(codes.Unimplemented, "method DeactivateRegistration not implemented") } -func (UnimplementedStorageAuthorityServer) NewOrder(context.Context, *NewOrderRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewOrder not implemented") +func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented") +} +func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented") } func (UnimplementedStorageAuthorityServer) NewOrderAndAuthzs(context.Context, *NewOrderAndAuthzsRequest) (*proto.Order, error) { return nil, status.Errorf(codes.Unimplemented, "method NewOrderAndAuthzs not implemented") } -func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented") +func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented") +} +func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") } func (UnimplementedStorageAuthorityServer) SetOrderError(context.Context, *SetOrderErrorRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SetOrderError not implemented") } -func (UnimplementedStorageAuthorityServer) FinalizeOrder(context.Context, *FinalizeOrderRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeOrder not implemented") -} -func (UnimplementedStorageAuthorityServer) GetOrder(context.Context, *OrderRequest) (*proto.Order, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method GetOrder not implemented") -} -func (UnimplementedStorageAuthorityServer) GetOrderForNames(context.Context, *GetOrderForNamesRequest) (*proto.Order, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetOrderForNames not implemented") +func (UnimplementedStorageAuthorityServer) SetOrderProcessing(context.Context, *OrderRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetOrderProcessing not implemented") } -func (UnimplementedStorageAuthorityServer) RevokeCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") +func (UnimplementedStorageAuthorityServer) UpdateRegistrationKey(context.Context, *UpdateRegistrationKeyRequest) (*proto.Registration, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRegistrationKey not implemented") } func (UnimplementedStorageAuthorityServer) UpdateRevokedCertificate(context.Context, *RevokeCertificateRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateRevokedCertificate not implemented") } -func (UnimplementedStorageAuthorityServer) NewAuthorizations2(context.Context, *AddPendingAuthorizationsRequest) (*Authorization2IDs, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewAuthorizations2 not implemented") +func (UnimplementedStorageAuthorityServer) LeaseCRLShard(context.Context, *LeaseCRLShardRequest) (*LeaseCRLShardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseCRLShard not implemented") } -func (UnimplementedStorageAuthorityServer) FinalizeAuthorization2(context.Context, *FinalizeAuthorizationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method FinalizeAuthorization2 not implemented") +func (UnimplementedStorageAuthorityServer) UpdateCRLShard(context.Context, *UpdateCRLShardRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCRLShard not implemented") } -func (UnimplementedStorageAuthorityServer) DeactivateAuthorization2(context.Context, *AuthorizationID2) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeactivateAuthorization2 not implemented") +func (UnimplementedStorageAuthorityServer) PauseIdentifiers(context.Context, *PauseRequest) (*PauseIdentifiersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PauseIdentifiers not implemented") } -func (UnimplementedStorageAuthorityServer) AddBlockedKey(context.Context, *AddBlockedKeyRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddBlockedKey not implemented") +func (UnimplementedStorageAuthorityServer) UnpauseAccount(context.Context, *RegistrationID) (*Count, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnpauseAccount not implemented") +} +func (UnimplementedStorageAuthorityServer) AddRateLimitOverride(context.Context, *AddRateLimitOverrideRequest) (*AddRateLimitOverrideResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddRateLimitOverride not implemented") +} +func (UnimplementedStorageAuthorityServer) DisableRateLimitOverride(context.Context, *DisableRateLimitOverrideRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DisableRateLimitOverride not implemented") +} +func (UnimplementedStorageAuthorityServer) 
EnableRateLimitOverride(context.Context, *EnableRateLimitOverrideRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnableRateLimitOverride not implemented") } func (UnimplementedStorageAuthorityServer) mustEmbedUnimplementedStorageAuthorityServer() {} +func (UnimplementedStorageAuthorityServer) testEmbeddedByValue() {} // UnsafeStorageAuthorityServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to StorageAuthorityServer will @@ -619,59 +2010,102 @@ type UnsafeStorageAuthorityServer interface { } func RegisterStorageAuthorityServer(s grpc.ServiceRegistrar, srv StorageAuthorityServer) { + // If the following call panics, it indicates UnimplementedStorageAuthorityServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&StorageAuthority_ServiceDesc, srv) } -func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CountInvalidAuthorizationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthority_CountInvalidAuthorizations2_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RegistrationID) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetRegistration(ctx, in) + return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetRegistration", + FullMethod: StorageAuthority_CountPendingAuthorizations2_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID)) + return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(JSONWebKey) +func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FQDNSetExistsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return
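// A minimal sketch, with hypothetical type names safeSA and riskySA that are
// not part of this diff, of the failure mode the testEmbeddedByValue probe
// above guards against: embedding the Unimplemented struct by value always
// yields working fallback methods, while embedding it by pointer compiles
// but leaves a nil receiver whose promoted methods panic when first called.
type safeSA struct {
	UnimplementedStorageAuthorityServer // embedded by value: recommended
}

type riskySA struct {
	*UnimplementedStorageAuthorityServer // nil pointer embed: panics on use
}

// With the probe in place, registering riskySA{} panics once at startup,
// rather than on the first unimplemented RPC served in production.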
srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in) + return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetRegistrationByKey", + FullMethod: StorageAuthority_FQDNSetExists_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey)) + return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Serial) +func _StorageAuthority_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CountFQDNSetsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in) + return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetSerialMetadata", + FullMethod: StorageAuthority_FQDNSetTimestampsForWindow_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial)) + return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthorizationID2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthority_GetAuthorization2_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2)) } return interceptor(ctx, in, info, handler) } @@ -686,7 +2120,7 @@ func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetCertificate", + FullMethod: StorageAuthority_GetCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageAuthorityServer).GetCertificate(ctx, req.(*Serial)) @@ -694,20 +2128,20 @@ func _StorageAuthority_GetCertificate_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _StorageAuthority_GetLintPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Serial) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetPrecertificate(ctx, in) + return srv.(StorageAuthorityServer).GetLintPrecertificate(ctx, in) } info 
:= &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetPrecertificate", + FullMethod: StorageAuthority_GetLintPrecertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetPrecertificate(ctx, req.(*Serial)) + return srv.(StorageAuthorityServer).GetLintPrecertificate(ctx, req.(*Serial)) } return interceptor(ctx, in, info, handler) } @@ -722,7 +2156,7 @@ func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetCertificateStatus", + FullMethod: StorageAuthority_GetCertificateStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageAuthorityServer).GetCertificateStatus(ctx, req.(*Serial)) @@ -730,614 +2164,705 @@ func _StorageAuthority_GetCertificateStatus_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountCertificatesByNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountCertificatesByNamesRequest) +func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(OrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityServer).GetOrder(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthority_GetOrder_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOrderForNamesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, in) + return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountCertificatesByNames", + FullMethod: StorageAuthority_GetOrderForNames_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountCertificatesByNames(ctx, req.(*CountCertificatesByNamesRequest)) + return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountRegistrationsByIP_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) +func _StorageAuthority_GetRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationID) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, in) + return srv.(StorageAuthorityServer).GetRegistration(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: 
"/sa.StorageAuthority/CountRegistrationsByIP", + FullMethod: StorageAuthority_GetRegistration_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountRegistrationsByIP(ctx, req.(*CountRegistrationsByIPRequest)) + return srv.(StorageAuthorityServer).GetRegistration(ctx, req.(*RegistrationID)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountRegistrationsByIPRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountRegistrationsByIPRequest) +func _StorageAuthority_GetRegistrationByKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JSONWebKey) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, in) + return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountRegistrationsByIPRange", + FullMethod: StorageAuthority_GetRegistrationByKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountRegistrationsByIPRange(ctx, req.(*CountRegistrationsByIPRequest)) + return srv.(StorageAuthorityServer).GetRegistrationByKey(ctx, req.(*JSONWebKey)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountOrders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountOrdersRequest) +func _StorageAuthority_GetRevocationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountOrders(ctx, in) + return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountOrders", + FullMethod: StorageAuthority_GetRevocationStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountOrders(ctx, req.(*CountOrdersRequest)) + return srv.(StorageAuthorityServer).GetRevocationStatus(ctx, req.(*Serial)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CountFQDNSetsRequest) +func _StorageAuthority_GetRevokedCertsByShard_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetRevokedCertsByShardRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).GetRevokedCertsByShard(m, &grpc.GenericServerStream[GetRevokedCertsByShardRequest, proto.CRLEntry]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type StorageAuthority_GetRevokedCertsByShardServer = grpc.ServerStreamingServer[proto.CRLEntry] + +func _StorageAuthority_GetSerialMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountFQDNSets(ctx, in) + return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountFQDNSets", + FullMethod: StorageAuthority_GetSerialMetadata_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountFQDNSets(ctx, req.(*CountFQDNSetsRequest)) + return srv.(StorageAuthorityServer).GetSerialMetadata(ctx, req.(*Serial)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FQDNSetExistsRequest) +func _StorageAuthority_GetSerialsByAccount_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(RegistrationID) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).GetSerialsByAccount(m, &grpc.GenericServerStream[RegistrationID, Serial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetSerialsByAccountServer = grpc.ServerStreamingServer[Serial] + +func _StorageAuthority_GetSerialsByKey_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SPKIHash) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).GetSerialsByKey(m, &grpc.GenericServerStream[SPKIHash, Serial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
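// A hedged sketch of implementing one of these streaming methods against the
// new generic stream type; mySA and the serial values are hypothetical, and
// the Serial message is assumed to expose a generated Serial string field.
// grpc.ServerStreamingServer[Serial] exposes Send, so the aliases above keep
// existing code that names the old per-method stream types compiling.
type mySA struct {
	UnimplementedStorageAuthorityServer
}

func (mySA) GetSerialsByAccount(req *RegistrationID, stream grpc.ServerStreamingServer[Serial]) error {
	for _, s := range []string{"00aa", "00bb"} { // hypothetical serials
		if err := stream.Send(&Serial{Serial: s}); err != nil {
			return err // the client disconnected or the stream broke
		}
	}
	return nil // returning nil ends the stream cleanly
}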
+type StorageAuthority_GetSerialsByKeyServer = grpc.ServerStreamingServer[Serial] + +func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValidAuthorizationsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).FQDNSetExists(ctx, in) + return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/FQDNSetExists", + FullMethod: StorageAuthority_GetValidAuthorizations2_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FQDNSetExists(ctx, req.(*FQDNSetExistsRequest)) + return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_PreviousCertificateExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PreviousCertificateExistsRequest) +func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValidOrderAuthorizationsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, in) + return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/PreviousCertificateExists", + FullMethod: StorageAuthority_GetValidOrderAuthorizations2_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).PreviousCertificateExists(ctx, req.(*PreviousCertificateExistsRequest)) + return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizationID2) +func _StorageAuthority_IncidentsForSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetAuthorization2(ctx, in) + return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageAuthority_IncidentsForSerial_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageAuthorityServer).IncidentsForSerial(ctx, req.(*Serial)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SPKIHash) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityServer).KeyBlocked(ctx, in) } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetAuthorization2", + FullMethod: StorageAuthority_KeyBlocked_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetAuthorization2(ctx, req.(*AuthorizationID2)) + return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*SPKIHash)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAuthorizationsRequest) +func _StorageAuthority_ReplacementOrderExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Serial) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).ReplacementOrderExists(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetAuthorizations2", + FullMethod: StorageAuthority_ReplacementOrderExists_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetAuthorizations2(ctx, req.(*GetAuthorizationsRequest)) + return srv.(StorageAuthorityServer).ReplacementOrderExists(ctx, req.(*Serial)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetPendingAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPendingAuthorizationRequest) +func _StorageAuthority_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SerialsForIncidentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).SerialsForIncident(m, &grpc.GenericServerStream[SerialsForIncidentRequest, IncidentSerial]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type StorageAuthority_SerialsForIncidentServer = grpc.ServerStreamingServer[IncidentSerial] + +func _StorageAuthority_CheckIdentifiersPaused_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, in) + return srv.(StorageAuthorityServer).CheckIdentifiersPaused(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetPendingAuthorization2", + FullMethod: StorageAuthority_CheckIdentifiersPaused_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetPendingAuthorization2(ctx, req.(*GetPendingAuthorizationRequest)) + return srv.(StorageAuthorityServer).CheckIdentifiersPaused(ctx, req.(*PauseRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountPendingAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _StorageAuthority_GetPausedIdentifiers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RegistrationID) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).GetPausedIdentifiers(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountPendingAuthorizations2", + FullMethod: StorageAuthority_GetPausedIdentifiers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountPendingAuthorizations2(ctx, req.(*RegistrationID)) + return srv.(StorageAuthorityServer).GetPausedIdentifiers(ctx, req.(*RegistrationID)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetValidOrderAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidOrderAuthorizationsRequest) +func _StorageAuthority_GetRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRateLimitOverrideRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).GetRateLimitOverride(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetValidOrderAuthorizations2", + FullMethod: StorageAuthority_GetRateLimitOverride_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetValidOrderAuthorizations2(ctx, req.(*GetValidOrderAuthorizationsRequest)) + return srv.(StorageAuthorityServer).GetRateLimitOverride(ctx, req.(*GetRateLimitOverrideRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_CountInvalidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(CountInvalidAuthorizationsRequest) +func _StorageAuthority_GetEnabledRateLimitOverrides_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).GetEnabledRateLimitOverrides(m, &grpc.GenericServerStream[emptypb.Empty, RateLimitOverrideResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type StorageAuthority_GetEnabledRateLimitOverridesServer = grpc.ServerStreamingServer[RateLimitOverrideResponse] + +func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddBlockedKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/CountInvalidAuthorizations2", + FullMethod: StorageAuthority_AddBlockedKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).CountInvalidAuthorizations2(ctx, req.(*CountInvalidAuthorizationsRequest)) + return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetValidAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetValidAuthorizationsRequest) +func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddCertificateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).AddCertificate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetValidAuthorizations2", + FullMethod: StorageAuthority_AddCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetValidAuthorizations2(ctx, req.(*GetValidAuthorizationsRequest)) + return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KeyBlockedRequest) +func _StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddCertificateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).KeyBlocked(ctx, in) + return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/KeyBlocked", + FullMethod: StorageAuthority_AddPrecertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(StorageAuthorityServer).KeyBlocked(ctx, req.(*KeyBlockedRequest)) + return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proto.Registration) +func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddSerialRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).NewRegistration(ctx, in) + return srv.(StorageAuthorityServer).AddSerial(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/NewRegistration", + FullMethod: StorageAuthority_AddSerial_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration)) + return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_UpdateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(proto.Registration) +func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AuthorizationID2) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).UpdateRegistration(ctx, in) + return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/UpdateRegistration", + FullMethod: StorageAuthority_DeactivateAuthorization2_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).UpdateRegistration(ctx, req.(*proto.Registration)) + return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_AddCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddCertificateRequest) +func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationID) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).AddCertificate(ctx, in) + return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/AddCertificate", + FullMethod: StorageAuthority_DeactivateRegistration_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddCertificate(ctx, req.(*AddCertificateRequest)) + return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID)) } return interceptor(ctx, in, info, handler) } -func 
_StorageAuthority_AddPrecertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddCertificateRequest) +func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FinalizeAuthorizationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).AddPrecertificate(ctx, in) + return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/AddPrecertificate", + FullMethod: StorageAuthority_FinalizeAuthorization2_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddPrecertificate(ctx, req.(*AddCertificateRequest)) + return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_AddSerial_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddSerialRequest) +func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FinalizeOrderRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).AddSerial(ctx, in) + return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/AddSerial", + FullMethod: StorageAuthority_FinalizeOrder_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddSerial(ctx, req.(*AddSerialRequest)) + return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_DeactivateRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RegistrationID) +func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewOrderAndAuthzsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, in) + return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/DeactivateRegistration", + FullMethod: StorageAuthority_NewOrderAndAuthzs_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).DeactivateRegistration(ctx, req.(*RegistrationID)) + return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_NewOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewOrderRequest) +func 
_StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(proto.Registration) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).NewOrder(ctx, in) + return srv.(StorageAuthorityServer).NewRegistration(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/NewOrder", + FullMethod: StorageAuthority_NewRegistration_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewOrder(ctx, req.(*NewOrderRequest)) + return srv.(StorageAuthorityServer).NewRegistration(ctx, req.(*proto.Registration)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_NewOrderAndAuthzs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewOrderAndAuthzsRequest) +func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeCertificateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, in) + return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/NewOrderAndAuthzs", + FullMethod: StorageAuthority_RevokeCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewOrderAndAuthzs(ctx, req.(*NewOrderAndAuthzsRequest)) + return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OrderRequest) +func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetOrderErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in) + return srv.(StorageAuthorityServer).SetOrderError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/SetOrderProcessing", + FullMethod: StorageAuthority_SetOrderError_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest)) + return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_SetOrderError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetOrderErrorRequest) +func _StorageAuthority_SetOrderProcessing_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(OrderRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return 
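// A minimal sketch (logInterceptor is hypothetical) of the
// grpc.UnaryServerInterceptor contract these generated handlers feed: when an
// interceptor is installed, each handler wraps the concrete method in a
// grpc.UnaryHandler and passes the exported *_FullMethodName constant via
// grpc.UnaryServerInfo, so middleware can match methods without hard-coded
// strings. The standard library "log" import is assumed.
func logInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("SA call: %s", info.FullMethod) // e.g. the value of StorageAuthority_NewRegistration_FullMethodName
	return handler(ctx, req)
}

// Installed once per server via grpc.NewServer(grpc.UnaryInterceptor(logInterceptor)).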
srv.(StorageAuthorityServer).SetOrderError(ctx, in) + return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/SetOrderError", + FullMethod: StorageAuthority_SetOrderProcessing_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).SetOrderError(ctx, req.(*SetOrderErrorRequest)) + return srv.(StorageAuthorityServer).SetOrderProcessing(ctx, req.(*OrderRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_FinalizeOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FinalizeOrderRequest) +func _StorageAuthority_UpdateRegistrationKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRegistrationKeyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).FinalizeOrder(ctx, in) + return srv.(StorageAuthorityServer).UpdateRegistrationKey(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/FinalizeOrder", + FullMethod: StorageAuthority_UpdateRegistrationKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FinalizeOrder(ctx, req.(*FinalizeOrderRequest)) + return srv.(StorageAuthorityServer).UpdateRegistrationKey(ctx, req.(*UpdateRegistrationKeyRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetOrder_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(OrderRequest) +func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeCertificateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetOrder(ctx, in) + return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetOrder", + FullMethod: StorageAuthority_UpdateRevokedCertificate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetOrder(ctx, req.(*OrderRequest)) + return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_GetOrderForNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOrderForNamesRequest) +func _StorageAuthority_LeaseCRLShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseCRLShardRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).GetOrderForNames(ctx, in) + return srv.(StorageAuthorityServer).LeaseCRLShard(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/GetOrderForNames", + FullMethod: 
StorageAuthority_LeaseCRLShard_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).GetOrderForNames(ctx, req.(*GetOrderForNamesRequest)) + return srv.(StorageAuthorityServer).LeaseCRLShard(ctx, req.(*LeaseCRLShardRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeCertificateRequest) +func _StorageAuthority_UpdateCRLShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateCRLShardRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).RevokeCertificate(ctx, in) + return srv.(StorageAuthorityServer).UpdateCRLShard(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/RevokeCertificate", + FullMethod: StorageAuthority_UpdateCRLShard_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).RevokeCertificate(ctx, req.(*RevokeCertificateRequest)) + return srv.(StorageAuthorityServer).UpdateCRLShard(ctx, req.(*UpdateCRLShardRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_UpdateRevokedCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RevokeCertificateRequest) +func _StorageAuthority_PauseIdentifiers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, in) + return srv.(StorageAuthorityServer).PauseIdentifiers(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/UpdateRevokedCertificate", + FullMethod: StorageAuthority_PauseIdentifiers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).UpdateRevokedCertificate(ctx, req.(*RevokeCertificateRequest)) + return srv.(StorageAuthorityServer).PauseIdentifiers(ctx, req.(*PauseRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_NewAuthorizations2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddPendingAuthorizationsRequest) +func _StorageAuthority_UnpauseAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RegistrationID) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, in) + return srv.(StorageAuthorityServer).UnpauseAccount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/NewAuthorizations2", + FullMethod: StorageAuthority_UnpauseAccount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).NewAuthorizations2(ctx, 
req.(*AddPendingAuthorizationsRequest)) + return srv.(StorageAuthorityServer).UnpauseAccount(ctx, req.(*RegistrationID)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_FinalizeAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FinalizeAuthorizationRequest) +func _StorageAuthority_AddRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddRateLimitOverrideRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, in) + return srv.(StorageAuthorityServer).AddRateLimitOverride(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/FinalizeAuthorization2", + FullMethod: StorageAuthority_AddRateLimitOverride_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).FinalizeAuthorization2(ctx, req.(*FinalizeAuthorizationRequest)) + return srv.(StorageAuthorityServer).AddRateLimitOverride(ctx, req.(*AddRateLimitOverrideRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_DeactivateAuthorization2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthorizationID2) +func _StorageAuthority_DisableRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DisableRateLimitOverrideRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, in) + return srv.(StorageAuthorityServer).DisableRateLimitOverride(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/DeactivateAuthorization2", + FullMethod: StorageAuthority_DisableRateLimitOverride_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).DeactivateAuthorization2(ctx, req.(*AuthorizationID2)) + return srv.(StorageAuthorityServer).DisableRateLimitOverride(ctx, req.(*DisableRateLimitOverrideRequest)) } return interceptor(ctx, in, info, handler) } -func _StorageAuthority_AddBlockedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddBlockedKeyRequest) +func _StorageAuthority_EnableRateLimitOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EnableRateLimitOverrideRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageAuthorityServer).AddBlockedKey(ctx, in) + return srv.(StorageAuthorityServer).EnableRateLimitOverride(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/sa.StorageAuthority/AddBlockedKey", + FullMethod: StorageAuthority_EnableRateLimitOverride_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageAuthorityServer).AddBlockedKey(ctx, req.(*AddBlockedKeyRequest)) + return 
srv.(StorageAuthorityServer).EnableRateLimitOverride(ctx, req.(*EnableRateLimitOverrideRequest)) } return interceptor(ctx, in, info, handler) } @@ -1350,96 +2875,96 @@ var StorageAuthority_ServiceDesc = grpc.ServiceDesc{ HandlerType: (*StorageAuthorityServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "GetRegistration", - Handler: _StorageAuthority_GetRegistration_Handler, + MethodName: "CountInvalidAuthorizations2", + Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler, }, { - MethodName: "GetRegistrationByKey", - Handler: _StorageAuthority_GetRegistrationByKey_Handler, + MethodName: "CountPendingAuthorizations2", + Handler: _StorageAuthority_CountPendingAuthorizations2_Handler, }, { - MethodName: "GetSerialMetadata", - Handler: _StorageAuthority_GetSerialMetadata_Handler, + MethodName: "FQDNSetExists", + Handler: _StorageAuthority_FQDNSetExists_Handler, }, { - MethodName: "GetCertificate", - Handler: _StorageAuthority_GetCertificate_Handler, + MethodName: "FQDNSetTimestampsForWindow", + Handler: _StorageAuthority_FQDNSetTimestampsForWindow_Handler, }, { - MethodName: "GetPrecertificate", - Handler: _StorageAuthority_GetPrecertificate_Handler, + MethodName: "GetAuthorization2", + Handler: _StorageAuthority_GetAuthorization2_Handler, }, { - MethodName: "GetCertificateStatus", - Handler: _StorageAuthority_GetCertificateStatus_Handler, + MethodName: "GetCertificate", + Handler: _StorageAuthority_GetCertificate_Handler, }, { - MethodName: "CountCertificatesByNames", - Handler: _StorageAuthority_CountCertificatesByNames_Handler, + MethodName: "GetLintPrecertificate", + Handler: _StorageAuthority_GetLintPrecertificate_Handler, }, { - MethodName: "CountRegistrationsByIP", - Handler: _StorageAuthority_CountRegistrationsByIP_Handler, + MethodName: "GetCertificateStatus", + Handler: _StorageAuthority_GetCertificateStatus_Handler, }, { - MethodName: "CountRegistrationsByIPRange", - Handler: _StorageAuthority_CountRegistrationsByIPRange_Handler, + MethodName: "GetOrder", + Handler: _StorageAuthority_GetOrder_Handler, }, { - MethodName: "CountOrders", - Handler: _StorageAuthority_CountOrders_Handler, + MethodName: "GetOrderForNames", + Handler: _StorageAuthority_GetOrderForNames_Handler, }, { - MethodName: "CountFQDNSets", - Handler: _StorageAuthority_CountFQDNSets_Handler, + MethodName: "GetRegistration", + Handler: _StorageAuthority_GetRegistration_Handler, }, { - MethodName: "FQDNSetExists", - Handler: _StorageAuthority_FQDNSetExists_Handler, + MethodName: "GetRegistrationByKey", + Handler: _StorageAuthority_GetRegistrationByKey_Handler, }, { - MethodName: "PreviousCertificateExists", - Handler: _StorageAuthority_PreviousCertificateExists_Handler, + MethodName: "GetRevocationStatus", + Handler: _StorageAuthority_GetRevocationStatus_Handler, }, { - MethodName: "GetAuthorization2", - Handler: _StorageAuthority_GetAuthorization2_Handler, + MethodName: "GetSerialMetadata", + Handler: _StorageAuthority_GetSerialMetadata_Handler, }, { - MethodName: "GetAuthorizations2", - Handler: _StorageAuthority_GetAuthorizations2_Handler, + MethodName: "GetValidAuthorizations2", + Handler: _StorageAuthority_GetValidAuthorizations2_Handler, }, { - MethodName: "GetPendingAuthorization2", - Handler: _StorageAuthority_GetPendingAuthorization2_Handler, + MethodName: "GetValidOrderAuthorizations2", + Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler, }, { - MethodName: "CountPendingAuthorizations2", - Handler: _StorageAuthority_CountPendingAuthorizations2_Handler, + MethodName: 
"IncidentsForSerial", + Handler: _StorageAuthority_IncidentsForSerial_Handler, }, { - MethodName: "GetValidOrderAuthorizations2", - Handler: _StorageAuthority_GetValidOrderAuthorizations2_Handler, + MethodName: "KeyBlocked", + Handler: _StorageAuthority_KeyBlocked_Handler, }, { - MethodName: "CountInvalidAuthorizations2", - Handler: _StorageAuthority_CountInvalidAuthorizations2_Handler, + MethodName: "ReplacementOrderExists", + Handler: _StorageAuthority_ReplacementOrderExists_Handler, }, { - MethodName: "GetValidAuthorizations2", - Handler: _StorageAuthority_GetValidAuthorizations2_Handler, + MethodName: "CheckIdentifiersPaused", + Handler: _StorageAuthority_CheckIdentifiersPaused_Handler, }, { - MethodName: "KeyBlocked", - Handler: _StorageAuthority_KeyBlocked_Handler, + MethodName: "GetPausedIdentifiers", + Handler: _StorageAuthority_GetPausedIdentifiers_Handler, }, { - MethodName: "NewRegistration", - Handler: _StorageAuthority_NewRegistration_Handler, + MethodName: "GetRateLimitOverride", + Handler: _StorageAuthority_GetRateLimitOverride_Handler, }, { - MethodName: "UpdateRegistration", - Handler: _StorageAuthority_UpdateRegistration_Handler, + MethodName: "AddBlockedKey", + Handler: _StorageAuthority_AddBlockedKey_Handler, }, { MethodName: "AddCertificate", @@ -1453,63 +2978,105 @@ var StorageAuthority_ServiceDesc = grpc.ServiceDesc{ MethodName: "AddSerial", Handler: _StorageAuthority_AddSerial_Handler, }, + { + MethodName: "DeactivateAuthorization2", + Handler: _StorageAuthority_DeactivateAuthorization2_Handler, + }, { MethodName: "DeactivateRegistration", Handler: _StorageAuthority_DeactivateRegistration_Handler, }, { - MethodName: "NewOrder", - Handler: _StorageAuthority_NewOrder_Handler, + MethodName: "FinalizeAuthorization2", + Handler: _StorageAuthority_FinalizeAuthorization2_Handler, + }, + { + MethodName: "FinalizeOrder", + Handler: _StorageAuthority_FinalizeOrder_Handler, }, { MethodName: "NewOrderAndAuthzs", Handler: _StorageAuthority_NewOrderAndAuthzs_Handler, }, { - MethodName: "SetOrderProcessing", - Handler: _StorageAuthority_SetOrderProcessing_Handler, + MethodName: "NewRegistration", + Handler: _StorageAuthority_NewRegistration_Handler, + }, + { + MethodName: "RevokeCertificate", + Handler: _StorageAuthority_RevokeCertificate_Handler, }, { MethodName: "SetOrderError", Handler: _StorageAuthority_SetOrderError_Handler, }, { - MethodName: "FinalizeOrder", - Handler: _StorageAuthority_FinalizeOrder_Handler, + MethodName: "SetOrderProcessing", + Handler: _StorageAuthority_SetOrderProcessing_Handler, }, { - MethodName: "GetOrder", - Handler: _StorageAuthority_GetOrder_Handler, + MethodName: "UpdateRegistrationKey", + Handler: _StorageAuthority_UpdateRegistrationKey_Handler, }, { - MethodName: "GetOrderForNames", - Handler: _StorageAuthority_GetOrderForNames_Handler, + MethodName: "UpdateRevokedCertificate", + Handler: _StorageAuthority_UpdateRevokedCertificate_Handler, }, { - MethodName: "RevokeCertificate", - Handler: _StorageAuthority_RevokeCertificate_Handler, + MethodName: "LeaseCRLShard", + Handler: _StorageAuthority_LeaseCRLShard_Handler, }, { - MethodName: "UpdateRevokedCertificate", - Handler: _StorageAuthority_UpdateRevokedCertificate_Handler, + MethodName: "UpdateCRLShard", + Handler: _StorageAuthority_UpdateCRLShard_Handler, }, { - MethodName: "NewAuthorizations2", - Handler: _StorageAuthority_NewAuthorizations2_Handler, + MethodName: "PauseIdentifiers", + Handler: _StorageAuthority_PauseIdentifiers_Handler, }, { - MethodName: "FinalizeAuthorization2", - Handler: 
_StorageAuthority_FinalizeAuthorization2_Handler, + MethodName: "UnpauseAccount", + Handler: _StorageAuthority_UnpauseAccount_Handler, }, { - MethodName: "DeactivateAuthorization2", - Handler: _StorageAuthority_DeactivateAuthorization2_Handler, + MethodName: "AddRateLimitOverride", + Handler: _StorageAuthority_AddRateLimitOverride_Handler, }, { - MethodName: "AddBlockedKey", - Handler: _StorageAuthority_AddBlockedKey_Handler, + MethodName: "DisableRateLimitOverride", + Handler: _StorageAuthority_DisableRateLimitOverride_Handler, + }, + { + MethodName: "EnableRateLimitOverride", + Handler: _StorageAuthority_EnableRateLimitOverride_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetRevokedCertsByShard", + Handler: _StorageAuthority_GetRevokedCertsByShard_Handler, + ServerStreams: true, + }, + { + StreamName: "GetSerialsByAccount", + Handler: _StorageAuthority_GetSerialsByAccount_Handler, + ServerStreams: true, + }, + { + StreamName: "GetSerialsByKey", + Handler: _StorageAuthority_GetSerialsByKey_Handler, + ServerStreams: true, + }, + { + StreamName: "SerialsForIncident", + Handler: _StorageAuthority_SerialsForIncident_Handler, + ServerStreams: true, + }, + { + StreamName: "GetEnabledRateLimitOverrides", + Handler: _StorageAuthority_GetEnabledRateLimitOverrides_Handler, + ServerStreams: true, }, }, - Streams: []grpc.StreamDesc{}, Metadata: "sa.proto", } diff --git a/sa/proto/sadb.pb.go b/sa/proto/sadb.pb.go new file mode 100644 index 00000000000..184a01df3bd --- /dev/null +++ b/sa/proto/sadb.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v3.20.1 +// source: sadb.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Used internally for storage in the DB, not for RPCs. +type Authzs struct { + state protoimpl.MessageState `protogen:"open.v1"` + AuthzIDs []int64 `protobuf:"varint,1,rep,packed,name=authzIDs,proto3" json:"authzIDs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Authzs) Reset() { + *x = Authzs{} + mi := &file_sadb_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Authzs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Authzs) ProtoMessage() {} + +func (x *Authzs) ProtoReflect() protoreflect.Message { + mi := &file_sadb_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Authzs.ProtoReflect.Descriptor instead. 
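The Streams list registered in StorageAuthority_ServiceDesc above grows from empty to five server-streaming methods. A minimal client-side consumption sketch, assuming a generated sapb.StorageAuthorityClient; the SerialsForIncidentRequest type is an assumption, since its definition is outside this hunk:

    import (
        "context"
        "errors"
        "io"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    // drainSerials sketches consuming one of the newly registered
    // server-streaming RPCs: call once, then Recv until io.EOF.
    func drainSerials(ctx context.Context, client sapb.StorageAuthorityClient, req *sapb.SerialsForIncidentRequest) error {
        stream, err := client.SerialsForIncident(ctx, req)
        if err != nil {
            return err
        }
        for {
            resp, err := stream.Recv()
            if errors.Is(err, io.EOF) {
                return nil // server closed the stream normally
            }
            if err != nil {
                return err
            }
            _ = resp // handle one streamed message here
        }
    }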
+func (*Authzs) Descriptor() ([]byte, []int) { + return file_sadb_proto_rawDescGZIP(), []int{0} +} + +func (x *Authzs) GetAuthzIDs() []int64 { + if x != nil { + return x.AuthzIDs + } + return nil +} + +var File_sadb_proto protoreflect.FileDescriptor + +var file_sadb_proto_rawDesc = string([]byte{ + 0x0a, 0x0a, 0x73, 0x61, 0x64, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x73, 0x61, + 0x22, 0x24, 0x0a, 0x06, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, + 0x74, 0x68, 0x7a, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x08, 0x61, 0x75, + 0x74, 0x68, 0x7a, 0x49, 0x44, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_sadb_proto_rawDescOnce sync.Once + file_sadb_proto_rawDescData []byte +) + +func file_sadb_proto_rawDescGZIP() []byte { + file_sadb_proto_rawDescOnce.Do(func() { + file_sadb_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sadb_proto_rawDesc), len(file_sadb_proto_rawDesc))) + }) + return file_sadb_proto_rawDescData +} + +var file_sadb_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_sadb_proto_goTypes = []any{ + (*Authzs)(nil), // 0: sa.Authzs +} +var file_sadb_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sadb_proto_init() } +func file_sadb_proto_init() { + if File_sadb_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_sadb_proto_rawDesc), len(file_sadb_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sadb_proto_goTypes, + DependencyIndexes: file_sadb_proto_depIdxs, + MessageInfos: file_sadb_proto_msgTypes, + }.Build() + File_sadb_proto = out.File + file_sadb_proto_goTypes = nil + file_sadb_proto_depIdxs = nil +} diff --git a/sa/proto/sadb.proto b/sa/proto/sadb.proto new file mode 100644 index 00000000000..353993b14ee --- /dev/null +++ b/sa/proto/sadb.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package sa; +option go_package = "github.com/letsencrypt/boulder/sa/proto"; + +// Used internally for storage in the DB, not for RPCs. 
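Because sa.Authzs is used for storage in the DB rather than for RPCs, its natural use is a marshal/unmarshal round trip over the generated Go code above (the proto definition completes just below). A minimal sketch; the surrounding storage logic is assumed, not shown in this diff:

    import (
        "fmt"

        "google.golang.org/protobuf/proto"

        sapb "github.com/letsencrypt/boulder/sa/proto"
    )

    func roundTripAuthzs() error {
        // Pack a set of authorization row IDs for storage in a DB column.
        in := &sapb.Authzs{AuthzIDs: []int64{101, 102, 103}}
        blob, err := proto.Marshal(in)
        if err != nil {
            return err
        }

        // Later, unpack the column back into IDs.
        out := new(sapb.Authzs)
        err = proto.Unmarshal(blob, out)
        if err != nil {
            return err
        }
        fmt.Println(out.GetAuthzIDs()) // [101 102 103]
        return nil
    }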
+message Authzs { + repeated int64 authzIDs = 1; +} diff --git a/sa/proto/subsets.go b/sa/proto/subsets.go index fcf52279dae..f24c069b518 100644 --- a/sa/proto/subsets.go +++ b/sa/proto/subsets.go @@ -5,42 +5,17 @@ package proto import ( context "context" - proto "github.com/letsencrypt/boulder/core/proto" grpc "google.golang.org/grpc" emptypb "google.golang.org/protobuf/types/known/emptypb" -) -// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface -type StorageAuthorityGetterClient interface { - GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error) - GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error) - GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error) - CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error) - CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error) - CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error) - CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) - FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) - PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) - GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) - GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error) - CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error) - GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) - GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) - KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) - GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error) - GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error) -} + proto "github.com/letsencrypt/boulder/core/proto" +) // StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface that only reads and writes certificates type StorageAuthorityCertificateClient interface { AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts 
...grpc.CallOption) (*emptypb.Empty, error) - GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) - AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error) + AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) + GetLintPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error) } diff --git a/sa/rate_limits.go b/sa/rate_limits.go deleted file mode 100644 index cc2f2f3e4d2..00000000000 --- a/sa/rate_limits.go +++ /dev/null @@ -1,135 +0,0 @@ -package sa - -import ( - "context" - "strings" - "time" - - "github.com/letsencrypt/boulder/db" - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/weppos/publicsuffix-go/publicsuffix" -) - -// baseDomain returns the eTLD+1 of a domain name for the purpose of rate -// limiting. For a domain name that is itself an eTLD, it returns its input. -func baseDomain(name string) string { - eTLDPlusOne, err := publicsuffix.Domain(name) - if err != nil { - // publicsuffix.Domain will return an error if the input name is itself a - // public suffix. In that case we use the input name as the key for rate - // limiting. Since all of its subdomains will have separate keys for rate - // limiting (e.g. "foo.bar.publicsuffix.com" will have - // "bar.publicsuffix.com", this means that domains exactly equal to a - // public suffix get their own rate limit bucket. This is important - // because otherwise they might be perpetually unable to issue, assuming - // the rate of issuance from their subdomains was high enough. - return name - } - return eTLDPlusOne -} - -// addCertificatesPerName adds 1 to the rate limit count for the provided domains, -// in a specific time bucket. It must be executed in a transaction, and the -// input timeToTheHour must be a time rounded to an hour. -func (ssa *SQLStorageAuthority) addCertificatesPerName( - ctx context.Context, - db db.SelectExecer, - names []string, - timeToTheHour time.Time, -) error { - // De-duplicate the base domains. - baseDomainsMap := make(map[string]bool) - var qmarks []string - var values []interface{} - for _, name := range names { - base := baseDomain(name) - if !baseDomainsMap[base] { - baseDomainsMap[base] = true - values = append(values, base, timeToTheHour, 1) - qmarks = append(qmarks, "(?, ?, ?)") - } - } - - _, err := db.Exec(`INSERT INTO certificatesPerName (eTLDPlusOne, time, count) VALUES `+ - strings.Join(qmarks, ", ")+` ON DUPLICATE KEY UPDATE count=count+1;`, - values...) - if err != nil { - return err - } - - return nil -} - -// countCertificates returns the count of certificates issued for a domain's -// eTLD+1 (aka base domain), during a given time range. 
-func (ssa *SQLStorageAuthority) countCertificates(dbMap db.Selector, domain string, timeRange *sapb.Range) (int64, error) { - var counts []int64 - _, err := dbMap.Select( - &counts, - `SELECT count FROM certificatesPerName - WHERE eTLDPlusOne = :baseDomain AND - time > :earliest AND - time <= :latest`, - map[string]interface{}{ - "baseDomain": baseDomain(domain), - "earliest": time.Unix(0, timeRange.Earliest), - "latest": time.Unix(0, timeRange.Latest), - }) - if err != nil { - if db.IsNoRows(err) { - return 0, nil - } - return 0, err - } - var total int64 - for _, count := range counts { - total += count - } - return total, nil -} - -// addNewOrdersRateLimit adds 1 to the rate limit count for the provided ID, -// in a specific time bucket. It must be executed in a transaction, and the -// input timeToTheMinute must be a time rounded to a minute. -func addNewOrdersRateLimit(ctx context.Context, dbMap db.SelectExecer, regID int64, timeToTheMinute time.Time) error { - _, err := dbMap.Exec(`INSERT INTO newOrdersRL - (regID, time, count) - VALUES (?, ?, 1) - ON DUPLICATE KEY UPDATE count=count+1;`, - regID, - timeToTheMinute, - ) - if err != nil { - return err - } - return nil -} - -// countNewOrders returns the count of orders created in the given time range -// for the given registration ID -func countNewOrders(ctx context.Context, dbMap db.Selector, req *sapb.CountOrdersRequest) (*sapb.Count, error) { - var counts []int64 - _, err := dbMap.Select( - &counts, - `SELECT count FROM newOrdersRL - WHERE regID = :regID AND - time > :earliest AND - time <= :latest`, - map[string]interface{}{ - "regID": req.AccountID, - "earliest": time.Unix(0, req.Range.Earliest), - "latest": time.Unix(0, req.Range.Latest), - }, - ) - if err != nil { - if db.IsNoRows(err) { - return &sapb.Count{Count: 0}, nil - } - return nil, err - } - var total int64 - for _, count := range counts { - total += count - } - return &sapb.Count{Count: total}, nil -} diff --git a/sa/rate_limits_test.go b/sa/rate_limits_test.go deleted file mode 100644 index 8fbe003e619..00000000000 --- a/sa/rate_limits_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package sa - -import ( - "context" - "fmt" - "testing" - "time" - - sapb "github.com/letsencrypt/boulder/sa/proto" - "github.com/letsencrypt/boulder/test" -) - -func TestCertsPerNameRateLimitTable(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - aprilFirst, err := time.Parse(time.RFC3339, "2019-04-01T00:00:00Z") - if err != nil { - t.Fatal(err) - } - - type inputCase struct { - time time.Time - names []string - } - inputs := []inputCase{ - {aprilFirst, []string{"example.com"}}, - {aprilFirst, []string{"example.com", "www.example.com"}}, - {aprilFirst, []string{"example.com", "other.example.com"}}, - {aprilFirst, []string{"dyndns.org"}}, - {aprilFirst, []string{"mydomain.dyndns.org"}}, - {aprilFirst, []string{"mydomain.dyndns.org"}}, - {aprilFirst, []string{"otherdomain.dyndns.org"}}, - } - - // For each hour in a week, add an enry for a certificate that has - // progressively more names. 
- var manyNames []string - for i := 0; i < 7*24; i++ { - manyNames = append(manyNames, fmt.Sprintf("%d.manynames.example.net", i)) - inputs = append(inputs, inputCase{aprilFirst.Add(time.Duration(i) * time.Hour), manyNames}) - } - - for _, input := range inputs { - tx, err := sa.dbMap.Begin() - if err != nil { - t.Fatal(err) - } - err = sa.addCertificatesPerName(context.Background(), tx, input.names, input.time) - if err != nil { - t.Fatal(err) - } - err = tx.Commit() - if err != nil { - t.Fatal(err) - } - } - - const aWeek = time.Duration(7*24) * time.Hour - - testCases := []struct { - caseName string - domainName string - expected int64 - }{ - {"name doesn't exist", "non.example.org", 0}, - {"base name gets dinged for all certs including it", "example.com", 3}, - {"subdomain gets dinged for neighbors", "www.example.com", 3}, - {"other subdomain", "other.example.com", 3}, - {"many subdomains", "1.manynames.example.net", 168}, - {"public suffix gets its own bucket", "dyndns.org", 1}, - {"subdomain of public suffix gets its own bucket", "mydomain.dyndns.org", 2}, - {"subdomain of public suffix gets its own bucket 2", "otherdomain.dyndns.org", 1}, - } - - for _, tc := range testCases { - t.Run(tc.caseName, func(t *testing.T) { - timeRange := &sapb.Range{ - Earliest: aprilFirst.Add(-1 * time.Second).UnixNano(), - Latest: aprilFirst.Add(aWeek).UnixNano(), - } - count, err := sa.countCertificatesByName(sa.dbMap, tc.domainName, timeRange) - if err != nil { - t.Fatal(err) - } - if count != tc.expected { - t.Errorf("Expected count of %d for %q, got %d", tc.expected, tc.domainName, count) - } - }) - } -} - -func TestNewOrdersRateLimitTable(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - manyCountRegID := int64(2) - start := time.Now().Truncate(time.Minute) - req := &sapb.CountOrdersRequest{ - AccountID: 1, - Range: &sapb.Range{ - Earliest: start.UnixNano(), - Latest: start.Add(time.Minute * 10).UnixNano(), - }, - } - - for i := 0; i <= 10; i++ { - tx, err := sa.dbMap.Begin() - test.AssertNotError(t, err, "failed to open tx") - for j := 0; j < i+1; j++ { - err = addNewOrdersRateLimit(context.Background(), tx, manyCountRegID, start.Add(time.Minute*time.Duration(i))) - } - test.AssertNotError(t, err, "addNewOrdersRateLimit failed") - test.AssertNotError(t, tx.Commit(), "failed to commit tx") - } - - count, err := countNewOrders(context.Background(), sa.dbMap, req) - test.AssertNotError(t, err, "countNewOrders failed") - test.AssertEquals(t, count.Count, int64(0)) - - req.AccountID = manyCountRegID - count, err = countNewOrders(context.Background(), sa.dbMap, req) - test.AssertNotError(t, err, "countNewOrders failed") - test.AssertEquals(t, count.Count, int64(65)) - - req.Range.Earliest = start.Add(time.Minute * 5).UnixNano() - req.Range.Latest = start.Add(time.Minute * 10).UnixNano() - count, err = countNewOrders(context.Background(), sa.dbMap, req) - test.AssertNotError(t, err, "countNewOrders failed") - test.AssertEquals(t, count.Count, int64(45)) -} diff --git a/sa/rocsp_sa.go b/sa/rocsp_sa.go deleted file mode 100644 index d03af3ed38a..00000000000 --- a/sa/rocsp_sa.go +++ /dev/null @@ -1,45 +0,0 @@ -package sa - -import ( - "context" - "time" - - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" - "golang.org/x/crypto/ocsp" -) - -type rocspWriter interface { - StoreResponse(ctx context.Context, respBytes []byte, shortIssuerID byte, ttl time.Duration) error -} - -// storeOCSPRedis stores an OCSP response in a redis cluster. 
-func (ssa *SQLStorageAuthority) storeOCSPRedis(ctx context.Context, resp []byte, issuerID int64) error { - nextUpdate, err := getNextUpdate(resp) - if err != nil { - ssa.redisStoreResponse.WithLabelValues("parse_response_error").Inc() - return err - } - ttl := time.Until(nextUpdate) - - shortIssuerID, err := rocsp_config.FindIssuerByID(issuerID, ssa.shortIssuers) - if err != nil { - ssa.redisStoreResponse.WithLabelValues("find_issuer_error").Inc() - return err - } - err = ssa.rocspWriteClient.StoreResponse(ctx, resp, shortIssuerID.ShortID(), ttl) - if err != nil { - ssa.redisStoreResponse.WithLabelValues("store_response_error").Inc() - return err - } - ssa.redisStoreResponse.WithLabelValues("success").Inc() - return nil -} - -// getNextUpdate returns the NextUpdate value from the OCSP response. -func getNextUpdate(resp []byte) (time.Time, error) { - response, err := ocsp.ParseResponse(resp, nil) - if err != nil { - return time.Time{}, err - } - return response.NextUpdate, nil -} diff --git a/sa/rocsp_sa_test.go b/sa/rocsp_sa_test.go deleted file mode 100644 index 2c70a5875f1..00000000000 --- a/sa/rocsp_sa_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package sa - -import ( - "context" - "io/ioutil" - "testing" - - "github.com/letsencrypt/boulder/rocsp" - "github.com/letsencrypt/boulder/test" -) - -func getOCSPResponse() ([]byte, error) { - return ioutil.ReadFile("testdata/ocsp.response") -} - -func TestStoreOCSPRedis(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - response, err := getOCSPResponse() - test.AssertNotError(t, err, "unexpected error") - ctx := context.Background() - err = sa.storeOCSPRedis(ctx, response, 58923463773186183) - test.AssertNotError(t, err, "unexpected error") -} - -func TestStoreOCSPRedisInvalidIssuer(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - response, err := getOCSPResponse() - test.AssertNotError(t, err, "unexpected error") - ctx := context.Background() - // 1234 is expected to not be a valid issuerID - err = sa.storeOCSPRedis(ctx, response, 1234) - test.AssertContains(t, err.Error(), "no issuer found for an ID in certificateStatus: 1234") -} - -func TestStoreOCSPRedisFail(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - sa.rocspWriteClient = rocsp.NewMockWriteFailClient() - response, err := getOCSPResponse() - test.AssertNotError(t, err, "unexpected error") - ctx := context.Background() - err = sa.storeOCSPRedis(ctx, response, 58923463773186183) - test.AssertContains(t, err.Error(), "could not store response") -} diff --git a/sa/sa.go b/sa/sa.go index 9c41f4e613c..74cba23ed46 100644 --- a/sa/sa.go +++ b/sa/sa.go @@ -2,63 +2,50 @@ package sa import ( "context" - "crypto/sha256" "crypto/x509" + "database/sql" "encoding/json" "errors" "fmt" - "math/big" - "net" "strings" - "sync" "time" + "google.golang.org/protobuf/proto" + + "github.com/go-jose/go-jose/v4" "github.com/jmhodges/clock" "github.com/prometheus/client_golang/prometheus" - "golang.org/x/crypto/ocsp" + "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/protobuf/types/known/emptypb" - jose "gopkg.in/square/go-jose.v2" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/letsencrypt/boulder/core" corepb "github.com/letsencrypt/boulder/core/proto" "github.com/letsencrypt/boulder/db" berrors "github.com/letsencrypt/boulder/errors" - "github.com/letsencrypt/boulder/features" bgrpc "github.com/letsencrypt/boulder/grpc" "github.com/letsencrypt/boulder/identifier" blog "github.com/letsencrypt/boulder/log" 
"github.com/letsencrypt/boulder/revocation" - rocsp_config "github.com/letsencrypt/boulder/rocsp/config" sapb "github.com/letsencrypt/boulder/sa/proto" + "github.com/letsencrypt/boulder/unpause" ) -var errIncompleteRequest = errors.New("incomplete gRPC request message") - -type certCountFunc func(db db.Selector, domain string, timeRange *sapb.Range) (int64, error) +var ( + errIncompleteRequest = errors.New("incomplete gRPC request message") +) -// SQLStorageAuthority defines a Storage Authority +// SQLStorageAuthority defines a Storage Authority. +// +// Note that although SQLStorageAuthority does have methods wrapping all of the +// read-only methods provided by the SQLStorageAuthorityRO, those wrapper +// implementations are in saro.go, next to the real implementations. type SQLStorageAuthority struct { - sapb.UnimplementedStorageAuthorityServer - dbMap *db.WrappedMap - dbReadOnlyMap *db.WrappedMap - - // Redis client for storing OCSP responses in Redis. - rocspWriteClient rocspWriter + sapb.UnsafeStorageAuthorityServer - // Short issuer map used by rocsp. - shortIssuers []rocsp_config.ShortIDIssuer + *SQLStorageAuthorityRO - clk clock.Clock - log blog.Logger - - // For RPCs that generate multiple, parallelizable SQL queries, this is the - // max parallelism they will use (to avoid consuming too many MariaDB - // threads). - parallelismPerRPC int - - // We use function types here so we can mock out this internal function in - // unittests. - countCertificatesByName certCountFunc + dbMap *db.WrappedMap // rateLimitWriteErrors is a Counter for the number of times // a ratelimit update transaction failed during AddCertificate request @@ -66,397 +53,256 @@ type SQLStorageAuthority struct { // transactions fail and so use this stat to maintain visibility into the rate // this occurs. rateLimitWriteErrors prometheus.Counter - - // redisStoreResponse is a counter of OCSP responses written to redis by - // result. - redisStoreResponse *prometheus.CounterVec } -// orderFQDNSet contains the SHA256 hash of the lowercased, comma joined names -// from a new-order request, along with the corresponding orderID, the -// registration ID, and the order expiry. This is used to find -// existing orders for reuse. -type orderFQDNSet struct { - ID int64 - SetHash []byte - OrderID int64 - RegistrationID int64 - Expires time.Time -} +var _ sapb.StorageAuthorityServer = (*SQLStorageAuthority)(nil) -// NewSQLStorageAuthority provides persistence using a SQL backend for -// Boulder. It will modify the given gorp.DbMap by adding relevant tables. -func NewSQLStorageAuthority( +// NewSQLStorageAuthorityWrapping provides persistence using a SQL backend for +// Boulder. It takes a read-only storage authority to wrap, which is useful if +// you are constructing both types of implementations and want to share +// read-only database connections between them. 
+func NewSQLStorageAuthorityWrapping( + ssaro *SQLStorageAuthorityRO, dbMap *db.WrappedMap, - dbReadOnlyMap *db.WrappedMap, - rocspWriteClient rocspWriter, - shortIssuers []rocsp_config.ShortIDIssuer, - clk clock.Clock, - logger blog.Logger, stats prometheus.Registerer, - parallelismPerRPC int, ) (*SQLStorageAuthority, error) { - SetSQLDebug(dbMap, logger) - - rateLimitWriteErrors := prometheus.NewCounter(prometheus.CounterOpts{ + rateLimitWriteErrors := promauto.With(stats).NewCounter(prometheus.CounterOpts{ Name: "rate_limit_write_errors", Help: "number of failed ratelimit update transactions during AddCertificate", }) - stats.MustRegister(rateLimitWriteErrors) - - redisStoreResponse := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "redis_store_response", - Help: "Count of OCSP Response writes to redis", - }, []string{"result"}) - stats.MustRegister(redisStoreResponse) ssa := &SQLStorageAuthority{ - dbMap: dbMap, - dbReadOnlyMap: dbReadOnlyMap, - rocspWriteClient: rocspWriteClient, - shortIssuers: shortIssuers, - clk: clk, - log: logger, - parallelismPerRPC: parallelismPerRPC, - rateLimitWriteErrors: rateLimitWriteErrors, - redisStoreResponse: redisStoreResponse, + SQLStorageAuthorityRO: ssaro, + dbMap: dbMap, + rateLimitWriteErrors: rateLimitWriteErrors, } - ssa.countCertificatesByName = ssa.countCertificates - return ssa, nil } -// GetRegistration obtains a Registration by ID -func (ssa *SQLStorageAuthority) GetRegistration(ctx context.Context, req *sapb.RegistrationID) (*corepb.Registration, error) { - if req == nil || req.Id == 0 { - return nil, errIncompleteRequest - } - - const query = "WHERE id = ?" - model, err := selectRegistration(ssa.dbMap.WithContext(ctx), query, req.Id) +// NewSQLStorageAuthority provides persistence using a SQL backend for +// Boulder. It constructs its own read-only storage authority to wrap. +func NewSQLStorageAuthority( + dbMap *db.WrappedMap, + dbReadOnlyMap *db.WrappedMap, + dbIncidentsMap *db.WrappedMap, + parallelismPerRPC int, + lagFactor time.Duration, + clk clock.Clock, + logger blog.Logger, + stats prometheus.Registerer, +) (*SQLStorageAuthority, error) { + ssaro, err := NewSQLStorageAuthorityRO( + dbReadOnlyMap, dbIncidentsMap, stats, parallelismPerRPC, lagFactor, clk, logger) if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("registration with ID '%d' not found", req.Id) - } return nil, err } - return registrationModelToPb(model) + return NewSQLStorageAuthorityWrapping(ssaro, dbMap, stats) } -// GetRegistrationByKey obtains a Registration by JWK -func (ssa *SQLStorageAuthority) GetRegistrationByKey(ctx context.Context, req *sapb.JSONWebKey) (*corepb.Registration, error) { - if req == nil || len(req.Jwk) == 0 { +// NewRegistration stores a new Registration +func (ssa *SQLStorageAuthority) NewRegistration(ctx context.Context, req *corepb.Registration) (*corepb.Registration, error) { + if len(req.Key) == 0 { return nil, errIncompleteRequest } - var jwk jose.JSONWebKey - err := jwk.UnmarshalJSON(req.Jwk) + reg, err := registrationPbToModel(req) if err != nil { return nil, err } - const query = "WHERE jwk_sha256 = ?" 
- sha, err := core.KeyDigestB64(jwk.Key) - if err != nil { - return nil, err - } - model, err := selectRegistration(ssa.dbMap.WithContext(ctx), query, sha) + reg.CreatedAt = ssa.clk.Now() + + err = ssa.dbMap.Insert(ctx, reg) if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("no registrations with public key sha256 %q", sha) + if db.IsDuplicate(err) { + // duplicate entry error can only happen when jwk_sha256 collides, indicate + // to caller that the provided key is already in use + return nil, berrors.DuplicateError("key is already in use for a different account") } return nil, err } - - return registrationModelToPb(model) -} - -// incrementIP returns a copy of `ip` incremented at a bit index `index`, -// or in other words the first IP of the next highest subnet given a mask of -// length `index`. -// In order to easily account for overflow, we treat ip as a big.Int and add to -// it. If the increment overflows the max size of a net.IP, return the highest -// possible net.IP. -func incrementIP(ip net.IP, index int) net.IP { - bigInt := new(big.Int) - bigInt.SetBytes([]byte(ip)) - incr := new(big.Int).Lsh(big.NewInt(1), 128-uint(index)) - bigInt.Add(bigInt, incr) - // bigInt.Bytes can be shorter than 16 bytes, so stick it into a - // full-sized net.IP. - resultBytes := bigInt.Bytes() - if len(resultBytes) > 16 { - return net.ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") - } - result := make(net.IP, 16) - copy(result[16-len(resultBytes):], resultBytes) - return result -} - -// ipRange returns a range of IP addresses suitable for querying MySQL for the -// purpose of rate limiting using a range that is inclusive on the lower end and -// exclusive at the higher end. If ip is an IPv4 address, it returns that address, -// plus the one immediately higher than it. If ip is an IPv6 address, it applies -// a /48 mask to it and returns the lowest IP in the resulting network, and the -// first IP outside of the resulting network. -func ipRange(ip net.IP) (net.IP, net.IP) { - ip = ip.To16() - // For IPv6, match on a certain subnet range, since one person can commonly - // have an entire /48 to themselves. - maskLength := 48 - // For IPv4 addresses, do a match on exact address, so begin = ip and end = - // next higher IP. - if ip.To4() != nil { - maskLength = 128 - } - - mask := net.CIDRMask(maskLength, 128) - begin := ip.Mask(mask) - end := incrementIP(begin, maskLength) - - return begin, end + return registrationModelToPb(reg) } -// CountRegistrationsByIP returns the number of registrations created in the -// time range for a single IP address. -func (ssa *SQLStorageAuthority) CountRegistrationsByIP(ctx context.Context, req *sapb.CountRegistrationsByIPRequest) (*sapb.Count, error) { - if len(req.Ip) == 0 || req.Range.Earliest == 0 || req.Range.Latest == 0 { +// UpdateRegistrationKey stores an updated key in a Registration. 
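A caller-side sketch for the UpdateRegistrationKey implementation that follows. The request field names match the accesses in the handler; the client value itself is an assumed sapb.StorageAuthorityClient:

    // jwkJSON is the JSON encoding of the account's new key; the server
    // re-parses it and derives the jwk_sha256 column value itself.
    updated, err := client.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{
        RegistrationID: regID,
        Jwk:            jwkJSON,
    })
    if err != nil {
        return err
    }
    _ = updated // *corepb.Registration reflecting the new key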
+func (ssa *SQLStorageAuthority) UpdateRegistrationKey(ctx context.Context, req *sapb.UpdateRegistrationKeyRequest) (*corepb.Registration, error) { + if core.IsAnyNilOrZero(req.RegistrationID, req.Jwk) { return nil, errIncompleteRequest } - var count int64 - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne( - &count, - `SELECT COUNT(1) FROM registrations - WHERE - initialIP = :ip AND - :earliest < createdAt AND - createdAt <= :latest`, - map[string]interface{}{ - "ip": req.Ip, - "earliest": time.Unix(0, req.Range.Earliest), - "latest": time.Unix(0, req.Range.Latest), - }) + // Even though we don't need to convert from JSON to an in-memory JSONWebKey + // for the sake of the `Key` field, we do need to do the conversion in order + // to compute the SHA256 key digest. + var jwk jose.JSONWebKey + err := jwk.UnmarshalJSON(req.Jwk) if err != nil { - return &sapb.Count{Count: -1}, err - } - return &sapb.Count{Count: count}, nil -} - -// CountRegistrationsByIPRange returns the number of registrations created in -// the time range in an IP range. For IPv4 addresses, that range is limited to -// the single IP. For IPv6 addresses, that range is a /48, since it's not -// uncommon for one person to have a /48 to themselves. -func (ssa *SQLStorageAuthority) CountRegistrationsByIPRange(ctx context.Context, req *sapb.CountRegistrationsByIPRequest) (*sapb.Count, error) { - if len(req.Ip) == 0 || req.Range.Earliest == 0 || req.Range.Latest == 0 { - return nil, errIncompleteRequest + return nil, fmt.Errorf("parsing JWK: %w", err) } - - var count int64 - beginIP, endIP := ipRange(req.Ip) - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne( - &count, - `SELECT COUNT(1) FROM registrations - WHERE - :beginIP <= initialIP AND - initialIP < :endIP AND - :earliest < createdAt AND - createdAt <= :latest`, - map[string]interface{}{ - "earliest": time.Unix(0, req.Range.Earliest), - "latest": time.Unix(0, req.Range.Latest), - "beginIP": beginIP, - "endIP": endIP, - }) + sha, err := core.KeyDigestB64(jwk.Key) if err != nil { - return &sapb.Count{Count: -1}, err + return nil, fmt.Errorf("computing key digest: %w", err) } - return &sapb.Count{Count: count}, nil -} -// CountCertificatesByNames counts, for each input domain, the number of -// certificates issued in the given time range for that domain and its -// subdomains. It returns a map from domains to counts, which is guaranteed to -// contain an entry for each input domain, so long as err is nil. -// Queries will be run in parallel. If any of them error, only one error will -// be returned. -func (ssa *SQLStorageAuthority) CountCertificatesByNames(ctx context.Context, req *sapb.CountCertificatesByNamesRequest) (*sapb.CountByNames, error) { - if len(req.Names) == 0 || req.Range.Earliest == 0 || req.Range.Latest == 0 { - return nil, errIncompleteRequest - } + result, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + result, err := tx.ExecContext(ctx, + "UPDATE registrations SET jwk = ?, jwk_sha256 = ? WHERE id = ? 
LIMIT 1", + req.Jwk, + sha, + req.RegistrationID, + ) + if err != nil { + if db.IsDuplicate(err) { + // duplicate entry error can only happen when jwk_sha256 collides, indicate + // to caller that the provided key is already in use + return nil, berrors.DuplicateError("key is already in use for a different account") + } + return nil, err + } + rowsAffected, err := result.RowsAffected() + if err != nil || rowsAffected != 1 { + return nil, berrors.InternalServerError("no registration ID '%d' updated with new jwk", req.RegistrationID) + } - work := make(chan string, len(req.Names)) - type result struct { - err error - count int64 - domain string - } - results := make(chan result, len(req.Names)) - for _, domain := range req.Names { - work <- domain - } - close(work) - var wg sync.WaitGroup - ctx, cancel := context.WithCancel(ctx) - defer cancel() - // We may perform up to 100 queries, depending on what's in the certificate - // request. Parallelize them so we don't hit our timeout, but limit the - // parallelism so we don't consume too many threads on the database. - for i := 0; i < ssa.parallelismPerRPC; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for domain := range work { - select { - case <-ctx.Done(): - results <- result{err: ctx.Err()} - return - default: - } - currentCount, err := ssa.countCertificatesByName(ssa.dbReadOnlyMap.WithContext(ctx), domain, req.Range) - if err != nil { - results <- result{err: err} - // Skip any further work - cancel() - return - } - results <- result{ - count: currentCount, - domain: domain, - } + updatedRegistrationModel, err := selectRegistration(ctx, tx, "id", req.RegistrationID) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("registration with ID '%d' not found", req.RegistrationID) } - }() - } - wg.Wait() - close(results) - counts := make(map[string]int64) - for r := range results { - if r.err != nil { - return nil, r.err + return nil, err + } + updatedRegistration, err := registrationModelToPb(updatedRegistrationModel) + if err != nil { + return nil, err } - counts[r.domain] = r.count - } - return &sapb.CountByNames{Counts: counts}, nil -} -func ReverseName(domain string) string { - labels := strings.Split(domain, ".") - for i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 { - labels[i], labels[j] = labels[j], labels[i] + return updatedRegistration, nil + }) + if overallError != nil { + return nil, overallError } - return strings.Join(labels, ".") + + return result.(*corepb.Registration), nil } -// GetCertificate takes a serial number and returns the corresponding -// certificate, or error if it does not exist. -func (ssa *SQLStorageAuthority) GetCertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) { - if req == nil || req.Serial == "" { +// AddSerial writes a record of a serial number generation to the DB. 
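AddSerial, implemented just below, now takes protobuf Timestamps (note the Created.AsTime() and Expires.AsTime() conversions in its body). A caller-side sketch, with an assumed client and an illustrative expiry window:

    now := time.Now()
    _, err := client.AddSerial(ctx, &sapb.AddSerialRequest{
        Serial:  serialHex, // hex serial string
        RegID:   regID,
        Created: timestamppb.New(now),
        Expires: timestamppb.New(now.Add(90 * 24 * time.Hour)), // illustrative lifetime
    })
    if err != nil {
        return err
    }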
+func (ssa *SQLStorageAuthority) AddSerial(ctx context.Context, req *sapb.AddSerialRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Serial, req.RegID, req.Created, req.Expires) { return nil, errIncompleteRequest } - if !core.ValidSerial(req.Serial) { - return nil, fmt.Errorf("Invalid certificate serial %s", req.Serial) - } - - cert, err := SelectCertificate(ssa.dbMap.WithContext(ctx), req.Serial) - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("certificate with serial %q not found", req.Serial) - } + err := ssa.dbMap.Insert(ctx, &recordedSerialModel{ + Serial: req.Serial, + RegistrationID: req.RegID, + Created: req.Created.AsTime(), + Expires: req.Expires.AsTime(), + }) if err != nil { return nil, err } - return bgrpc.CertToPB(cert), nil + return &emptypb.Empty{}, nil } -// GetCertificateStatus takes a hexadecimal string representing the full 128-bit serial -// number of a certificate and returns data about that certificate's current -// validity. -func (ssa *SQLStorageAuthority) GetCertificateStatus(ctx context.Context, req *sapb.Serial) (*corepb.CertificateStatus, error) { - if req.Serial == "" { +// AddPrecertificate writes a record of a linting certificate to the database. +// +// Note: The name "AddPrecertificate" is a historical artifact, and this is now +// always called with a linting certificate. See #6807. +// +// Note: this is not idempotent: it does not protect against inserting the same +// certificate multiple times. Calling code needs to first insert the cert's +// serial into the Serials table to ensure uniqueness. +func (ssa *SQLStorageAuthority) AddPrecertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Der, req.RegID, req.IssuerNameID, req.Issued) { return nil, errIncompleteRequest } - if !core.ValidSerial(req.Serial) { - err := fmt.Errorf("Invalid certificate serial %s", req.Serial) - return nil, err - } - - certStatus, err := SelectCertificateStatus(ssa.dbMap.WithContext(ctx), req.Serial) + parsed, err := x509.ParseCertificate(req.Der) if err != nil { return nil, err } + serialHex := core.SerialToString(parsed.SerialNumber) - return bgrpc.CertStatusToPB(certStatus), nil -} - -// NewRegistration stores a new Registration -func (ssa *SQLStorageAuthority) NewRegistration(ctx context.Context, req *corepb.Registration) (*corepb.Registration, error) { - if len(req.Key) == 0 || len(req.InitialIP) == 0 { - return nil, errIncompleteRequest + preCertModel := &lintingCertModel{ + Serial: serialHex, + RegistrationID: req.RegID, + DER: req.Der, + Issued: req.Issued.AsTime(), + Expires: parsed.NotAfter, } - reg, err := registrationPbToModel(req) - if err != nil { - return nil, err - } + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + // Select to see if precert exists + var row struct { + Count int64 + } + err := tx.SelectOne(ctx, &row, "SELECT COUNT(*) as count FROM precertificates WHERE serial=?", serialHex) + if err != nil { + return nil, err + } + if row.Count > 0 { + return nil, berrors.DuplicateError("cannot add a duplicate cert") + } - reg.CreatedAt = ssa.clk.Now() + err = tx.Insert(ctx, preCertModel) + if err != nil { + return nil, err + } - err = ssa.dbMap.WithContext(ctx).Insert(reg) - if err != nil { - if db.IsDuplicate(err) { - // duplicate entry error can only happen when jwk_sha256 collides, indicate - // to caller that the provided key is already in use - return nil, berrors.DuplicateError("key is already in use for a different 
account") + // An arbitrary, but valid date for fields revokedDate and lastExpirationNagSent. + // These fields in the database are NOT NULL so we can't omit them; and we don't + // want to pass `time.Time{}` because that results in inserts of `0000-00-00`, which + // is forbidden in strict mode (when NO_ZERO_DATE is on). + dummyDate := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + + status := core.OCSPStatusGood + cs := &certificateStatusModel{ + Serial: serialHex, + Status: status, + OCSPLastUpdated: ssa.clk.Now(), + RevokedDate: dummyDate, + RevokedReason: 0, + LastExpirationNagSent: dummyDate, + NotAfter: parsed.NotAfter, + IsExpired: false, + IssuerID: req.IssuerNameID, + } + err = ssa.dbMap.Insert(ctx, cs) + if err != nil { + return nil, err } - return nil, err - } - return registrationModelToPb(reg) -} -// UpdateRegistration stores an updated Registration -func (ssa *SQLStorageAuthority) UpdateRegistration(ctx context.Context, req *corepb.Registration) (*emptypb.Empty, error) { - if req == nil || req.Id == 0 || len(req.Key) == 0 || len(req.InitialIP) == 0 { - return nil, errIncompleteRequest - } + idents := identifier.FromCert(parsed) - const query = "WHERE id = ?" - curr, err := selectRegistration(ssa.dbMap.WithContext(ctx), query, req.Id) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("registration with ID '%d' not found", req.Id) + isRenewal, err := ssa.checkFQDNSetExists( + ctx, + tx.SelectOne, + idents) + if err != nil { + return nil, err } - return nil, err - } - update, err := registrationPbToModel(req) - if err != nil { - return nil, err - } + err = addIssuedNames(ctx, tx, parsed, isRenewal) + if err != nil { + return nil, err + } - // Copy the existing registration model's LockCol to the new updated - // registration model's LockCol - update.LockCol = curr.LockCol - n, err := ssa.dbMap.WithContext(ctx).Update(update) - if err != nil { - if db.IsDuplicate(err) { - // duplicate entry error can only happen when jwk_sha256 collides, indicate - // to caller that the provided key is already in use - return nil, berrors.DuplicateError("key is already in use for a different account") + err = addKeyHash(ctx, tx, parsed) + if err != nil { + return nil, err } - return nil, err - } - if n == 0 { - return nil, berrors.NotFoundError("registration with ID '%d' not found", req.Id) + + return nil, nil + }) + if overallError != nil { + return nil, overallError } return &emptypb.Empty{}, nil } -// AddCertificate stores an issued certificate and returns the digest as -// a string, or an error if any occurred. -func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*sapb.AddCertificateResponse, error) { - if len(req.Der) == 0 || req.RegID == 0 || req.Issued == 0 { +// AddCertificate stores an issued certificate, returning an error if it is a +// duplicate or if any other failure occurs. 
+func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.AddCertificateRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Der, req.RegID, req.Issued) { return nil, errIncompleteRequest } parsedCertificate, err := x509.ParseCertificate(req.Der) @@ -471,16 +317,16 @@ func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.Ad Serial: serial, Digest: digest, DER: req.Der, - Issued: time.Unix(0, req.Issued), + Issued: req.Issued.AsTime(), Expires: parsedCertificate.NotAfter, } - isRenewalRaw, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { // Select to see if cert exists var row struct { Count int64 } - err := txWithCtx.SelectOne(&row, "SELECT count(1) as count FROM certificates WHERE serial=?", serial) + err := tx.SelectOne(ctx, &row, "SELECT COUNT(*) as count FROM certificates WHERE serial=?", serial) if err != nil { return nil, err } @@ -489,61 +335,28 @@ func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.Ad } // Save the final certificate - err = txWithCtx.Insert(cert) - if err != nil { - return nil, err - } - - // NOTE(@cpu): When we collect up names to check if an FQDN set exists (e.g. - // that it is a renewal) we use just the DNSNames from the certificate and - // ignore the Subject Common Name (if any). This is a safe assumption because - // if a certificate we issued were to have a Subj. CN not present as a SAN it - // would be a misissuance and miscalculating whether the cert is a renewal or - // not for the purpose of rate limiting is the least of our troubles. - isRenewal, err := ssa.checkFQDNSetExists( - txWithCtx.SelectOne, - parsedCertificate.DNSNames) + err = tx.Insert(ctx, cert) if err != nil { return nil, err } - return isRenewal, err + return nil, err }) if overallError != nil { return nil, overallError } - // Recast the interface{} return from db.WithTransaction as a bool, returning - // an error if we can't. - var isRenewal bool - if boolVal, ok := isRenewalRaw.(bool); !ok { - return nil, fmt.Errorf( - "AddCertificate db.WithTransaction returned %T out var, expected bool", - isRenewalRaw) - } else { - isRenewal = boolVal - } - - // In a separate transaction perform the work required to update tables used - // for rate limits. Since the effects of failing these writes is slight - // miscalculation of rate limits we choose to not fail the AddCertificate - // operation if the rate limit update transaction fails. - _, rlTransactionErr := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - // Add to the rate limit table, but only for new certificates. Renewals - // don't count against the certificatesPerName limit. - if !isRenewal { - timeToTheHour := parsedCertificate.NotBefore.Round(time.Hour) - err := ssa.addCertificatesPerName(ctx, txWithCtx, parsedCertificate.DNSNames, timeToTheHour) - if err != nil { - return nil, err - } - } - - // Update the FQDN sets now that there is a final certificate to ensure rate - // limits are calculated correctly. + // In a separate transaction, perform the work required to update the table + // used for order reuse. Since the effect of failing the write is just a + // missed opportunity to reuse an order, we choose to not fail the + // AddCertificate operation if this update transaction fails. 
+ _, fqdnTransactionErr := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + // Update the FQDN sets now that there is a final certificate to ensure + // reuse is determined correctly. err = addFQDNSet( - txWithCtx, - parsedCertificate.DNSNames, + ctx, + tx, + identifier.FromCert(parsedCertificate), core.SerialToString(parsedCertificate.SerialNumber), parsedCertificate.NotBefore, parsedCertificate.NotAfter, @@ -554,486 +367,204 @@ func (ssa *SQLStorageAuthority) AddCertificate(ctx context.Context, req *sapb.Ad return nil, nil }) - // If the ratelimit transaction failed increment a stat and log a warning + // If the FQDN sets transaction failed, increment a stat and log a warning // but don't return an error from AddCertificate. - if rlTransactionErr != nil { + if fqdnTransactionErr != nil { ssa.rateLimitWriteErrors.Inc() - ssa.log.AuditErrf("failed AddCertificate ratelimit update transaction: %v", rlTransactionErr) + ssa.log.Errf("failed AddCertificate FQDN sets insert transaction: %v", fqdnTransactionErr) } - return &sapb.AddCertificateResponse{Digest: digest}, nil + return &emptypb.Empty{}, nil } -func (ssa *SQLStorageAuthority) CountOrders(ctx context.Context, req *sapb.CountOrdersRequest) (*sapb.Count, error) { - if req.AccountID == 0 || req.Range.Earliest == 0 || req.Range.Latest == 0 { +// DeactivateRegistration deactivates a currently valid registration +func (ssa *SQLStorageAuthority) DeactivateRegistration(ctx context.Context, req *sapb.RegistrationID) (*corepb.Registration, error) { + if req == nil || req.Id == 0 { return nil, errIncompleteRequest } - if features.Enabled(features.FasterNewOrdersRateLimit) { - return countNewOrders(ctx, ssa.dbReadOnlyMap, req) - } - - var count int64 - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne( - &count, - `SELECT count(1) FROM orders - WHERE registrationID = :acctID AND - created >= :earliest AND - created < :latest`, - map[string]interface{}{ - "acctID": req.AccountID, - "earliest": time.Unix(0, req.Range.Earliest), - "latest": time.Unix(0, req.Range.Latest), - }, - ) - if err != nil { - return nil, err - } - - return &sapb.Count{Count: count}, nil -} + result, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + result, err := tx.ExecContext(ctx, + "UPDATE registrations SET status = ? WHERE status = ? AND id = ? LIMIT 1", + string(core.StatusDeactivated), + string(core.StatusValid), + req.Id, + ) + if err != nil { + return nil, fmt.Errorf("deactivating account %d: %w", req.Id, err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return nil, fmt.Errorf("deactivating account %d: %w", req.Id, err) + } + if rowsAffected == 0 { + return nil, berrors.NotFoundError("no active account with id %d", req.Id) + } else if rowsAffected > 1 { + return nil, berrors.InternalServerError("unexpectedly deactivated multiple accounts with id %d", req.Id) + } -// HashNames returns a hash of the names requested. This is intended for use -// when interacting with the orderFqdnSets table. 
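
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the RowsAffected checks
// in DeactivateRegistration above turn a single UPDATE into a guarded state
// transition. The same pattern in isolation, with simplified errors:
func transitionRow(ctx context.Context, tx *sql.Tx, id int64, from, to string) error {
	res, err := tx.ExecContext(ctx,
		"UPDATE registrations SET status = ? WHERE status = ? AND id = ? LIMIT 1",
		to, from, id)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	switch {
	case n == 0:
		// Either no such row, or the row was not in the expected state.
		return fmt.Errorf("no row with id %d in state %q", id, from)
	case n > 1:
		// LIMIT 1 should make this impossible; treat it as a broken invariant.
		return fmt.Errorf("unexpectedly updated %d rows for id %d", n, id)
	}
	return nil
}
// ---------------------------------------------------------------------------
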
-func HashNames(names []string) []byte { - names = core.UniqueLowerNames(names) - hash := sha256.Sum256([]byte(strings.Join(names, ","))) - return hash[:] -} + updatedRegistrationModel, err := selectRegistration(ctx, tx, "id", req.Id) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("fetching account %d: no rows found", req.Id) + } + return nil, fmt.Errorf("fetching account %d: %w", req.Id, err) + } -func addFQDNSet(db db.Inserter, names []string, serial string, issued time.Time, expires time.Time) error { - return db.Insert(&core.FQDNSet{ - SetHash: HashNames(names), - Serial: serial, - Issued: issued, - Expires: expires, - }) -} + updatedRegistration, err := registrationModelToPb(updatedRegistrationModel) + if err != nil { + return nil, err + } -// addOrderFQDNSet creates a new OrderFQDNSet row using the provided -// information. This function accepts a transaction so that the orderFqdnSet -// addition can take place within the order addition transaction. The caller is -// required to rollback the transaction if an error is returned. -func addOrderFQDNSet( - db db.Inserter, - names []string, - orderID int64, - regID int64, - expires time.Time) error { - return db.Insert(&orderFQDNSet{ - SetHash: HashNames(names), - OrderID: orderID, - RegistrationID: regID, - Expires: expires, + return updatedRegistration, nil }) -} - -// deleteOrderFQDNSet deletes a OrderFQDNSet row that matches the provided -// orderID. This function accepts a transaction so that the deletion can -// take place within the finalization transaction. The caller is required to -// rollback the transaction if an error is returned. -func deleteOrderFQDNSet( - db db.Execer, - orderID int64) error { - - result, err := db.Exec(` - DELETE FROM orderFqdnSets - WHERE orderID = ?`, - orderID) - if err != nil { - return err - } - rowsDeleted, err := result.RowsAffected() - if err != nil { - return err + if overallError != nil { + return nil, overallError } - // We always expect there to be an order FQDN set row for each - // pending/processing order that is being finalized. If there isn't one then - // something is amiss and should be raised as an internal server error - if rowsDeleted == 0 { - return berrors.InternalServerError("No orderFQDNSet exists to delete") + + res, ok := result.(*corepb.Registration) + if !ok { + return nil, fmt.Errorf("unexpected casting failure in DeactivateRegistration") } - return nil -} -func addIssuedNames(db db.Execer, cert *x509.Certificate, isRenewal bool) error { - if len(cert.DNSNames) == 0 { - return berrors.InternalServerError("certificate has no DNSNames") - } - var qmarks []string - var values []interface{} - for _, name := range cert.DNSNames { - values = append(values, - ReverseName(name), - core.SerialToString(cert.SerialNumber), - cert.NotBefore, - isRenewal) - qmarks = append(qmarks, "(?, ?, ?, ?)") - } - query := `INSERT INTO issuedNames (reversedName, serial, notBefore, renewal) VALUES ` + strings.Join(qmarks, ", ") + `;` - _, err := db.Exec(query, values...) - return err + return res, nil } -// CountFQDNSets counts the total number of issuances, for a set of domains, -// that occurred during a given window of time. -func (ssa *SQLStorageAuthority) CountFQDNSets(ctx context.Context, req *sapb.CountFQDNSetsRequest) (*sapb.Count, error) { - if req.Window == 0 || len(req.Domains) == 0 { +// DeactivateAuthorization2 deactivates a currently valid or pending authorization. 
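
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the removed HashNames
// depended on core.UniqueLowerNames to canonicalize the set before hashing
// (its replacement hashes identifiers instead). A self-contained equivalent
// of the removed behavior: lowercase, dedupe, sort, join with commas, SHA-256.
func hashNames(names []string) []byte {
	set := make(map[string]struct{}, len(names))
	for _, n := range names {
		set[strings.ToLower(n)] = struct{}{}
	}
	unique := make([]string, 0, len(set))
	for n := range set {
		unique = append(unique, n)
	}
	sort.Strings(unique)
	h := sha256.Sum256([]byte(strings.Join(unique, ",")))
	return h[:]
}
// ---------------------------------------------------------------------------
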
+func (ssa *SQLStorageAuthority) DeactivateAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*emptypb.Empty, error) { + if req.Id == 0 { return nil, errIncompleteRequest } - var count int64 - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne( - &count, - // We don't do a select across both fqdnSets and fqdnSets_old here because - // this method is only used for rate-limiting and we don't care to spend the - // extra CPU cycles checking the old table. - // TODO(#5670): Remove this comment when the partitioning is fixed. - `SELECT COUNT(1) FROM fqdnSets - WHERE setHash = ? - AND issued > ?`, - HashNames(req.Domains), - ssa.clk.Now().Add(-time.Duration(req.Window)), + _, err := ssa.dbMap.ExecContext(ctx, + `UPDATE authz2 SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`, + map[string]any{ + "deactivated": statusUint(core.StatusDeactivated), + "id": req.Id, + "valid": statusUint(core.StatusValid), + "pending": statusUint(core.StatusPending), + }, ) - return &sapb.Count{Count: count}, err -} - -// FQDNSetExists returns a bool indicating if one or more FQDN sets |names| -// exists in the database -func (ssa *SQLStorageAuthority) FQDNSetExists(ctx context.Context, req *sapb.FQDNSetExistsRequest) (*sapb.Exists, error) { - if len(req.Domains) == 0 { - return nil, errIncompleteRequest - } - exists, err := ssa.checkFQDNSetExists(ssa.dbMap.WithContext(ctx).SelectOne, req.Domains) if err != nil { return nil, err } - return &sapb.Exists{Exists: exists}, nil -} - -// oneSelectorFunc is a func type that matches both gorp.Transaction.SelectOne -// and gorp.DbMap.SelectOne. -type oneSelectorFunc func(holder interface{}, query string, args ...interface{}) error - -// checkFQDNSetExists uses the given oneSelectorFunc to check whether an fqdnSet -// for the given names exists. -func (ssa *SQLStorageAuthority) checkFQDNSetExists(selector oneSelectorFunc, names []string) (bool, error) { - namehash := HashNames(names) - var exists bool - err := selector( - &exists, - // We select on both tables here because this function is used to determine - // if a given issuance is a renewal, and for that we care about 90 days - // worth of data, not just 7 days like the current fqdnSets table holds. - // TODO(#5670): Remove this OR when the partitioning is fixed. - `SELECT EXISTS (SELECT id FROM fqdnSets WHERE setHash = ? LIMIT 1) - OR EXISTS (SELECT id FROM fqdnSets_old WHERE setHash = ? LIMIT 1)`, - namehash, - namehash, - ) - return exists, err + return &emptypb.Empty{}, nil } -// PreviousCertificateExists returns true iff there was at least one certificate -// issued with the provided domain name, and the most recent such certificate -// was issued by the provided registration ID. This method is currently only -// used to determine if a certificate has previously been issued for a given -// domain name in order to determine if validations should be allowed during -// the v1 API shutoff. -// TODO(#5816): Consider removing this method, as it has no callers. -func (ssa *SQLStorageAuthority) PreviousCertificateExists(ctx context.Context, req *sapb.PreviousCertificateExistsRequest) (*sapb.Exists, error) { - if req.Domain == "" || req.RegID == 0 { +// NewOrderAndAuthzs adds the given authorizations to the database, adds their +// autogenerated IDs to the given order, and then adds the order to the db. 
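
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the :name placeholders in
// DeactivateAuthorization2 above are a gorp feature bound from map[string]any,
// not MySQL syntax. The same statement written positionally over database/sql;
// the numeric status codes are illustrative assumptions standing in for the
// statusUint encoding used by the authz2 table:
func deactivateAuthz(ctx context.Context, dbh *sql.DB, id int64) error {
	const pending, valid, deactivated = 0, 1, 3 // assumed values
	_, err := dbh.ExecContext(ctx,
		"UPDATE authz2 SET status = ? WHERE id = ? AND status IN (?, ?)",
		deactivated, id, valid, pending)
	return err
}
// ---------------------------------------------------------------------------
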
+// This is done inside a single transaction to prevent situations where new +// authorizations are created, but then their corresponding order is never +// created, leading to "invisible" pending authorizations. +func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest) (*corepb.Order, error) { + if req.NewOrder == nil { return nil, errIncompleteRequest } - - exists := &sapb.Exists{Exists: true} - notExists := &sapb.Exists{Exists: false} - - // Find the most recently issued certificate containing this domain name. - var serial string - err := ssa.dbMap.WithContext(ctx).SelectOne( - &serial, - `SELECT serial FROM issuedNames - WHERE reversedName = ? - ORDER BY notBefore DESC - LIMIT 1`, - ReverseName(req.Domain), - ) - if err != nil { - if db.IsNoRows(err) { - return notExists, nil - } - return nil, err + if len(req.NewAuthzs) == 0 && len(req.NewOrder.V2Authorizations) == 0 { + return nil, errIncompleteRequest } - // Check whether that certificate was issued to the specified account. - var count int - err = ssa.dbMap.WithContext(ctx).SelectOne( - &count, - `SELECT COUNT(1) FROM certificates - WHERE serial = ? - AND registrationID = ?`, - serial, - req.RegID, - ) - if err != nil { - // If no rows found, that means the certificate we found in issuedNames wasn't - // issued by the registration ID we are checking right now, but is not an - // error. - if db.IsNoRows(err) { - return notExists, nil + for _, authz := range req.NewAuthzs { + if authz.RegistrationID != req.NewOrder.RegistrationID { + // This is a belt-and-suspenders check. These were just created by the RA, + // so their RegIDs should match. But if they don't, the consequences would + // be very bad, so we do an extra check here. + return nil, errors.New("new order and authzs must all be associated with same account") } - return nil, err - } - if count > 0 { - return exists, nil } - return notExists, nil -} -// DeactivateRegistration deactivates a currently valid registration -func (ssa *SQLStorageAuthority) DeactivateRegistration(ctx context.Context, req *sapb.RegistrationID) (*emptypb.Empty, error) { - if req == nil || req.Id == 0 { - return nil, errIncompleteRequest - } - _, err := ssa.dbMap.WithContext(ctx).Exec( - "UPDATE registrations SET status = ? WHERE status = ? AND id = ?", - string(core.StatusDeactivated), - string(core.StatusValid), - req.Id, - ) - if err != nil { - return nil, err - } - return &emptypb.Empty{}, nil -} - -// DeactivateAuthorization2 deactivates a currently valid or pending authorization. -// This method is intended to deprecate DeactivateAuthorization. -func (ssa *SQLStorageAuthority) DeactivateAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*emptypb.Empty, error) { - if req.Id == 0 { - return nil, errIncompleteRequest - } - - _, err := ssa.dbMap.Exec( - `UPDATE authz2 SET status = :deactivated WHERE id = :id and status IN (:valid,:pending)`, - map[string]interface{}{ - "deactivated": statusUint(core.StatusDeactivated), - "id": req.Id, - "valid": statusUint(core.StatusValid), - "pending": statusUint(core.StatusPending), - }, - ) - if err != nil { - return nil, err - } - return &emptypb.Empty{}, nil -} - -// NewOrder adds a new v2 style order to the database -func (ssa *SQLStorageAuthority) NewOrder(ctx context.Context, req *sapb.NewOrderRequest) (*corepb.Order, error) { - output, err := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - // Check new order request fields. 
- if req.RegistrationID == 0 || req.Expires == 0 || len(req.Names) == 0 { - return nil, errIncompleteRequest - } - - order := &orderModel{ - RegistrationID: req.RegistrationID, - Expires: time.Unix(0, req.Expires), - Created: ssa.clk.Now(), - } - - err := txWithCtx.Insert(order) - if err != nil { - return nil, err - } - - for _, id := range req.V2Authorizations { - otoa := &orderToAuthzModel{ - OrderID: order.ID, - AuthzID: id, - } - err := txWithCtx.Insert(otoa) - if err != nil { - return nil, err - } - } - - for _, name := range req.Names { - reqdName := &requestedNameModel{ - OrderID: order.ID, - ReversedName: ReverseName(name), - } - err := txWithCtx.Insert(reqdName) - if err != nil { - return nil, err - } - } - - // Add an FQDNSet entry for the order - err = addOrderFQDNSet(txWithCtx, req.Names, order.ID, order.RegistrationID, order.Expires) - if err != nil { - return nil, err - } - - return order, nil - }) - if err != nil { - return nil, err - } - var order *orderModel - var ok bool - if order, ok = output.(*orderModel); !ok { - return nil, fmt.Errorf("shouldn't happen: casting error in NewOrder") - } - - if features.Enabled(features.FasterNewOrdersRateLimit) { - // Increment the order creation count - err := addNewOrdersRateLimit(ctx, ssa.dbMap, req.RegistrationID, ssa.clk.Now().Truncate(time.Minute)) - if err != nil { - return nil, err - } - } - - res := &corepb.Order{ - // Carry some fields over the from input new order request. - RegistrationID: req.RegistrationID, - Expires: req.Expires, - Names: req.Names, - V2Authorizations: req.V2Authorizations, - // Some fields were generated by the database transaction. - Id: order.ID, - Created: order.Created.UnixNano(), - // A new order is never processing because it can't have been finalized yet. - BeganProcessing: false, - } - - // Calculate the order status before returning it. Since it may have reused all - // valid authorizations the order may be "born" in a ready status. - status, err := ssa.statusForOrder(ctx, res) - if err != nil { - return nil, err - } - res.Status = status - return res, nil -} - -// NewOrderAndAuthzs adds the given authorizations to the database, adds their -// autogenerated IDs to the given order, and then adds the order to the db. -// This is done inside a single transaction to prevent situations where new -// authorizations are created, but then their corresponding order is never -// created, leading to "invisible" pending authorizations. -func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb.NewOrderAndAuthzsRequest) (*corepb.Order, error) { - output, err := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { + output, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { // First, insert all of the new authorizations and record their IDs. 
-	newAuthzIDs := make([]int64, 0)
-	if len(req.NewAuthzs) != 0 {
-		inserter, err := db.NewMultiInserter("authz2", authzFields, "id")
+		newAuthzIDs := make([]int64, 0, len(req.NewAuthzs))
+		for _, authz := range req.NewAuthzs {
+			am, err := newAuthzReqToModel(authz, req.NewOrder.CertificateProfileName)
 			if err != nil {
 				return nil, err
 			}
-			for _, authz := range req.NewAuthzs {
-				if authz.Status != string(core.StatusPending) {
-					return nil, berrors.InternalServerError("authorization must be pending")
-				}
-				am, err := authzPBToModel(authz)
-				if err != nil {
-					return nil, err
-				}
-				err = inserter.Add([]interface{}{
-					am.ID,
-					am.IdentifierType,
-					am.IdentifierValue,
-					am.RegistrationID,
-					am.Status,
-					am.Expires,
-					am.Challenges,
-					am.Attempted,
-					am.AttemptedAt,
-					am.Token,
-					am.ValidationError,
-					am.ValidationRecord,
-				})
-				if err != nil {
-					return nil, err
-				}
-			}
-			newAuthzIDs, err = inserter.Insert(txWithCtx)
+			err = tx.Insert(ctx, am)
 			if err != nil {
 				return nil, err
 			}
+			newAuthzIDs = append(newAuthzIDs, am.ID)
 		}
 
+		// Combine the already-existing and newly-created authzs.
+		allAuthzIds := append(req.NewOrder.V2Authorizations, newAuthzIDs...)
+
 		// Second, insert the new order.
-		order := &orderModel{
-			RegistrationID: req.NewOrder.RegistrationID,
-			Expires:        time.Unix(0, req.NewOrder.Expires),
-			Created:        ssa.clk.Now(),
-		}
-		err := txWithCtx.Insert(order)
+		created := ssa.clk.Now()
+		encodedAuthzs, err := proto.Marshal(&sapb.Authzs{
+			AuthzIDs: allAuthzIds,
+		})
 		if err != nil {
 			return nil, err
 		}
 
-		// Third, insert all of the orderToAuthz relations.
-		inserter, err := db.NewMultiInserter("orderToAuthz2", "orderID, authzID", "")
-		if err != nil {
-			return nil, err
-		}
-		for _, id := range req.NewOrder.V2Authorizations {
-			err = inserter.Add([]interface{}{order.ID, id})
-			if err != nil {
-				return nil, err
-			}
-		}
-		for _, id := range newAuthzIDs {
-			err = inserter.Add([]interface{}{order.ID, id})
-			if err != nil {
-				return nil, err
-			}
+		om := orderModel{
+			RegistrationID:         req.NewOrder.RegistrationID,
+			Expires:                req.NewOrder.Expires.AsTime(),
+			Created:                created,
+			CertificateProfileName: &req.NewOrder.CertificateProfileName,
+			Replaces:               &req.NewOrder.Replaces,
+			Authzs:                 encodedAuthzs,
 		}
-		_, err = inserter.Insert(txWithCtx)
+		err = tx.Insert(ctx, &om)
 		if err != nil {
 			return nil, err
 		}
+		orderID := om.ID
 
-		// Fourth, insert all of the requestedNames.
-		inserter, err = db.NewMultiInserter("requestedNames", "orderID, reversedName", "")
+		// Third, insert the FQDNSet entry for the order.
+		err = addOrderFQDNSet(ctx, tx, identifier.FromProtoSlice(req.NewOrder.Identifiers), orderID, req.NewOrder.RegistrationID, req.NewOrder.Expires.AsTime())
 		if err != nil {
 			return nil, err
 		}
-		for _, name := range req.NewOrder.Names {
-			err = inserter.Add([]interface{}{order.ID, ReverseName(name)})
+
+		if req.NewOrder.ReplacesSerial != "" {
+			// Update the replacementOrders table to indicate that this order
+			// replaces the provided certificate serial.
+			err := addReplacementOrder(ctx, tx, req.NewOrder.ReplacesSerial, orderID, req.NewOrder.Expires.AsTime())
 			if err != nil {
 				return nil, err
 			}
 		}
-		_, err = inserter.Insert(txWithCtx)
-		if err != nil {
-			return nil, err
-		}
 
-		// Fifth, insert the FQDNSet entry for the order.
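
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the new order row above
// carries its authorization IDs as a serialized sapb.Authzs protobuf blob
// instead of orderToAuthz2 join rows. The encode/decode round trip:
func encodeAuthzIDs(ids []int64) ([]byte, error) {
	return proto.Marshal(&sapb.Authzs{AuthzIDs: ids})
}

func decodeAuthzIDs(blob []byte) ([]int64, error) {
	var authzs sapb.Authzs
	err := proto.Unmarshal(blob, &authzs)
	if err != nil {
		return nil, err
	}
	return authzs.AuthzIDs, nil
}
// ---------------------------------------------------------------------------
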
- err = addOrderFQDNSet(txWithCtx, req.NewOrder.Names, order.ID, order.RegistrationID, order.Expires) + // Get the partial Authorization objects for the order + authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, allAuthzIds) + // If there was an error getting the authorizations, return it immediately if err != nil { return nil, err } - // Finally, build the overall Order PB and return it. - return &corepb.Order{ + // Finally, build the overall Order PB. + res := &corepb.Order{ // ID and Created were auto-populated on the order model when it was inserted. - Id: order.ID, - Created: order.Created.UnixNano(), + Id: orderID, + Created: timestamppb.New(created), // These are carried over from the original request unchanged. RegistrationID: req.NewOrder.RegistrationID, Expires: req.NewOrder.Expires, - Names: req.NewOrder.Names, - // Have to combine the already-associated and newly-reacted authzs. - V2Authorizations: append(req.NewOrder.V2Authorizations, newAuthzIDs...), + Identifiers: req.NewOrder.Identifiers, + // This includes both reused and newly created authz IDs. + V2Authorizations: allAuthzIds, // A new order is never processing because it can't be finalized yet. BeganProcessing: false, - }, nil + // An empty string is allowed. When the RA retrieves the order and + // transmits it to the CA, the empty string will take the value of + // DefaultCertProfileName from the //issuance package. + CertificateProfileName: req.NewOrder.CertificateProfileName, + Replaces: req.NewOrder.Replaces, + } + + // Calculate the order status before returning it. Since it may have reused + // all valid authorizations the order may be "born" in a ready status. + status, err := statusForOrder(res, authzValidityInfo, ssa.clk.Now()) + if err != nil { + return nil, err + } + res.Status = status + + return res, nil }) if err != nil { return nil, err @@ -1044,22 +575,6 @@ func (ssa *SQLStorageAuthority) NewOrderAndAuthzs(ctx context.Context, req *sapb return nil, fmt.Errorf("casting error in NewOrderAndAuthzs") } - if features.Enabled(features.FasterNewOrdersRateLimit) { - // Increment the order creation count - err := addNewOrdersRateLimit(ctx, ssa.dbMap, req.NewOrder.RegistrationID, ssa.clk.Now().Truncate(time.Minute)) - if err != nil { - return nil, err - } - } - - // Calculate the order status before returning it. Since it may have reused all - // valid authorizations the order may be "born" in a ready status. - status, err := ssa.statusForOrder(ctx, order) - if err != nil { - return nil, err - } - order.Status = status - return order, nil } @@ -1070,8 +585,8 @@ func (ssa *SQLStorageAuthority) SetOrderProcessing(ctx context.Context, req *sap if req.Id == 0 { return nil, errIncompleteRequest } - _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - result, err := txWithCtx.Exec(` + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + result, err := tx.ExecContext(ctx, ` UPDATE orders SET beganProcessing = ? WHERE id = ? 
@@ -1101,21 +616,22 @@ func (ssa *SQLStorageAuthority) SetOrderError(ctx context.Context, req *sapb.Set if req.Id == 0 || req.Error == nil { return nil, errIncompleteRequest } - _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - om, err := orderToModel(&corepb.Order{ - Id: req.Id, - Error: req.Error, - }) - if err != nil { - return nil, err - } - result, err := txWithCtx.Exec(` + errJSON, err := json.Marshal(req.Error) + if err != nil { + return nil, err + } + if len(errJSON) > mediumBlobSize { + return nil, fmt.Errorf("error object is too large to store in the database") + } + + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + result, err := tx.ExecContext(ctx, ` UPDATE orders SET error = ? WHERE id = ?`, - om.Error, - om.ID) + errJSON, + req.Id) if err != nil { return nil, berrors.InternalServerError("error updating order error field") } @@ -1141,8 +657,8 @@ func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.Fin if req.Id == 0 || req.CertificateSerial == "" { return nil, errIncompleteRequest } - _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(txWithCtx db.Executor) (interface{}, error) { - result, err := txWithCtx.Exec(` + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + result, err := tx.ExecContext(ctx, ` UPDATE orders SET certificateSerial = ? WHERE id = ? AND @@ -1160,7 +676,12 @@ func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.Fin // Delete the orderFQDNSet row for the order now that it has been finalized. // We use this table for order reuse and should not reuse a finalized order. - err = deleteOrderFQDNSet(txWithCtx, req.Id) + err = deleteOrderFQDNSet(ctx, tx, req.Id) + if err != nil { + return nil, err + } + + err = setReplacementOrderFinalized(ctx, tx, req.Id) if err != nil { return nil, err } @@ -1173,936 +694,878 @@ func (ssa *SQLStorageAuthority) FinalizeOrder(ctx context.Context, req *sapb.Fin return &emptypb.Empty{}, nil } -// authzForOrder retrieves the authorization IDs for an order. -func (ssa *SQLStorageAuthority) authzForOrder(ctx context.Context, orderID int64) ([]int64, error) { - var v2IDs []int64 - _, err := ssa.dbMap.WithContext(ctx).Select( - &v2IDs, - "SELECT authzID FROM orderToAuthz2 WHERE orderID = ?", - orderID, - ) - return v2IDs, err -} - -// namesForOrder finds all of the requested names associated with an order. The -// names are returned in their reversed form (see `sa.ReverseName`). -func (ssa *SQLStorageAuthority) namesForOrder(ctx context.Context, orderID int64) ([]string, error) { - var reversedNames []string - _, err := ssa.dbMap.WithContext(ctx).Select( - &reversedNames, - `SELECT reversedName - FROM requestedNames - WHERE orderID = ?`, - orderID) - if err != nil { - return nil, err - } - return reversedNames, nil -} - -// GetOrder is used to retrieve an already existing order object -func (ssa *SQLStorageAuthority) GetOrder(ctx context.Context, req *sapb.OrderRequest) (*corepb.Order, error) { - if req == nil || req.Id == 0 { +// FinalizeAuthorization2 moves a pending authorization to either the valid or invalid status. If +// the authorization is being moved to invalid the validationError field must be set. If the +// authorization is being moved to valid the validationRecord and expires fields must be set. 
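
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the mediumBlobSize guard
// in SetOrderError above exists because MySQL's MEDIUMBLOB tops out at 2^24-1
// bytes; rejecting an oversized error object up front beats a server-side
// failure mid-transaction. The constant's real definition lives elsewhere in
// this package; the value below is the column maximum, assumed for the sketch:
const mediumBlobSize = 1<<24 - 1 // assumption: see the package's model code

func marshalBounded(v any) ([]byte, error) {
	out, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	if len(out) > mediumBlobSize {
		return nil, fmt.Errorf("marshaled object is %d bytes, too large to store", len(out))
	}
	return out, nil
}
// ---------------------------------------------------------------------------
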
+func (ssa *SQLStorageAuthority) FinalizeAuthorization2(ctx context.Context, req *sapb.FinalizeAuthorizationRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Status, req.Attempted, req.Id, req.Expires) { return nil, errIncompleteRequest } - omObj, err := ssa.dbMap.WithContext(ctx).Get(orderModel{}, req.Id) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("no order found for ID %d", req.Id) - } - return nil, err + if req.Status != string(core.StatusValid) && req.Status != string(core.StatusInvalid) { + return nil, berrors.InternalServerError("authorization must have status valid or invalid") } - if omObj == nil { - return nil, berrors.NotFoundError("no order found for ID %d", req.Id) + query := `UPDATE authz2 SET + status = :status, + attempted = :attempted, + attemptedAt = :attemptedAt, + validationRecord = :validationRecord, + validationError = :validationError, + expires = :expires + WHERE id = :id AND status = :pending` + var validationRecords []core.ValidationRecord + for _, recordPB := range req.ValidationRecords { + record, err := bgrpc.PBToValidationRecord(recordPB) + if err != nil { + return nil, err + } + if req.Attempted == string(core.ChallengeTypeHTTP01) { + // Remove these fields because they can be rehydrated later + // on from the URL field. + record.Hostname = "" + record.Port = "" + } + validationRecords = append(validationRecords, record) } - order, err := modelToOrder(omObj.(*orderModel)) + vrJSON, err := json.Marshal(validationRecords) if err != nil { return nil, err } - orderExp := time.Unix(0, order.Expires) - if orderExp.Before(ssa.clk.Now()) { - return nil, berrors.NotFoundError("no order found for ID %d", req.Id) + var veJSON []byte + if req.ValidationError != nil { + validationError, err := bgrpc.PBToProblemDetails(req.ValidationError) + if err != nil { + return nil, err + } + j, err := json.Marshal(validationError) + if err != nil { + return nil, err + } + veJSON = j + } + // Check to see if the AttemptedAt time is non zero and convert to + // *time.Time if so. If it is zero, leave nil and don't convert. Keep the + // database attemptedAt field Null instead of 1970-01-01 00:00:00. + var attemptedTime *time.Time + if !core.IsAnyNilOrZero(req.AttemptedAt) { + val := req.AttemptedAt.AsTime() + attemptedTime = &val } - - v2AuthzIDs, err := ssa.authzForOrder(ctx, order.Id) - if err != nil { - return nil, err + params := map[string]any{ + "status": statusToUint[core.AcmeStatus(req.Status)], + "attempted": challTypeToUint[req.Attempted], + "attemptedAt": attemptedTime, + "validationRecord": vrJSON, + "id": req.Id, + "pending": statusUint(core.StatusPending), + "expires": req.Expires.AsTime(), + // if req.ValidationError is nil veJSON should also be nil + // which should result in a NULL field + "validationError": veJSON, } - order.V2Authorizations = v2AuthzIDs - names, err := ssa.namesForOrder(ctx, order.Id) + res, err := ssa.dbMap.ExecContext(ctx, query, params) if err != nil { return nil, err } - // The requested names are stored reversed to improve indexing performance. We - // need to reverse the reversed names here before giving them back to the - // caller. 
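
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): Hostname and Port are
// dropped from http-01 validation records above because both can be
// rehydrated from the record's URL later. That rehydration via net/url:
func rehydrate(recordURL string) (host, port string, err error) {
	// e.g. "http://example.com:80/.well-known/acme-challenge/<token>"
	u, err := url.Parse(recordURL)
	if err != nil {
		return "", "", err
	}
	// Hostname() strips any port (and brackets, for IPv6 literals);
	// Port() returns "" when the URL carries no explicit port.
	return u.Hostname(), u.Port(), nil
}
// ---------------------------------------------------------------------------
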
- reversedNames := make([]string, len(names)) - for i, n := range names { - reversedNames[i] = ReverseName(n) - } - order.Names = reversedNames - - // Calculate the status for the order - status, err := ssa.statusForOrder(ctx, order) + rows, err := res.RowsAffected() if err != nil { return nil, err } - order.Status = status - - return order, nil + if rows == 0 { + return nil, berrors.NotFoundError("no pending authorization with id %d", req.Id) + } else if rows > 1 { + return nil, berrors.InternalServerError("multiple rows updated for authorization id %d", req.Id) + } + return &emptypb.Empty{}, nil } -// statusForOrder examines the status of a provided order's authorizations to -// determine what the overall status of the order should be. In summary: -// * If the order has an error, the order is invalid -// * If any of the order's authorizations are in any state other than -// valid or pending, the order is invalid. -// * If any of the order's authorizations are pending, the order is pending. -// * If all of the order's authorizations are valid, and there is -// a certificate serial, the order is valid. -// * If all of the order's authorizations are valid, and we have began -// processing, but there is no certificate serial, the order is processing. -// * If all of the order's authorizations are valid, and we haven't begun -// processing, then the order is status ready. -// An error is returned for any other case. -func (ssa *SQLStorageAuthority) statusForOrder(ctx context.Context, order *corepb.Order) (string, error) { - // Without any further work we know an order with an error is invalid - if order.Error != nil { - return string(core.StatusInvalid), nil - } - - // If the order is expired the status is invalid and we don't need to get - // order authorizations. Its important to exit early in this case because an - // order that references an expired authorization will be itself have been - // expired (because we match the order expiry to the associated authz expiries - // in ra.NewOrder), and expired authorizations may be purged from the DB. - // Because of this purging fetching the authz's for an expired order may - // return fewer authz objects than expected, triggering a 500 error response. - orderExpiry := time.Unix(0, order.Expires) - if orderExpiry.Before(ssa.clk.Now()) { - return string(core.StatusInvalid), nil - } - - // Get the full Authorization objects for the order - authzValidityInfo, err := ssa.getAuthorizationStatuses(ctx, order.V2Authorizations) - // If there was an error getting the authorizations, return it immediately - if err != nil { - return "", err - } - - // If getAuthorizationStatuses returned a different number of authorization - // objects than the order's slice of authorization IDs something has gone - // wrong worth raising an internal error about. 
- if len(authzValidityInfo) != len(order.V2Authorizations) { - return "", berrors.InternalServerError( - "getAuthorizationStatuses returned the wrong number of authorization statuses "+ - "(%d vs expected %d) for order %d", - len(authzValidityInfo), len(order.V2Authorizations), order.Id) - } - - // Keep a count of the authorizations seen - pendingAuthzs := 0 - validAuthzs := 0 - otherAuthzs := 0 - expiredAuthzs := 0 - - // Loop over each of the order's authorization objects to examine the authz status - for _, info := range authzValidityInfo { - switch core.AcmeStatus(info.Status) { - case core.StatusPending: - pendingAuthzs++ - case core.StatusValid: - validAuthzs++ - case core.StatusInvalid: - otherAuthzs++ - case core.StatusDeactivated: - otherAuthzs++ - case core.StatusRevoked: - otherAuthzs++ - default: - return "", berrors.InternalServerError( - "Order is in an invalid state. Authz has invalid status %s", - info.Status) - } - if info.Expires.Before(ssa.clk.Now()) { - expiredAuthzs++ - } +// addRevokedCertificate is a helper used by both RevokeCertificate and +// UpdateRevokedCertificate. It inserts a new row into the revokedCertificates +// table based on the contents of the input request. The second argument must be +// a transaction object so that it is safe to conduct multiple queries with a +// consistent view of the database. It must only be called when the request +// specifies a non-zero ShardIdx. +func addRevokedCertificate(ctx context.Context, tx db.Executor, req *sapb.RevokeCertificateRequest, revokedDate time.Time) error { + if req.ShardIdx == 0 { + return errors.New("cannot add revoked certificate with shard index 0") } - // An order is invalid if **any** of its authzs are invalid, deactivated, - // revoked, or expired, see https://tools.ietf.org/html/rfc8555#section-7.1.6 - if otherAuthzs > 0 || expiredAuthzs > 0 { - return string(core.StatusInvalid), nil + var serial struct { + Expires time.Time } - // An order is pending if **any** of its authzs are pending - if pendingAuthzs > 0 { - return string(core.StatusPending), nil + err := tx.SelectOne( + ctx, &serial, `SELECT expires FROM serials WHERE serial = ?`, req.Serial) + if err != nil { + return fmt.Errorf("retrieving revoked certificate expiration: %w", err) + } + + err = tx.Insert(ctx, &revokedCertModel{ + IssuerID: req.IssuerID, + Serial: req.Serial, + ShardIdx: req.ShardIdx, + RevokedDate: revokedDate, + RevokedReason: revocation.Reason(req.Reason), + // Round the notAfter up to the next hour, to reduce index size while still + // ensuring we correctly serve revocation info past the actual expiration. + NotAfterHour: serial.Expires.Add(time.Hour).Truncate(time.Hour), + }) + if err != nil { + return fmt.Errorf("inserting revoked certificate row: %w", err) } - // An order is fully authorized if it has valid authzs for each of the order - // names - fullyAuthorized := len(order.Names) == validAuthzs + return nil +} - // If the order isn't fully authorized we've encountered an internal error: - // Above we checked for any invalid or pending authzs and should have returned - // early. Somehow we made it this far but also don't have the correct number - // of valid authzs. - if !fullyAuthorized { - return "", berrors.InternalServerError( - "Order has the incorrect number of valid authorizations & no pending, " + - "deactivated or invalid authorizations") +// RevokeCertificate stores revocation information about a certificate. It will only store this +// information if the certificate is not already marked as revoked. 
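
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): NotAfterHour above rounds
// the expiration *up*, never down, so revocation info is always served at
// least until the true notAfter. In isolation:
func nextHour(t time.Time) time.Time {
	// 03:04:05 becomes 04:00:00; an exact 04:00:00 also advances, to
	// 05:00:00, which still errs on the safe (later) side.
	return t.Add(time.Hour).Truncate(time.Hour)
}
// ---------------------------------------------------------------------------
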
+// +// If ShardIdx is non-zero, RevokeCertificate also writes an entry for this certificate to +// the revokedCertificates table, with the provided shard number. +func (ssa *SQLStorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Serial, req.IssuerID, req.Date, req.ShardIdx) { + return nil, errIncompleteRequest } - // If the order is fully authorized and the certificate serial is set then the - // order is valid - if fullyAuthorized && order.CertificateSerial != "" { - return string(core.StatusValid), nil - } + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + revokedDate := req.Date.AsTime() - // If the order is fully authorized, and we have began processing it, then the - // order is processing. - if fullyAuthorized && order.BeganProcessing { - return string(core.StatusProcessing), nil - } + res, err := tx.ExecContext(ctx, + `UPDATE certificateStatus SET + status = ?, + revokedReason = ?, + revokedDate = ?, + ocspLastUpdated = ? + WHERE serial = ? AND status != ?`, + string(core.OCSPStatusRevoked), + revocation.Reason(req.Reason), + revokedDate, + revokedDate, + req.Serial, + string(core.OCSPStatusRevoked), + ) + if err != nil { + return nil, err + } + rows, err := res.RowsAffected() + if err != nil { + return nil, err + } + if rows == 0 { + return nil, berrors.AlreadyRevokedError("no certificate with serial %s and status other than %s", req.Serial, string(core.OCSPStatusRevoked)) + } - if fullyAuthorized && !order.BeganProcessing { - return string(core.StatusReady), nil - } + err = addRevokedCertificate(ctx, tx, req, revokedDate) + if err != nil { + return nil, err + } - return "", berrors.InternalServerError( - "Order %d is in an invalid state. No state known for this order's "+ - "authorizations", order.Id) -} + return nil, nil + }) + if overallError != nil { + return nil, overallError + } -type authzValidity struct { - Status string - Expires time.Time + return &emptypb.Empty{}, nil } -func (ssa *SQLStorageAuthority) getAuthorizationStatuses(ctx context.Context, ids []int64) ([]authzValidity, error) { - var qmarks []string - var params []interface{} - for _, id := range ids { - qmarks = append(qmarks, "?") - params = append(params, id) - } - var validityInfo []struct { - Status uint8 - Expires time.Time +// UpdateRevokedCertificate stores new revocation information about an +// already-revoked certificate. It will only store this information if the +// cert is already revoked, if the new revocation reason is `KeyCompromise`, +// and if the revokedDate is identical to the current revokedDate. 
+func (ssa *SQLStorageAuthority) UpdateRevokedCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.Serial, req.IssuerID, req.Date, req.Backdate, req.ShardIdx) { + return nil, errIncompleteRequest } - _, err := ssa.dbMap.WithContext(ctx).Select( - &validityInfo, - fmt.Sprintf("SELECT status, expires FROM authz2 WHERE id IN (%s)", strings.Join(qmarks, ",")), - params..., - ) - if err != nil { - return nil, err + if revocation.Reason(req.Reason) != revocation.KeyCompromise { + return nil, fmt.Errorf("cannot update revocation for any reason other than keyCompromise (1); got: %d", req.Reason) } - allAuthzValidity := make([]authzValidity, len(validityInfo)) - for i, info := range validityInfo { - allAuthzValidity[i] = authzValidity{ - Status: string(uintToStatus[info.Status]), - Expires: info.Expires, - } - } - return allAuthzValidity, nil -} - -// GetOrderForNames tries to find a **pending** or **ready** order with the -// exact set of names requested, associated with the given accountID. Only -// unexpired orders are considered. If no order meeting these requirements is -// found a nil corepb.Order pointer is returned. -func (ssa *SQLStorageAuthority) GetOrderForNames( - ctx context.Context, - req *sapb.GetOrderForNamesRequest) (*corepb.Order, error) { - - if req.AcctID == 0 || len(req.Names) == 0 { - return nil, errIncompleteRequest - } - - // Hash the names requested for lookup in the orderFqdnSets table - fqdnHash := HashNames(req.Names) - - // Find a possibly-suitable order. We don't include the account ID or order - // status in this query because there's no index that includes those, so - // including them could require the DB to scan extra rows. - // Instead, we select one unexpired order that matches the fqdnSet. If - // that order doesn't match the account ID or status we need, just return - // nothing. We use `ORDER BY expires ASC` because the index on - // (setHash, expires) is in ASC order. DESC would be slightly nicer from a - // user experience perspective but would be slow when there are many entries - // to sort. - // This approach works fine because in most cases there's only one account - // issuing for a given name. If there are other accounts issuing for the same - // name, it just means order reuse happens less often. - var result struct { - OrderID int64 - RegistrationID int64 - } - var err error - err = ssa.dbMap.WithContext(ctx).SelectOne(&result, ` - SELECT orderID, registrationID - FROM orderFqdnSets - WHERE setHash = ? - AND expires > ? 
- ORDER BY expires ASC - LIMIT 1`, - fqdnHash, ssa.clk.Now()) - - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("no order matching request found") - } else if err != nil { - return nil, err - } - - if result.RegistrationID != req.AcctID { - return nil, berrors.NotFoundError("no order matching request found") - } - - // Get the order - order, err := ssa.GetOrder(ctx, &sapb.OrderRequest{Id: result.OrderID}) - if err != nil { - return nil, err - } - // Only return a pending or ready order - if order.Status != string(core.StatusPending) && - order.Status != string(core.StatusReady) { - return nil, berrors.NotFoundError("no order matching request found") - } - return order, nil -} - -func AuthzMapToPB(m map[string]*core.Authorization) (*sapb.Authorizations, error) { - resp := &sapb.Authorizations{} - for k, v := range m { - authzPB, err := bgrpc.AuthzToPB(*v) + _, overallError := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + thisUpdate := req.Date.AsTime() + revokedDate := req.Backdate.AsTime() + + res, err := tx.ExecContext(ctx, + `UPDATE certificateStatus SET + revokedReason = ?, + ocspLastUpdated = ? + WHERE serial = ? AND status = ? AND revokedReason != ? AND revokedDate = ?`, + revocation.KeyCompromise, + thisUpdate, + req.Serial, + string(core.OCSPStatusRevoked), + revocation.KeyCompromise, + revokedDate, + ) if err != nil { return nil, err } - resp.Authz = append(resp.Authz, &sapb.Authorizations_MapElement{Domain: k, Authz: authzPB}) - } - return resp, nil -} - -// NewAuthorizations2 adds a set of new style authorizations to the database and -// returns either the IDs of the authorizations or an error. -// TODO(#5816): Consider removing this method, as it has no callers. -func (ssa *SQLStorageAuthority) NewAuthorizations2(ctx context.Context, req *sapb.AddPendingAuthorizationsRequest) (*sapb.Authorization2IDs, error) { - if len(req.Authz) == 0 { - return nil, errIncompleteRequest - } + rows, err := res.RowsAffected() + if err != nil { + return nil, err + } + if rows == 0 { + // InternalServerError because we expected this certificate status to exist, + // to already be revoked for a different reason, and to have a matching date. + return nil, berrors.InternalServerError("no certificate with serial %s and revoked reason other than keyCompromise", req.Serial) + } - ids := &sapb.Authorization2IDs{} - var queryArgs []interface{} - var questionsBuf strings.Builder + var rcm revokedCertModel + // Note: this query MUST be updated to enforce the same preconditions as + // the "UPDATE certificateStatus SET revokedReason..." above if this + // query ever becomes the first or only query in this transaction. We are + // currently relying on the query above to exit early if the certificate + // does not have an appropriate status and revocation reason. + err = tx.SelectOne( + ctx, &rcm, `SELECT * FROM revokedCertificates WHERE serial = ?`, req.Serial) + if db.IsNoRows(err) { + // TODO: Remove this fallback codepath once we know that all unexpired + // certs marked as revoked in the certificateStatus table have + // corresponding rows in the revokedCertificates table. That should be + // 90+ days after the RA starts sending ShardIdx in its + // RevokeCertificateRequest messages. 
+ err = addRevokedCertificate(ctx, tx, req, revokedDate) + if err != nil { + return nil, err + } + return nil, nil + } else if err != nil { + return nil, fmt.Errorf("retrieving revoked certificate row: %w", err) + } - for _, authz := range req.Authz { - if authz.Status != string(core.StatusPending) { - return nil, berrors.InternalServerError("authorization must be pending") + if rcm.ShardIdx != req.ShardIdx { + return nil, berrors.InternalServerError("mismatched shard index %d != %d", req.ShardIdx, rcm.ShardIdx) } - am, err := authzPBToModel(authz) + + rcm.RevokedReason = revocation.KeyCompromise + _, err = tx.Update(ctx, &rcm) if err != nil { - return nil, err + return nil, fmt.Errorf("updating revoked certificate row: %w", err) } - // Each authz needs a (?,?...), in the VALUES block. We need one - // for each element in the authzFields string. - fmt.Fprint(&questionsBuf, "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?),") - - // The query arguments must follow the order of the authzFields string. - queryArgs = append(queryArgs, - am.ID, - am.IdentifierType, - am.IdentifierValue, - am.RegistrationID, - am.Status, - am.Expires, - am.Challenges, - am.Attempted, - am.AttemptedAt, - am.Token, - am.ValidationError, - am.ValidationRecord, - ) + return nil, nil + }) + if overallError != nil { + return nil, overallError } - // At this point, the VALUES block question-string has a trailing comma, we need - // to remove it to make sure we're valid SQL. - questionsTrimmed := strings.TrimRight(questionsBuf.String(), ",") - query := fmt.Sprintf("INSERT INTO authz2 (%s) VALUES %s RETURNING id;", authzFields, questionsTrimmed) + return &emptypb.Empty{}, nil +} - rows, err := ssa.dbMap.Db.QueryContext(ctx, query, queryArgs...) +// AddBlockedKey adds a key hash to the blockedKeys table +func (ssa *SQLStorageAuthority) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req.KeyHash, req.Added, req.Source) { + return nil, errIncompleteRequest + } + sourceInt, ok := stringToSourceInt[req.Source] + if !ok { + return nil, errors.New("unknown source") + } + cols, qs := blockedKeysColumns, "?, ?, ?, ?" + vals := []any{ + req.KeyHash, + req.Added.AsTime(), + sourceInt, + req.Comment, + } + if req.RevokedBy != 0 { + cols += ", revokedBy" + qs += ", ?" + vals = append(vals, req.RevokedBy) + } + _, err := ssa.dbMap.ExecContext(ctx, + fmt.Sprintf("INSERT INTO blockedKeys (%s) VALUES (%s)", cols, qs), + vals..., + ) if err != nil { + if db.IsDuplicate(err) { + // Ignore duplicate inserts so multiple certs with the same key can + // be revoked. + return &emptypb.Empty{}, nil + } return nil, err } - for rows.Next() { - var idField int64 - err = rows.Scan(&idField) - if err != nil { - rows.Close() - return nil, err - } - ids.Ids = append(ids.Ids, idField) + return &emptypb.Empty{}, nil +} + +// Health implements the grpc.checker interface. +func (ssa *SQLStorageAuthority) Health(ctx context.Context) error { + err := ssa.dbMap.SelectOne(ctx, new(int), "SELECT 1") + if err != nil { + return err } - // Ensure the query wasn't interrupted before it could complete. - err = rows.Close() + err = ssa.SQLStorageAuthorityRO.Health(ctx) if err != nil { - return nil, err + return err } - return ids, nil + return nil } -// GetAuthorization2 returns the authz2 style authorization identified by the provided ID or an error. -// If no authorization is found matching the ID a berrors.NotFound type error is returned. 
-func (ssa *SQLStorageAuthority) GetAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*corepb.Authorization, error) { - if req.Id == 0 { +// LeaseCRLShard marks a single crlShards row as leased until the given time. +// If the request names a specific shard, this function will return an error +// if that shard is already leased. Otherwise, this function will return the +// index of the oldest shard for the given issuer. +func (ssa *SQLStorageAuthority) LeaseCRLShard(ctx context.Context, req *sapb.LeaseCRLShardRequest) (*sapb.LeaseCRLShardResponse, error) { + if core.IsAnyNilOrZero(req.Until, req.IssuerNameID) { return nil, errIncompleteRequest } - obj, err := ssa.dbMap.Get(authzModel{}, req.Id) - if err != nil { - return nil, err + if req.Until.AsTime().Before(ssa.clk.Now()) { + return nil, fmt.Errorf("lease timestamp must be in the future, got %q", req.Until.AsTime()) } - if obj == nil { - return nil, berrors.NotFoundError("authorization %d not found", req.Id) + + if req.MinShardIdx == req.MaxShardIdx { + return ssa.leaseSpecificCRLShard(ctx, req) } - return modelToAuthzPB(*(obj.(*authzModel))) + + return ssa.leaseOldestCRLShard(ctx, req) } -// authzModelMapToPB converts a mapping of domain name to authzModels into a -// protobuf authorizations map -func authzModelMapToPB(m map[string]authzModel) (*sapb.Authorizations, error) { - resp := &sapb.Authorizations{} - for k, v := range m { - authzPB, err := modelToAuthzPB(v) +// leaseOldestCRLShard finds the oldest unleased crl shard for the given issuer +// and then leases it. Shards within the requested range which have never been +// leased or are previously-unknown indices are considered older than any other +// shard. It returns an error if all shards for the issuer are already leased. +func (ssa *SQLStorageAuthority) leaseOldestCRLShard(ctx context.Context, req *sapb.LeaseCRLShardRequest) (*sapb.LeaseCRLShardResponse, error) { + shardIdx, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + var shards []*crlShardModel + _, err := tx.Select( + ctx, + &shards, + `SELECT id, issuerID, idx, thisUpdate, nextUpdate, leasedUntil + FROM crlShards + WHERE issuerID = ? + AND idx BETWEEN ? AND ?`, + req.IssuerNameID, req.MinShardIdx, req.MaxShardIdx, + ) if err != nil { - return nil, err + return -1, fmt.Errorf("selecting candidate shards: %w", err) } - resp.Authz = append(resp.Authz, &sapb.Authorizations_MapElement{Domain: k, Authz: authzPB}) - } - return resp, nil -} -// GetAuthorizations2 returns any valid or pending authorizations that exist for the list of domains -// provided. If both a valid and pending authorization exist only the valid one will be returned. -func (ssa *SQLStorageAuthority) GetAuthorizations2(ctx context.Context, req *sapb.GetAuthorizationsRequest) (*sapb.Authorizations, error) { - if len(req.Domains) == 0 || req.RegistrationID == 0 || req.Now == 0 { - return nil, errIncompleteRequest - } - var authzModels []authzModel - params := []interface{}{ - req.RegistrationID, - statusUint(core.StatusValid), - statusUint(core.StatusPending), - time.Unix(0, req.Now), - identifierTypeToUint[string(identifier.DNS)], - } - - useIndex := "" - if features.Enabled(features.GetAuthzUseIndex) { - useIndex = "USE INDEX (regID_identifier_status_expires_idx)" - } - - qmarks := make([]string, len(req.Domains)) - for i, n := range req.Domains { - qmarks[i] = "?" - params = append(params, n) - } - - query := fmt.Sprintf( - `SELECT %s FROM authz2 - %s - WHERE registrationID = ? AND - status IN (?,?) 
AND - expires > ? AND - identifierType = ? AND - identifierValue IN (%s)`, - authzFields, - useIndex, - strings.Join(qmarks, ","), - ) + // Determine which shard index we want to lease. + var shardIdx int + if len(shards) < (int(req.MaxShardIdx + 1 - req.MinShardIdx)) { + // Some expected shards are missing (i.e. never-before-produced), so we + // pick one at random. + missing := make(map[int]struct{}, req.MaxShardIdx+1-req.MinShardIdx) + for i := req.MinShardIdx; i <= req.MaxShardIdx; i++ { + missing[int(i)] = struct{}{} + } + for _, shard := range shards { + delete(missing, shard.Idx) + } + for idx := range missing { + // Go map iteration is guaranteed to be in randomized key order. + shardIdx = idx + break + } - dbMap := ssa.dbMap - if features.Enabled(features.GetAuthzReadOnly) { - dbMap = ssa.dbReadOnlyMap - } - _, err := dbMap.Select( - &authzModels, - query, - params..., - ) + _, err = tx.ExecContext(ctx, + `INSERT INTO crlShards (issuerID, idx, leasedUntil) + VALUES (?, ?, ?)`, + req.IssuerNameID, + shardIdx, + req.Until.AsTime(), + ) + if err != nil { + return -1, fmt.Errorf("inserting selected shard: %w", err) + } + } else { + // We got all the shards we expect, so we pick the oldest unleased shard. + var oldest *crlShardModel + for _, shard := range shards { + if shard.LeasedUntil.After(ssa.clk.Now()) { + continue + } + if oldest == nil || + (oldest.ThisUpdate != nil && shard.ThisUpdate == nil) || + (oldest.ThisUpdate != nil && shard.ThisUpdate.Before(*oldest.ThisUpdate)) { + oldest = shard + } + } + if oldest == nil { + return -1, fmt.Errorf("issuer %d has no unleased shards in range %d-%d", req.IssuerNameID, req.MinShardIdx, req.MaxShardIdx) + } + shardIdx = oldest.Idx + + res, err := tx.ExecContext(ctx, + `UPDATE crlShards + SET leasedUntil = ? + WHERE issuerID = ? + AND idx = ? + AND leasedUntil = ? + LIMIT 1`, + req.Until.AsTime(), + req.IssuerNameID, + shardIdx, + oldest.LeasedUntil, + ) + if err != nil { + return -1, fmt.Errorf("updating selected shard: %w", err) + } + rowsAffected, err := res.RowsAffected() + if err != nil { + return -1, fmt.Errorf("confirming update of selected shard: %w", err) + } + if rowsAffected != 1 { + return -1, errors.New("failed to lease shard") + } + } + + return shardIdx, err + }) if err != nil { - return nil, err + return nil, fmt.Errorf("leasing oldest shard: %w", err) } - if len(authzModels) == 0 { - return &sapb.Authorizations{}, nil - } + return &sapb.LeaseCRLShardResponse{ + IssuerNameID: req.IssuerNameID, + ShardIdx: int64(shardIdx.(int)), + }, nil +} - authzModelMap := make(map[string]authzModel) - for _, am := range authzModels { - existing, present := authzModelMap[am.IdentifierValue] - if !present || uintToStatus[existing.Status] == core.StatusPending && uintToStatus[am.Status] == core.StatusValid { - authzModelMap[am.IdentifierValue] = am +// leaseSpecificCRLShard attempts to lease the crl shard for the given issuer +// and shard index. It returns an error if the specified shard is already +// leased. 
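
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): the missing-shard pick in
// leaseOldestCRLShard above relies on Go randomizing map iteration (the spec
// leaves the order unspecified; the runtime varies it deliberately). The trick
// in isolation, for a hypothetical inclusive range [min, max]:
func pickMissing(min, max int, seen []int) int {
	missing := make(map[int]struct{}, max+1-min)
	for i := min; i <= max; i++ {
		missing[i] = struct{}{}
	}
	for _, idx := range seen {
		delete(missing, idx)
	}
	for idx := range missing {
		return idx // first key of a randomized iteration: an arbitrary pick
	}
	return min // only reachable if nothing was missing
}
// ---------------------------------------------------------------------------
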
+func (ssa *SQLStorageAuthority) leaseSpecificCRLShard(ctx context.Context, req *sapb.LeaseCRLShardRequest) (*sapb.LeaseCRLShardResponse, error) {
+	if req.MinShardIdx != req.MaxShardIdx {
+		return nil, fmt.Errorf("request must identify a single shard index: %d != %d", req.MinShardIdx, req.MaxShardIdx)
+	}
+
+	_, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) {
+		needToInsert := false
+		var shardModel crlShardModel
+		err := tx.SelectOne(ctx,
+			&shardModel,
+			`SELECT leasedUntil
+			FROM crlShards
+			WHERE issuerID = ?
+			AND idx = ?
+			LIMIT 1`,
+			req.IssuerNameID,
+			req.MinShardIdx,
+		)
+		if db.IsNoRows(err) {
+			needToInsert = true
+		} else if err != nil {
+			return nil, fmt.Errorf("selecting requested shard: %w", err)
+		} else if shardModel.LeasedUntil.After(ssa.clk.Now()) {
+			return nil, fmt.Errorf("shard %d for issuer %d already leased", req.MinShardIdx, req.IssuerNameID)
+		}
+
+		if needToInsert {
+			_, err = tx.ExecContext(ctx,
+				`INSERT INTO crlShards (issuerID, idx, leasedUntil)
+				VALUES (?, ?, ?)`,
+				req.IssuerNameID,
+				req.MinShardIdx,
+				req.Until.AsTime(),
+			)
+			if err != nil {
+				return nil, fmt.Errorf("inserting selected shard: %w", err)
+			}
+		} else {
+			res, err := tx.ExecContext(ctx,
+				`UPDATE crlShards
+				SET leasedUntil = ?
+				WHERE issuerID = ?
+				AND idx = ?
+				AND leasedUntil = ?
+				LIMIT 1`,
+				req.Until.AsTime(),
+				req.IssuerNameID,
+				req.MinShardIdx,
+				shardModel.LeasedUntil,
+			)
+			if err != nil {
+				return nil, fmt.Errorf("updating selected shard: %w", err)
+			}
+			rowsAffected, err := res.RowsAffected()
+			if err != nil {
+				return nil, fmt.Errorf("confirming update of selected shard: %w", err)
+			}
+			if rowsAffected != 1 {
+				return nil, errors.New("failed to lease shard")
+			}
+		}
+
+		return nil, nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("leasing specific shard: %w", err)
+	}
+
+	return &sapb.LeaseCRLShardResponse{
+		IssuerNameID: req.IssuerNameID,
+		ShardIdx:     req.MinShardIdx,
+	}, nil
+}

+// UpdateCRLShard updates the thisUpdate and nextUpdate timestamps of a CRL
+// shard. It rejects the update if it would cause the thisUpdate timestamp to
+// move backwards, but if thisUpdate would stay the same (for instance, multiple
+// CRL generations within a single second), it will succeed.
+//
+// It does *not* reject the update if the shard is no longer
+// leased: although this would be unexpected (because the lease timestamp should
+// be the same as the crl-updater's context expiration), it's not inherently a
+// sign of an update that should be skipped. It does reject the update if the
+// identified CRL shard does not exist in the database (it should exist, as
+// rows are created if necessary when leased). It also sets the leasedUntil time
+// to be equal to thisUpdate, to indicate that the shard is no longer leased.
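
// ---------------------------------------------------------------------------
// Editor's illustration (Go, not part of the diff): both lease paths above use
// an optimistic-concurrency UPDATE: the WHERE clause repeats the leasedUntil
// value read earlier in the transaction, so a competing leaser that won the
// race shows up as RowsAffected == 0 rather than being silently overwritten.
// The compare-and-swap reduced to a skeleton:
func leaseShard(ctx context.Context, tx *sql.Tx, issuerID, idx int64, prev, until time.Time) error {
	res, err := tx.ExecContext(ctx,
		`UPDATE crlShards SET leasedUntil = ?
		 WHERE issuerID = ? AND idx = ? AND leasedUntil = ? LIMIT 1`,
		until, issuerID, idx, prev)
	if err != nil {
		return err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if n != 1 {
		// leasedUntil changed between our SELECT and this UPDATE.
		return errors.New("failed to lease shard")
	}
	return nil
}
// ---------------------------------------------------------------------------
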
+func (ssa *SQLStorageAuthority) UpdateCRLShard(ctx context.Context, req *sapb.UpdateCRLShardRequest) (*emptypb.Empty, error) {
+ if core.IsAnyNilOrZero(req.IssuerNameID, req.ThisUpdate) {
return nil, errIncompleteRequest
}
- if req.Status != string(core.StatusValid) && req.Status != string(core.StatusInvalid) {
- return nil, berrors.InternalServerError("authorization must have status valid or invalid")
- }
- query := `UPDATE authz2 SET
- status = :status,
- attempted = :attempted,
- attemptedAt = :attemptedAt,
- validationRecord = :validationRecord,
- validationError = :validationError,
- expires = :expires
- WHERE id = :id AND status = :pending`
- var validationRecords []core.ValidationRecord
- for _, recordPB := range req.ValidationRecords {
- record, err := bgrpc.PBToValidationRecord(recordPB)
+ // Only set the nextUpdate if it's actually present in the request message.
+ var nextUpdate *time.Time
+ if req.NextUpdate != nil {
+ nut := req.NextUpdate.AsTime()
+ nextUpdate = &nut
+ }
+
+ _, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) {
+ res, err := tx.ExecContext(ctx,
+ `UPDATE crlShards
+ SET thisUpdate = ?, nextUpdate = ?, leasedUntil = ?
+ WHERE issuerID = ?
+ AND idx = ?
+ AND (thisUpdate is NULL OR thisUpdate <= ?)
+ LIMIT 1`,
+ req.ThisUpdate.AsTime(),
+ nextUpdate,
+ req.ThisUpdate.AsTime(),
+ req.IssuerNameID,
+ req.ShardIdx,
+ req.ThisUpdate.AsTime(),
+ )
if err != nil {
return nil, err
}
- validationRecords = append(validationRecords, record)
- }
- vrJSON, err := json.Marshal(validationRecords)
- if err != nil {
- return nil, err
- }
- var veJSON []byte
- if req.ValidationError != nil {
- validationError, err := bgrpc.PBToProblemDetails(req.ValidationError)
+
+ rowsAffected, err := res.RowsAffected()
if err != nil {
return nil, err
}
- j, err := json.Marshal(validationError)
- if err != nil {
- return nil, err
+ if rowsAffected == 0 {
+ return nil, fmt.Errorf("unable to update shard %d for issuer %d; possibly because the shard does not exist or has a newer thisUpdate", req.ShardIdx, req.IssuerNameID)
}
- veJSON = j
- }
- // Check to see if the AttemptedAt time is non zero and convert to
- // *time.Time if so. If it is zero, leave nil and don't convert. Keep
- // the the database attemptedAt field Null instead of
- // 1970-01-01 00:00:00.
- var attemptedTime *time.Time
- if req.AttemptedAt != 0 {
- val := time.Unix(0, req.AttemptedAt).UTC()
- attemptedTime = &val
- }
- params := map[string]interface{}{
- "status": statusToUint[core.AcmeStatus(req.Status)],
- "attempted": challTypeToUint[req.Attempted],
- "attemptedAt": attemptedTime,
- "validationRecord": vrJSON,
- "id": req.Id,
- "pending": statusUint(core.StatusPending),
- "expires": time.Unix(0, req.Expires).UTC(),
- // if req.ValidationError is nil veJSON should also be nil
- // which should result in a NULL field
- "validationError": veJSON,
- }
-
- res, err := ssa.dbMap.Exec(query, params)
- if err != nil {
- return nil, err
- }
- rows, err := res.RowsAffected()
- if err != nil {
- return nil, err
- }
- if rows == 0 {
- return nil, berrors.NotFoundError("authorization with id %d not found", req.Id)
- } else if rows > 1 {
- return nil, berrors.InternalServerError("multiple rows updated for authorization id %d", req.Id)
- }
- return &emptypb.Empty{}, nil
-}
-
-// RevokeCertificate stores revocation information about a certificate. It will only store this
-// information if the certificate is not already marked as revoked.
-func (ssa *SQLStorageAuthority) RevokeCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) { - if req.Serial == "" || req.Date == 0 || req.Response == nil { - return nil, errIncompleteRequest - } - revokedDate := time.Unix(0, req.Date) - res, err := ssa.dbMap.Exec( - `UPDATE certificateStatus SET - status = ?, - revokedReason = ?, - revokedDate = ?, - ocspLastUpdated = ?, - ocspResponse = ? - WHERE serial = ? AND status != ?`, - string(core.OCSPStatusRevoked), - revocation.Reason(req.Reason), - revokedDate, - revokedDate, - req.Response, - req.Serial, - string(core.OCSPStatusRevoked), - ) - if err != nil { - return nil, err - } - rows, err := res.RowsAffected() + if rowsAffected != 1 { + return nil, errors.New("update affected unexpected number of rows") + } + return nil, nil + }) if err != nil { return nil, err } - if rows == 0 { - return nil, berrors.AlreadyRevokedError("no certificate with serial %s and status other than %s", req.Serial, string(core.OCSPStatusRevoked)) - } - - // Store the OCSP response in Redis (if configured) on a best effort - // basis. We don't want to fail on an error here while mysql is the - // source of truth. - if ssa.rocspWriteClient != nil { - // Use a new context for the goroutine. We aren't going to wait on - // the goroutine to complete, so we don't want it to be canceled - // when the parent function ends. The rocsp client has a - // configurable timeout that can be set during creation. - rocspCtx := context.Background() - - // Send the response off to redis in a goroutine. - go func() { - err = ssa.storeOCSPRedis(rocspCtx, req.Response, req.IssuerID) - ssa.log.Debugf("failed to store OCSP response in redis: %v", err) - }() - } return &emptypb.Empty{}, nil } -// UpdateRevokedCertificate stores new revocation information about an -// already-revoked certificate. It will only store this information if the -// cert is already revoked, if the new revocation reason is `KeyCompromise`, -// and if the revokedDate is identical to the current revokedDate. -func (ssa *SQLStorageAuthority) UpdateRevokedCertificate(ctx context.Context, req *sapb.RevokeCertificateRequest) (*emptypb.Empty, error) { - if req.Serial == "" || req.Date == 0 || req.Backdate == 0 || req.Response == nil { +// PauseIdentifiers pauses a set of identifiers for the provided account. If an +// identifier is currently paused, this is a no-op. If an identifier was +// previously paused and unpaused, it will be repaused unless it was unpaused +// less than two weeks ago. The response will indicate how many identifiers were +// paused and how many were repaused. All work is accomplished in a transaction +// to limit possible race conditions. +func (ssa *SQLStorageAuthority) PauseIdentifiers(ctx context.Context, req *sapb.PauseRequest) (*sapb.PauseIdentifiersResponse, error) { + if core.IsAnyNilOrZero(req.RegistrationID, req.Identifiers) { return nil, errIncompleteRequest } - if req.Reason != ocsp.KeyCompromise { - return nil, fmt.Errorf("cannot update revocation for any reason other than keyCompromise (1); got: %d", req.Reason) - } - thisUpdate := time.Unix(0, req.Date) - revokedDate := time.Unix(0, req.Backdate) - res, err := ssa.dbMap.Exec( - `UPDATE certificateStatus SET - revokedReason = ?, - ocspLastUpdated = ?, - ocspResponse = ? - WHERE serial = ? AND status = ? AND revokedReason != ? 
AND revokedDate = ?`, - revocation.Reason(ocsp.KeyCompromise), - thisUpdate, - req.Response, - req.Serial, - string(core.OCSPStatusRevoked), - revocation.Reason(ocsp.KeyCompromise), - revokedDate, - ) - if err != nil { - return nil, err - } - rows, err := res.RowsAffected() + + // Marshal the identifier now that we've crossed the RPC boundary. + idents, err := newIdentifierModelsFromPB(req.Identifiers) if err != nil { return nil, err } - if rows == 0 { - // InternalServerError because we expected this certificate status to exist, - // to already be revoked for a different reason, and to have a matching date. - return nil, berrors.InternalServerError("no certificate with serial %s and revoked reason other than keyCompromise", req.Serial) - } - - // Store the OCSP response in Redis (if configured) on a best effort - // basis. We don't want to fail on an error here while mysql is the - // source of truth. - if ssa.rocspWriteClient != nil { - // Use a new context for the goroutine. We aren't going to wait on - // the goroutine to complete, so we don't want it to be canceled - // when the parent function ends. The rocsp client has a - // configurable timeout that can be set during creation. - rocspCtx := context.Background() - // Send the response off to redis in a goroutine. - go func() { - err = ssa.storeOCSPRedis(rocspCtx, req.Response, req.IssuerID) - ssa.log.Debugf("failed to store OCSP response in redis: %v", err) - }() - } + response := &sapb.PauseIdentifiersResponse{} + _, err = db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + for _, ident := range idents { + pauseError := func(op string, err error) error { + return fmt.Errorf("while %s identifier %s for registration ID %d: %w", + op, ident.Value, req.RegistrationID, err, + ) + } - return &emptypb.Empty{}, nil -} + var entry pausedModel + err := tx.SelectOne(ctx, &entry, ` + SELECT pausedAt, unpausedAt + FROM paused + WHERE + registrationID = ? AND + identifierType = ? AND + identifierValue = ?`, + req.RegistrationID, + ident.Type, + ident.Value, + ) + + switch { + case err != nil && !errors.Is(err, sql.ErrNoRows): + // Error querying the database. + return nil, pauseError("querying pause status for", err) + + case err != nil && errors.Is(err, sql.ErrNoRows): + // Not currently or previously paused, insert a new pause record. + err = tx.Insert(ctx, &pausedModel{ + RegistrationID: req.RegistrationID, + PausedAt: ssa.clk.Now(), + identifierModel: identifierModel{ + Type: ident.Type, + Value: ident.Value, + }, + }) + if err != nil && !db.IsDuplicate(err) { + return nil, pauseError("pausing", err) + } -// GetPendingAuthorization2 returns the most recent Pending authorization with -// the given identifier, if available. This method only supports DNS identifier types. -// TODO(#5816): Consider removing this method, as it has no callers. 
-func (ssa *SQLStorageAuthority) GetPendingAuthorization2(ctx context.Context, req *sapb.GetPendingAuthorizationRequest) (*corepb.Authorization, error) { - if req.RegistrationID == 0 || req.IdentifierValue == "" || req.ValidUntil == 0 { - return nil, errIncompleteRequest - } - var am authzModel - err := ssa.dbMap.WithContext(ctx).SelectOne( - &am, - fmt.Sprintf(`SELECT %s FROM authz2 WHERE - registrationID = :regID AND - status = :status AND - expires > :validUntil AND - identifierType = :dnsType AND - identifierValue = :ident - ORDER BY expires ASC - LIMIT 1 `, authzFields), - map[string]interface{}{ - "regID": req.RegistrationID, - "status": statusUint(core.StatusPending), - "validUntil": time.Unix(0, req.ValidUntil), - "dnsType": identifierTypeToUint[string(identifier.DNS)], - "ident": req.IdentifierValue, - }, - ) - if err != nil { - if db.IsNoRows(err) { - return nil, berrors.NotFoundError("pending authz not found") - } - return nil, err - } - return modelToAuthzPB(am) -} + // Identifier successfully paused. + response.Paused++ + continue + + case entry.UnpausedAt == nil || entry.PausedAt.After(*entry.UnpausedAt): + // Identifier is already paused. + continue + + case entry.UnpausedAt.After(ssa.clk.Now().Add(-14 * 24 * time.Hour)): + // Previously unpaused less than two weeks ago, skip this identifier. + continue + + case entry.UnpausedAt.After(entry.PausedAt): + // Previously paused (and unpaused), repause the identifier. + _, err := tx.ExecContext(ctx, ` + UPDATE paused + SET pausedAt = ?, + unpausedAt = NULL + WHERE + registrationID = ? AND + identifierType = ? AND + identifierValue = ? AND + unpausedAt IS NOT NULL`, + ssa.clk.Now(), + req.RegistrationID, + ident.Type, + ident.Value, + ) + if err != nil { + return nil, pauseError("repausing", err) + } -// CountPendingAuthorizations2 returns the number of pending, unexpired authorizations -// for the given registration. This method is intended to deprecate CountPendingAuthorizations. -func (ssa *SQLStorageAuthority) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID) (*sapb.Count, error) { - if req.Id == 0 { - return nil, errIncompleteRequest - } + // Identifier successfully repaused. + response.Repaused++ + continue - var count int64 - err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne(&count, - `SELECT COUNT(1) FROM authz2 WHERE - registrationID = :regID AND - expires > :expires AND - status = :status`, - map[string]interface{}{ - "regID": req.Id, - "expires": ssa.clk.Now(), - "status": statusUint(core.StatusPending), - }, - ) + default: + // This indicates a database state which should never occur. + return nil, fmt.Errorf("impossible database state encountered while pausing identifier %s", + ident.Value, + ) + } + } + return nil, nil + }) if err != nil { + // Error occurred during transaction. return nil, err } - return &sapb.Count{Count: count}, nil + return response, nil } -// GetValidOrderAuthorizations2 is used to find the valid, unexpired authorizations -// associated with a specific order and account ID. -func (ssa *SQLStorageAuthority) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest) (*sapb.Authorizations, error) { - if req.AcctID == 0 || req.Id == 0 { +// UnpauseAccount uses up to 5 iterations of UPDATE queries each with a LIMIT of +// 10,000 to unpause up to 50,000 identifiers and returns a count of identifiers +// unpaused. If the returned count is 50,000 there may be more paused identifiers. 
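+//
+// For example, under the batch values assumed above (MaxBatches = 5 and
+// BatchSize = 10,000), an account with 12,345 paused identifiers is fully
+// unpaused in two iterations: the first UPDATE affects 10,000 rows, the second
+// affects 2,345, and the short row count ends the loop early.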
+func (ssa *SQLStorageAuthority) UnpauseAccount(ctx context.Context, req *sapb.RegistrationID) (*sapb.Count, error) {
+ if core.IsAnyNilOrZero(req.Id) {
return nil, errIncompleteRequest
}
- var ams []authzModel
- _, err := ssa.dbMap.WithContext(ctx).Select(
- &ams,
- fmt.Sprintf(`SELECT %s FROM authz2
- LEFT JOIN orderToAuthz2 ON authz2.ID = orderToAuthz2.authzID
- WHERE authz2.registrationID = :regID AND
- authz2.expires > :expires AND
- authz2.status = :status AND
- orderToAuthz2.orderID = :orderID`,
- authzFields,
- ),
- map[string]interface{}{
- "regID": req.AcctID,
- "expires": ssa.clk.Now(),
- "status": statusUint(core.StatusValid),
- "orderID": req.Id,
- },
- )
- if err != nil {
- return nil, err
- }
+ total := &sapb.Count{}
+
+ for range unpause.MaxBatches {
+ result, err := ssa.dbMap.ExecContext(ctx, `
+ UPDATE paused
+ SET unpausedAt = ?
+ WHERE
+ registrationID = ? AND
+ unpausedAt IS NULL
+ LIMIT ?`,
+ ssa.clk.Now(),
+ req.Id,
+ unpause.BatchSize,
+ )
+ if err != nil {
+ return nil, err
+ }
- byName := make(map[string]authzModel)
- for _, am := range ams {
- if uintToIdentifierType[am.IdentifierType] != string(identifier.DNS) {
- return nil, fmt.Errorf("unknown identifier type: %q on authz id %d", am.IdentifierType, am.ID)
+ rowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return nil, err
}
- existing, present := byName[am.IdentifierValue]
- if !present || am.Expires.After(existing.Expires) {
- byName[am.IdentifierValue] = am
+
+ total.Count += rowsAffected
+ if rowsAffected < unpause.BatchSize {
+ // Fewer than batchSize rows were updated, so we're done.
+ break
}
}
- return authzModelMapToPB(byName)
+ return total, nil
}

-// CountInvalidAuthorizations2 counts invalid authorizations for a user expiring
-// in a given time range. This method only supports DNS identifier types.
-func (ssa *SQLStorageAuthority) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest) (*sapb.Count, error) {
- if req.RegistrationID == 0 || req.Hostname == "" || req.Range.Earliest == 0 || req.Range.Latest == 0 {
+// AddRateLimitOverride adds a rate limit override to the database. If the
+// override already exists, it will be updated. If the override does not exist,
+// it will be inserted and enabled. If the override exists but has been
+// disabled, it will be updated but not re-enabled. The status of the override
+// is returned in the Enabled field of the response. To re-enable an override,
+// use the EnableRateLimitOverride method.
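+//
+// A hedged example request (the nested Override message type and its
+// durationpb Period field are assumed from the validation below; all values,
+// including the bucket key, are illustrative):
+//
+//	resp, err := sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{
+//		Override: &sapb.RateLimitOverride{
+//			LimitEnum: 1,
+//			BucketKey: "1:example.com",
+//			Count:     100,
+//			Burst:     100,
+//			Period:    durationpb.New(time.Hour),
+//			Comment:   "requested in a hypothetical support ticket",
+//		},
+//	})
+//
+// resp.Inserted reports whether a new row was created, and resp.Enabled
+// reports whether the override is active after the call.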
+func (ssa *SQLStorageAuthority) AddRateLimitOverride(ctx context.Context, req *sapb.AddRateLimitOverrideRequest) (*sapb.AddRateLimitOverrideResponse, error) {
+ if core.IsAnyNilOrZero(req, req.Override, req.Override.LimitEnum, req.Override.BucketKey, req.Override.Count, req.Override.Burst, req.Override.Period, req.Override.Comment) {
return nil, errIncompleteRequest
}
- var count int64
- err := ssa.dbReadOnlyMap.WithContext(ctx).SelectOne(
- &count,
- `SELECT COUNT(1) FROM authz2 WHERE
- registrationID = :regID AND
- status = :status AND
- expires > :expiresEarliest AND
- expires <= :expiresLatest AND
- identifierType = :dnsType AND
- identifierValue = :ident`,
- map[string]interface{}{
- "regID": req.RegistrationID,
- "dnsType": identifierTypeToUint[string(identifier.DNS)],
- "ident": req.Hostname,
- "expiresEarliest": time.Unix(0, req.Range.Earliest),
- "expiresLatest": time.Unix(0, req.Range.Latest),
- "status": statusUint(core.StatusInvalid),
- },
- )
- if err != nil {
- return nil, err
- }
- return &sapb.Count{Count: count}, nil
-}
+ var inserted bool
+ var enabled bool
+ now := ssa.clk.Now()

-// GetValidAuthorizations2 returns the latest authorization for all
-// domain names that the account has authorizations for. This method is
-// intended to deprecate GetValidAuthorizations. This method only supports
-// DNS identifier types.
-func (ssa *SQLStorageAuthority) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest) (*sapb.Authorizations, error) {
- if len(req.Domains) == 0 || req.RegistrationID == 0 || req.Now == 0 {
- return nil, errIncompleteRequest
- }
+ _, err := db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) {
+ var alreadyEnabled bool
+ err := tx.SelectOne(ctx, &alreadyEnabled, `
+ SELECT enabled
+ FROM overrides
+ WHERE limitEnum = ? AND
+ bucketKey = ?`,
+ req.Override.LimitEnum,
+ req.Override.BucketKey,
+ )

- var authzModels []authzModel
- params := []interface{}{
- req.RegistrationID,
- statusUint(core.StatusValid),
- time.Unix(0, req.Now),
- identifierTypeToUint[string(identifier.DNS)],
- }
- qmarks := make([]string, len(req.Domains))
- for i, n := range req.Domains {
- qmarks[i] = "?"
- params = append(params, n)
- }
- _, err := ssa.dbMap.Select(
- &authzModels,
- fmt.Sprintf(
- `SELECT %s FROM authz2 WHERE
- registrationID = ? AND
- status = ? AND
- expires > ? AND
- identifierType = ? AND
- identifierValue IN (%s)`,
- authzFields,
- strings.Join(qmarks, ","),
- ),
- params...,
- )
+ switch {
+ case err != nil && !db.IsNoRows(err):
+ // Error querying the database.
+ return nil, fmt.Errorf("querying override for rate limit %d and bucket key %s: %w",
+ req.Override.LimitEnum,
+ req.Override.BucketKey,
+ err,
+ )
+
+ case db.IsNoRows(err):
+ // Insert a new overrides row.
+ new := overrideModelForPB(req.Override, now, true)
+ err = tx.Insert(ctx, &new)
+ if err != nil {
+ return nil, fmt.Errorf("inserting override for rate limit %d and bucket key %s: %w",
+ req.Override.LimitEnum,
+ req.Override.BucketKey,
+ err,
+ )
+ }
+ inserted = true
+ enabled = true
+
+ default:
+ // Update the existing overrides row.
+ updated := overrideModelForPB(req.Override, now, alreadyEnabled)
+ _, err = tx.Update(ctx, &updated)
+ if err != nil {
+ return nil, fmt.Errorf("updating override for rate limit %d and bucket key %s: %w",
+ req.Override.LimitEnum,
+ req.Override.BucketKey,
+ err,
+ )
+ }
+ inserted = false
+ enabled = alreadyEnabled
+ }
+ return nil, nil
+ })
if err != nil {
+ // Error occurred during transaction.
return nil, err } + return &sapb.AddRateLimitOverrideResponse{Inserted: inserted, Enabled: enabled}, nil +} - authzMap := make(map[string]authzModel, len(authzModels)) - for _, am := range authzModels { - // Only allow DNS identifiers - if uintToIdentifierType[am.IdentifierType] != string(identifier.DNS) { - continue - } - // If there is an existing authorization in the map only replace it with one - // which has a later expiry. - if existing, present := authzMap[am.IdentifierValue]; present && am.Expires.Before(existing.Expires) { - continue +// setRateLimitOverride sets the enabled field of a rate limit override to the +// provided value and updates the updatedAt column. If the override does not +// exist, a NotFoundError is returned. If the override exists but is already in +// the requested state, this is a no-op. +func (ssa *SQLStorageAuthority) setRateLimitOverride(ctx context.Context, limitEnum int64, bucketKey string, enabled bool) (*emptypb.Empty, error) { + overrideColumnsList, err := ssa.dbMap.ColumnsForModel(overrideModel{}) + if err != nil { + // This should never happen, the model is registered at init time. + return nil, fmt.Errorf("getting columns for override model: %w", err) + } + overrideColumns := strings.Join(overrideColumnsList, ", ") + _, err = db.WithTransaction(ctx, ssa.dbMap, func(tx db.Executor) (any, error) { + var existing overrideModel + err := tx.SelectOne(ctx, &existing, + // Use SELECT FOR UPDATE to both verify the row exists and lock it + // for the duration of the transaction. + `SELECT `+overrideColumns+` FROM overrides + WHERE limitEnum = ? AND + bucketKey = ? + FOR UPDATE`, + limitEnum, + bucketKey, + ) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError( + "no rate limit override found for limit %d and bucket key %s", + limitEnum, + bucketKey, + ) + } + return nil, fmt.Errorf("querying status of override for rate limit %d and bucket key %s: %w", + limitEnum, + bucketKey, + err, + ) } - authzMap[am.IdentifierValue] = am - } - return authzModelMapToPB(authzMap) -} -func addKeyHash(db db.Inserter, cert *x509.Certificate) error { - if cert.RawSubjectPublicKeyInfo == nil { - return errors.New("certificate has a nil RawSubjectPublicKeyInfo") - } - h := sha256.Sum256(cert.RawSubjectPublicKeyInfo) - khm := &keyHashModel{ - KeyHash: h[:], - CertNotAfter: cert.NotAfter, - CertSerial: core.SerialToString(cert.SerialNumber), - } - return db.Insert(khm) -} + if existing.Enabled == enabled { + // No-op + return nil, nil + } -var blockedKeysColumns = "keyHash, added, source, comment" + // Update the existing overrides row. + updated := existing + updated.Enabled = enabled + updated.UpdatedAt = ssa.clk.Now() -// AddBlockedKey adds a key hash to the blockedKeys table -func (ssa *SQLStorageAuthority) AddBlockedKey(ctx context.Context, req *sapb.AddBlockedKeyRequest) (*emptypb.Empty, error) { - if core.IsAnyNilOrZero(req.KeyHash, req.Added, req.Source) { - return nil, errIncompleteRequest - } - sourceInt, ok := stringToSourceInt[req.Source] - if !ok { - return nil, errors.New("unknown source") - } - cols, qs := blockedKeysColumns, "?, ?, ?, ?" - vals := []interface{}{ - req.KeyHash, - time.Unix(0, req.Added), - sourceInt, - req.Comment, - } - if features.Enabled(features.StoreRevokerInfo) && req.RevokedBy != 0 { - cols += ", revokedBy" - qs += ", ?" 
- vals = append(vals, req.RevokedBy) - } - _, err := ssa.dbMap.Exec( - fmt.Sprintf("INSERT INTO blockedKeys (%s) VALUES (%s)", cols, qs), - vals..., - ) - if err != nil { - if db.IsDuplicate(err) { - // Ignore duplicate inserts so multiple certs with the same key can - // be revoked. - return &emptypb.Empty{}, nil + _, err = tx.Update(ctx, &updated) + if err != nil { + return nil, fmt.Errorf("updating status of override for rate limit %d and bucket key %s to %t: %w", + limitEnum, + bucketKey, + enabled, + err, + ) } + return nil, nil + }) + if err != nil { return nil, err } return &emptypb.Empty{}, nil } -// KeyBlocked checks if a key, indicated by a hash, is present in the blockedKeys table -func (ssa *SQLStorageAuthority) KeyBlocked(ctx context.Context, req *sapb.KeyBlockedRequest) (*sapb.Exists, error) { - if req == nil || req.KeyHash == nil { +// DisableRateLimitOverride disables a rate limit override. If the override does +// not exist, a NotFoundError is returned. If the override exists but is already +// disabled, this is a no-op. +func (ssa *SQLStorageAuthority) DisableRateLimitOverride(ctx context.Context, req *sapb.DisableRateLimitOverrideRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req, req.LimitEnum, req.BucketKey) { return nil, errIncompleteRequest } + return ssa.setRateLimitOverride(ctx, req.LimitEnum, req.BucketKey, false) +} - var id int64 - err := ssa.dbMap.SelectOne(&id, `SELECT ID FROM blockedKeys WHERE keyHash = ?`, req.KeyHash) - if err != nil { - if db.IsNoRows(err) { - return &sapb.Exists{Exists: false}, nil - } - return nil, err +// EnableRateLimitOverride enables a rate limit override. If the override does +// not exist, a NotFoundError is returned. If the override exists but is already +// enabled, this is a no-op. 
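+//
+// For example (values illustrative; the request mirrors the disable case):
+//
+//	_, err := sa.EnableRateLimitOverride(ctx, &sapb.EnableRateLimitOverrideRequest{
+//		LimitEnum: 1,
+//		BucketKey: "1:example.com",
+//	})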
+func (ssa *SQLStorageAuthority) EnableRateLimitOverride(ctx context.Context, req *sapb.EnableRateLimitOverrideRequest) (*emptypb.Empty, error) {
+ if core.IsAnyNilOrZero(req, req.LimitEnum, req.BucketKey) {
+ return nil, errIncompleteRequest
+ }
-
- return &sapb.Exists{Exists: true}, nil
+ return ssa.setRateLimitOverride(ctx, req.LimitEnum, req.BucketKey, true)
}
diff --git a/sa/sa_test.go b/sa/sa_test.go
index 4526f8f310e..8c92fad7fcb 100644
--- a/sa/sa_test.go
+++ b/sa/sa_test.go
@@ -3,22 +3,38 @@ package sa
import (
"bytes"
"context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
"crypto/rand"
- "crypto/rsa"
+ "crypto/sha256"
"crypto/x509"
"database/sql"
+ "encoding/base64"
"encoding/json"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
"math/big"
"math/bits"
- "net"
+ mrand "math/rand/v2"
+ "net/netip"
+ "os"
"reflect"
- "sync"
+ "slices"
+ "strconv"
+ "strings"
"testing"
"time"
+ "github.com/go-jose/go-jose/v4"
+ "github.com/go-sql-driver/mysql"
"github.com/jmhodges/clock"
+ "github.com/prometheus/client_golang/prometheus"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/emptypb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
"github.com/letsencrypt/boulder/core"
corepb "github.com/letsencrypt/boulder/core/proto"
"github.com/letsencrypt/boulder/db"
@@ -29,13 +45,10 @@ import (
blog "github.com/letsencrypt/boulder/log"
"github.com/letsencrypt/boulder/metrics"
"github.com/letsencrypt/boulder/probs"
- "github.com/letsencrypt/boulder/rocsp"
- rocsp_config "github.com/letsencrypt/boulder/rocsp/config"
+ "github.com/letsencrypt/boulder/revocation"
sapb "github.com/letsencrypt/boulder/sa/proto"
"github.com/letsencrypt/boulder/test"
"github.com/letsencrypt/boulder/test/vars"
- "golang.org/x/crypto/ocsp"
- jose "gopkg.in/square/go-jose.v2"
)

var log = blog.UseMock()
@@ -46,55 +59,75 @@ var (
"kty": "RSA",
"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
"e": "AQAB"
-}`
- anotherKey = `{
- "kty":"RSA",
- "n": "vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw",
- "e":"AQAB"
}`
)

-// initSA constructs a SQLStorageAuthority and a clean up function
-// that should be defer'ed to the end of the test.
-func initSA(t *testing.T) (*SQLStorageAuthority, clock.FakeClock, func()) {
+// mustTime parses a "YYYY-MM-DD HH:MM" timestamp, panicking on failure, and
+// returns it in UTC.
+func mustTime(s string) time.Time {
+ t, err := time.Parse("2006-01-02 15:04", s)
+ if err != nil {
+ panic(fmt.Sprintf("parsing %q: %s", s, err))
+ }
+ return t.UTC()
+}
+
+// mustTimestamp is like mustTime, but returns a protobuf timestamp.
+func mustTimestamp(s string) *timestamppb.Timestamp {
+ return timestamppb.New(mustTime(s))
+}
+
+// fakeServerStream implements just enough of grpc.ServerStream for tests of
+// server-streaming RPCs: Send forwards each message to the output channel.
+type fakeServerStream[T any] struct {
+ grpc.ServerStream
+ output chan<- *T
+}
+
+func (s *fakeServerStream[T]) Send(msg *T) error {
+ s.output <- msg
+ return nil
+}
+
+func (s *fakeServerStream[T]) Context() context.Context {
+ return context.Background()
+}
+
+// initSA constructs a SQLStorageAuthority and FakeClock for use in tests.
+// The database is cleaned up automatically at the end of the test.
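+//
+// A typical call from a test (a sketch):
+//
+//	sa, fc := initSA(t)
+//	fc.Add(time.Hour) // advance the fake clock as the test requires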
+func initSA(t testing.TB) (*SQLStorageAuthority, clock.FakeClock) { + t.Helper() features.Reset() - dbMap, err := NewDbMap(vars.DBConnSA, DbSettings{}) + dbMap, err := DBMapForTest(vars.DBConnSA) + if err != nil { + t.Fatalf("Failed to create dbMap: %s", err) + } + + dbIncidentsMap, err := DBMapForTest(vars.DBConnIncidents) if err != nil { t.Fatalf("Failed to create dbMap: %s", err) } fc := clock.NewFake() - fc.Set(time.Date(2015, 3, 4, 5, 0, 0, 0, time.UTC)) + fc.Set(mustTime("2015-03-04 05:00")) - // Load the standard list of signing certificates from the hierarchy. - rocspIssuers, err := rocsp_config.LoadIssuers(map[string]int{ - "../test/hierarchy/int-e1.cert.pem": 100, - "../test/hierarchy/int-e2.cert.pem": 101, - "../test/hierarchy/int-r3.cert.pem": 102, - "../test/hierarchy/int-r4.cert.pem": 103, - }) + saro, err := NewSQLStorageAuthorityRO(dbMap, dbIncidentsMap, metrics.NoopRegisterer, 1, 0, fc, log) if err != nil { - t.Fatalf("failed to load issuers: %s", err) + t.Fatalf("Failed to create SA: %s", err) } - sa, err := NewSQLStorageAuthority(dbMap, dbMap, rocsp.NewMockWriteSucceedClient(), rocspIssuers, fc, log, metrics.NoopRegisterer, 1) + + sa, err := NewSQLStorageAuthorityWrapping(saro, dbMap, metrics.NoopRegisterer) if err != nil { t.Fatalf("Failed to create SA: %s", err) } - cleanUp := test.ResetSATestDatabase(t) - return sa, fc, cleanUp + t.Cleanup(test.ResetBoulderTestDatabase(t)) + + return sa, fc } // CreateWorkingTestRegistration inserts a new, correct Registration into the // given SA. -func createWorkingRegistration(t *testing.T, sa *SQLStorageAuthority) *corepb.Registration { - initialIP, _ := net.ParseIP("88.77.66.11").MarshalText() +func createWorkingRegistration(t testing.TB, sa *SQLStorageAuthority) *corepb.Registration { reg, err := sa.NewRegistration(context.Background(), &corepb.Registration{ Key: []byte(theKey), - Contact: []string{"mailto:foo@example.com"}, - InitialIP: initialIP, - CreatedAt: time.Date(2003, 5, 10, 0, 0, 0, 0, time.UTC).UnixNano(), + CreatedAt: mustTimestamp("2003-05-10 00:00"), Status: string(core.StatusValid), }) if err != nil { @@ -103,44 +136,40 @@ func createWorkingRegistration(t *testing.T, sa *SQLStorageAuthority) *corepb.Re return reg } -func createPendingAuthorization(t *testing.T, sa *SQLStorageAuthority, domain string, exp time.Time) int64 { +func createPendingAuthorization(t *testing.T, sa *SQLStorageAuthority, regID int64, ident identifier.ACMEIdentifier, exp time.Time) int64 { t.Helper() - authz := core.Authorization{ - Identifier: identifier.DNSIdentifier(domain), - RegistrationID: 1, - Status: "pending", - Expires: &exp, - Challenges: []core.Challenge{ - { - Token: core.NewToken(), - Type: core.ChallengeTypeHTTP01, - Status: core.StatusPending, - }, - }, + tokenStr := core.NewToken() + token, err := base64.RawURLEncoding.DecodeString(tokenStr) + test.AssertNotError(t, err, "computing test authorization challenge token") + + am := authzModel{ + IdentifierType: identifierTypeToUint[string(ident.Type)], + IdentifierValue: ident.Value, + RegistrationID: regID, + Status: statusToUint[core.StatusPending], + Expires: exp, + Challenges: 1 << challTypeToUint[string(core.ChallengeTypeHTTP01)], + Token: token, } - authzPB, err := bgrpc.AuthzToPB(authz) - test.AssertNotError(t, err, "AuthzToPB failed") - ids, err := sa.NewAuthorizations2(context.Background(), &sapb.AddPendingAuthorizationsRequest{ - Authz: []*corepb.Authorization{authzPB}, - }) - test.AssertNotError(t, err, "sa.NewAuthorizations2 failed") - return ids.Ids[0] + + err = 
sa.dbMap.Insert(context.Background(), &am) + test.AssertNotError(t, err, "creating test authorization") + + return am.ID } -func createFinalizedAuthorization(t *testing.T, sa *SQLStorageAuthority, domain string, exp time.Time, +func createFinalizedAuthorization(t *testing.T, sa *SQLStorageAuthority, regID int64, ident identifier.ACMEIdentifier, exp time.Time, status string, attemptedAt time.Time) int64 { t.Helper() - pendingID := createPendingAuthorization(t, sa, domain, exp) - expInt := exp.UnixNano() + pendingID := createPendingAuthorization(t, sa, regID, ident, exp) attempted := string(core.ChallengeTypeHTTP01) - attemptedAtInt := attemptedAt.UnixNano() _, err := sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ Id: pendingID, Status: status, - Expires: expInt, + Expires: timestamppb.New(exp), Attempted: attempted, - AttemptedAt: attemptedAtInt, + AttemptedAt: timestamppb.New(attemptedAt), }) test.AssertNotError(t, err, "sa.FinalizeAuthorizations2 failed") return pendingID @@ -156,59 +185,46 @@ func goodTestJWK() *jose.JSONWebKey { } func TestAddRegistration(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() - - jwk := goodTestJWK() - jwkJSON, _ := jwk.MarshalJSON() + sa, clk := initSA(t) - contacts := []string{"mailto:foo@example.com"} - initialIP, _ := net.ParseIP("43.34.43.34").MarshalText() + jwkJSON, _ := goodTestJWK().MarshalJSON() reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: jwkJSON, - Contact: contacts, - InitialIP: initialIP, + Key: jwkJSON, }) if err != nil { t.Fatalf("Couldn't create new registration: %s", err) } test.Assert(t, reg.Id != 0, "ID shouldn't be 0") - test.AssertDeepEquals(t, reg.Contact, contacts) - - _, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: 0}) - test.AssertError(t, err, "Registration object for ID 0 was returned") + // Confirm that the registration can be retrieved by ID. dbReg, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id}) test.AssertNotError(t, err, fmt.Sprintf("Couldn't get registration with ID %v", reg.Id)) createdAt := clk.Now() test.AssertEquals(t, dbReg.Id, reg.Id) test.AssertByteEquals(t, dbReg.Key, jwkJSON) - test.AssertDeepEquals(t, dbReg.CreatedAt, createdAt.UnixNano()) + test.AssertDeepEquals(t, dbReg.CreatedAt.AsTime(), createdAt) - initialIP, _ = net.ParseIP("72.72.72.72").MarshalText() - newReg := &corepb.Registration{ - Id: reg.Id, - Key: jwkJSON, - Contact: []string{"test.com"}, - InitialIP: initialIP, - Agreement: "yes", - } - _, err = sa.UpdateRegistration(ctx, newReg) - test.AssertNotError(t, err, fmt.Sprintf("Couldn't get registration with ID %v", reg.Id)) + _, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: 0}) + test.AssertError(t, err, "Registration object for ID 0 was returned") + + // Confirm that the registration can be retrieved by key. 
dbReg, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: jwkJSON})
test.AssertNotError(t, err, "Couldn't get registration by key")
-
- test.AssertEquals(t, dbReg.Id, newReg.Id)
- test.AssertEquals(t, dbReg.Agreement, newReg.Agreement)
-
+ test.AssertEquals(t, dbReg.Id, reg.Id)
+ test.AssertEquals(t, dbReg.Agreement, reg.Agreement)
+
+ anotherKey := `{
+ "kty":"RSA",
+ "n": "vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw",
+ "e":"AQAB"
+ }`
_, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: []byte(anotherKey)})
test.AssertError(t, err, "Registration object for invalid key was returned")
}

func TestNoSuchRegistrationErrors(t *testing.T) {
- sa, _, cleanUp := initSA(t)
- defer cleanUp()
+ sa, _ := initSA(t)

_, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: 100})
test.AssertErrorIs(t, err, berrors.NotFound)
@@ -219,452 +235,505 @@ func TestNoSuchRegistrationErrors(t *testing.T) {
_, err = sa.GetRegistrationByKey(ctx, &sapb.JSONWebKey{Jwk: jwkJSON})
test.AssertErrorIs(t, err, berrors.NotFound)

- _, err = sa.UpdateRegistration(ctx, &corepb.Registration{Id: 100, Key: jwkJSON, InitialIP: []byte("foo")})
- test.AssertErrorIs(t, err, berrors.NotFound)
+ _, err = sa.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{RegistrationID: 100, Jwk: jwkJSON})
+ test.AssertErrorIs(t, err, berrors.InternalServer)
+}
+
+func TestSelectRegistration(t *testing.T) {
+ sa, _ := initSA(t)
+ var ctx = context.Background()
+ jwk := goodTestJWK()
+ jwkJSON, _ := jwk.MarshalJSON()
+ sha, err := core.KeyDigestB64(jwk.Key)
+ test.AssertNotError(t, err, "couldn't parse jwk.Key")
+
+ reg, err := sa.NewRegistration(ctx, &corepb.Registration{
+ Key: jwkJSON,
+ })
+ test.AssertNotError(t, err, fmt.Sprintf("couldn't create new registration: %s", err))
+ test.Assert(t, reg.Id != 0, "ID shouldn't be 0")
+
+ _, err = selectRegistration(ctx, sa.dbMap, "id", reg.Id)
+ test.AssertNotError(t, err, "selecting by id should work")
+ _, err = selectRegistration(ctx, sa.dbMap, "jwk_sha256", sha)
+ test.AssertNotError(t, err, "selecting by jwk_sha256 should work")
+}
+
+func TestReplicationLagRetries(t *testing.T) {
+ sa, clk := initSA(t)
+
+ reg := createWorkingRegistration(t, sa)
+
+ // First, set the lagFactor to 0. Neither selecting a real registration nor
+ // selecting a nonexistent registration should cause the clock to advance.
+ sa.lagFactor = 0
+ start := clk.Now()
+
+ _, err := sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id})
+ test.AssertNotError(t, err, "selecting extant registration")
+ test.AssertEquals(t, clk.Now(), start)
+ test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0)
+
+ _, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id + 1})
+ test.AssertError(t, err, "selecting nonexistent registration")
+ test.AssertEquals(t, clk.Now(), start)
+ // With lagFactor disabled, we should never enter the retry codepath; as a
+ // result, the metric should not increment.
+ test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0)
+
+ // Now, set the lagFactor to 1. Trying to select a nonexistent registration
+ // should cause the clock to advance when GetRegistration sleeps and retries.
+ sa.lagFactor = 1 + start = clk.Now() + + _, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id}) + test.AssertNotError(t, err, "selecting extant registration") + test.AssertEquals(t, clk.Now(), start) + // lagFactor is enabled, but the registration exists. + test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 0) + + _, err = sa.GetRegistration(ctx, &sapb.RegistrationID{Id: reg.Id + 1}) + test.AssertError(t, err, "selecting nonexistent registration") + test.AssertEquals(t, clk.Now(), start.Add(1)) + // With lagFactor enabled, we should enter the retry codepath and as a result + // the metric should increment. + test.AssertMetricWithLabelsEquals(t, sa.lagFactorCounter, prometheus.Labels{"method": "GetRegistration", "result": "notfound"}, 1) +} + +// findIssuedName is a small helper test function to directly query the +// issuedNames table for a given name to find a serial (or return an err). +func findIssuedName(ctx context.Context, dbMap db.OneSelector, issuedName string) (string, error) { + var issuedNamesSerial string + err := dbMap.SelectOne( + ctx, + &issuedNamesSerial, + `SELECT serial FROM issuedNames + WHERE reversedName = ? + ORDER BY notBefore DESC + LIMIT 1`, + issuedName) + return issuedNamesSerial, err +} + +func TestAddSerial(t *testing.T) { + sa, clk := initSA(t) + + reg := createWorkingRegistration(t, sa) + serial, testCert := test.ThrowAwayCert(t, clk) + + _, err := sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + RegID: reg.Id, + Created: timestamppb.New(testCert.NotBefore), + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertError(t, err, "adding without serial should fail") + + _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + Serial: serial, + Created: timestamppb.New(testCert.NotBefore), + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertError(t, err, "adding without regid should fail") + + _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + Serial: serial, + RegID: reg.Id, + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertError(t, err, "adding without created should fail") + + _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + Serial: serial, + RegID: reg.Id, + Created: timestamppb.New(testCert.NotBefore), + }) + test.AssertError(t, err, "adding without expires should fail") + + _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + Serial: serial, + RegID: reg.Id, + Created: timestamppb.New(testCert.NotBefore), + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertNotError(t, err, "adding serial should have succeeded") +} + +func TestGetSerialMetadata(t *testing.T) { + sa, clk := initSA(t) + + reg := createWorkingRegistration(t, sa) + serial, _ := test.ThrowAwayCert(t, clk) + + _, err := sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial}) + test.AssertError(t, err, "getting nonexistent serial should have failed") + + now := clk.Now() + hourLater := now.Add(time.Hour) + _, err = sa.AddSerial(context.Background(), &sapb.AddSerialRequest{ + Serial: serial, + RegID: reg.Id, + Created: timestamppb.New(now), + Expires: timestamppb.New(hourLater), + }) + test.AssertNotError(t, err, "failed to add test serial") + + m, err := sa.GetSerialMetadata(context.Background(), &sapb.Serial{Serial: serial}) + + test.AssertNotError(t, err, "getting serial should have succeeded") + test.AssertEquals(t, m.Serial, serial) + test.AssertEquals(t, 
m.RegistrationID, reg.Id)
+ test.AssertEquals(t, m.Created.AsTime(), now)
+ test.AssertEquals(t, m.Expires.AsTime(), hourLater)
+}
+
+func TestAddPrecertificate(t *testing.T) {
+ ctx := context.Background()
+ sa, clk := initSA(t)
+
+ reg := createWorkingRegistration(t, sa)
+
+ // Create a throw-away self-signed certificate with a random name and
+ // serial number
+ serial, testCert := test.ThrowAwayCert(t, clk)
+
+ // Add the cert as a precertificate
+ regID := reg.Id
+ issuedTime := mustTimestamp("2018-04-01 07:00")
+ _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: regID,
+ Issued: issuedTime,
+ IssuerNameID: 1,
+ })
+ test.AssertNotError(t, err, "Couldn't add test cert")
+
+ // It should have the expected certificate status
+ certStatus, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial})
+ test.AssertNotError(t, err, "Couldn't get status for test cert")
+ test.AssertEquals(t, certStatus.Status, string(core.OCSPStatusGood))
+ now := clk.Now()
+ test.AssertEquals(t, now, certStatus.OcspLastUpdated.AsTime())
+
+ // It should show up in the issued names table
+ issuedNamesSerial, err := findIssuedName(ctx, sa.dbMap, reverseFQDN(testCert.DNSNames[0]))
+ test.AssertNotError(t, err, "expected no err querying issuedNames for precert")
+ test.AssertEquals(t, issuedNamesSerial, serial)
+
+ // We should also be able to call AddCertificate with the same cert
+ // without it being an error. The duplicate err on inserting to
+ // issuedNames should be ignored.
+ _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: regID,
+ Issued: issuedTime,
+ })
+ test.AssertNotError(t, err, "unexpected err adding final cert after precert")
+}
+
+func TestAddPrecertificateNoOCSP(t *testing.T) {
+ sa, clk := initSA(t)
+
+ reg := createWorkingRegistration(t, sa)
+ _, testCert := test.ThrowAwayCert(t, clk)
+
+ regID := reg.Id
+ issuedTime := mustTimestamp("2018-04-01 07:00")
+ _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: regID,
+ Issued: issuedTime,
+ IssuerNameID: 1,
+ })
+ test.AssertNotError(t, err, "Couldn't add test cert")
+}
+
+func TestAddPreCertificateDuplicate(t *testing.T) {
+ sa, clk := initSA(t)
+
+ reg := createWorkingRegistration(t, sa)
+
+ _, testCert := test.ThrowAwayCert(t, clk)
+ issuedTime := clk.Now()
+
+ _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ Issued: timestamppb.New(issuedTime),
+ RegID: reg.Id,
+ IssuerNameID: 1,
+ })
+ test.AssertNotError(t, err, "Couldn't add test certificate")
+
+ _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ Issued: timestamppb.New(issuedTime),
+ RegID: reg.Id,
+ IssuerNameID: 1,
+ })
+ test.AssertDeepEquals(t, err, berrors.DuplicateError("cannot add a duplicate cert"))
+}
+
+func TestAddPrecertificateIncomplete(t *testing.T) {
+ sa, clk := initSA(t)
+
+ reg := createWorkingRegistration(t, sa)
+
+ // Create a throw-away self-signed certificate with a random name and
+ // serial number
+ _, testCert := test.ThrowAwayCert(t, clk)
+
+ // Add the cert as a precertificate
+ regID := reg.Id
+ _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: regID,
+ Issued: mustTimestamp("2018-04-01 07:00"),
+ // Leaving out IssuerNameID
+ })
+
+ test.AssertError(t, err, "Adding precert with no issuer did not fail")
+}
+
+func TestAddPrecertificateKeyHash(t *testing.T) {
+ sa, clk
:= initSA(t)
+ reg := createWorkingRegistration(t, sa)
+
+ serial, testCert := test.ThrowAwayCert(t, clk)
+ _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: reg.Id,
+ Issued: timestamppb.New(testCert.NotBefore),
+ IssuerNameID: 1,
+ })
+ test.AssertNotError(t, err, "failed to add precert")
+
+ var keyHashes []keyHashModel
+ _, err = sa.dbMap.Select(context.Background(), &keyHashes, "SELECT * FROM keyHashToSerial")
+ test.AssertNotError(t, err, "failed to retrieve rows from keyHashToSerial")
+ test.AssertEquals(t, len(keyHashes), 1)
+ test.AssertEquals(t, keyHashes[0].CertSerial, serial)
+ test.AssertEquals(t, keyHashes[0].CertNotAfter, testCert.NotAfter)
+ spkiHash := sha256.Sum256(testCert.RawSubjectPublicKeyInfo)
+ test.Assert(t, bytes.Equal(keyHashes[0].KeyHash, spkiHash[:]), "spki hash mismatch")
}

func TestAddCertificate(t *testing.T) {
- sa, clk, cleanUp := initSA(t)
- defer cleanUp()
+ sa, clk := initSA(t)

reg := createWorkingRegistration(t, sa)

- // An example cert taken from EFF's website
- certDER, err := ioutil.ReadFile("www.eff.org.der")
- test.AssertNotError(t, err, "Couldn't read example cert DER")
+ serial, testCert := test.ThrowAwayCert(t, clk)
+
+ issuedTime := sa.clk.Now()
+ _, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
+ Der: testCert.Raw,
+ RegID: reg.Id,
+ Issued: timestamppb.New(issuedTime),
+ })
+ test.AssertNotError(t, err, "Couldn't add test cert")
+
+ retrievedCert, err := sa.GetCertificate(ctx, &sapb.Serial{Serial: serial})
+ test.AssertNotError(t, err, "Couldn't get test cert by full serial")
+ test.AssertByteEquals(t, testCert.Raw, retrievedCert.Der)
+ test.AssertEquals(t, retrievedCert.Issued.AsTime(), issuedTime)

- // Calling AddCertificate with a non-nil issued should succeed
- digest, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{
- Der: certDER,
+ // Calling AddCertificate with empty args should fail.
+ _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ + Der: nil, RegID: reg.Id, - Issued: sa.clk.Now().UnixNano(), - }) - test.AssertNotError(t, err, "Couldn't add www.eff.org.der") - test.AssertEquals(t, digest.Digest, "qWoItDZmR4P9eFbeYgXXP3SR4ApnkQj8x4LsB_ORKBo") - - retrievedCert, err := sa.GetCertificate(ctx, &sapb.Serial{Serial: "000000000000000000000000000000021bd4"}) - test.AssertNotError(t, err, "Couldn't get www.eff.org.der by full serial") - test.AssertByteEquals(t, certDER, retrievedCert.Der) - // Because nil was provided as the Issued time we expect the cert was stored - // with an issued time equal to now - test.AssertEquals(t, retrievedCert.Issued, clk.Now().UnixNano()) - - // Test cert generated locally by Boulder / CFSSL, names [example.com, - // www.example.com, admin.example.com] - certDER2, err := ioutil.ReadFile("test-cert.der") - test.AssertNotError(t, err, "Couldn't read example cert DER") - serial := "ffdd9b8a82126d96f61d378d5ba99a0474f0" - - // Add the certificate with a specific issued time instead of nil - issuedTime := time.Date(2018, 4, 1, 7, 0, 0, 0, time.UTC) - digest2, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER2, + Issued: timestamppb.New(issuedTime), + }) + test.AssertError(t, err, "shouldn't be able to add cert with no DER") + _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ + Der: testCert.Raw, + RegID: 0, + Issued: timestamppb.New(issuedTime), + }) + test.AssertError(t, err, "shouldn't be able to add cert with no regID") + _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ + Der: testCert.Raw, RegID: reg.Id, - Issued: issuedTime.UnixNano(), - }) - test.AssertNotError(t, err, "Couldn't add test-cert.der") - test.AssertEquals(t, digest2.Digest, "vrlPN5wIPME1D2PPsCy-fGnTWh8dMyyYQcXPRkjHAQI") - - retrievedCert2, err := sa.GetCertificate(ctx, &sapb.Serial{Serial: serial}) - test.AssertNotError(t, err, "Couldn't get test-cert.der") - test.AssertByteEquals(t, certDER2, retrievedCert2.Der) - // The cert should have been added with the specific issued time we provided - // as the issued field. 
- test.AssertEquals(t, retrievedCert2.Issued, issuedTime.UnixNano()) - - // Test adding OCSP response with cert - certDER3, err := ioutil.ReadFile("test-cert2.der") - test.AssertNotError(t, err, "Couldn't read example cert DER") - ocspResp := []byte{0, 0, 1} + Issued: nil, + }) + test.AssertError(t, err, "shouldn't be able to add cert with no issued timestamp") _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER3, + Der: testCert.Raw, RegID: reg.Id, - Ocsp: ocspResp, - Issued: issuedTime.UnixNano(), + Issued: timestamppb.New(time.Time{}), }) - test.AssertNotError(t, err, "Couldn't add test-cert2.der") + test.AssertError(t, err, "shouldn't be able to add cert with zero issued timestamp") } func TestAddCertificateDuplicate(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() + sa, clk := initSA(t) reg := createWorkingRegistration(t, sa) - _, testCert := test.ThrowAwayCert(t, 1) + _, testCert := test.ThrowAwayCert(t, clk) issuedTime := clk.Now() _, err := sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ Der: testCert.Raw, RegID: reg.Id, - Issued: issuedTime.UnixNano(), + Issued: timestamppb.New(issuedTime), }) test.AssertNotError(t, err, "Couldn't add test certificate") _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ Der: testCert.Raw, RegID: reg.Id, - Issued: issuedTime.UnixNano(), + Issued: timestamppb.New(issuedTime), }) test.AssertDeepEquals(t, err, berrors.DuplicateError("cannot add a duplicate cert")) } -func TestCountCertificatesByNames(t *testing.T) { - sa, clk, cleanUp := initSA(t) - defer cleanUp() - - // Test cert generated locally by Boulder / CFSSL, names [example.com, - // www.example.com, admin.example.com] - certDER, err := ioutil.ReadFile("test-cert.der") - test.AssertNotError(t, err, "Couldn't read example cert DER") +func TestFQDNSetTimestampsForWindow(t *testing.T) { + sa, fc := initSA(t) - cert, err := x509.ParseCertificate(certDER) - test.AssertNotError(t, err, "Couldn't parse example cert DER") - - // Set the test clock's time to the time from the test certificate, plus an - // hour to account for rounding. - clk.Add(time.Hour - clk.Now().Sub(cert.NotBefore)) - now := clk.Now() - yesterday := clk.Now().Add(-24 * time.Hour).UnixNano() - twoDaysAgo := clk.Now().Add(-48 * time.Hour).UnixNano() - tomorrow := clk.Now().Add(24 * time.Hour).UnixNano() + tx, err := sa.dbMap.BeginTx(ctx) + test.AssertNotError(t, err, "Failed to open transaction") - // Count for a name that doesn't have any certs - req := &sapb.CountCertificatesByNamesRequest{ - Names: []string{"example.com"}, - Range: &sapb.Range{ - Earliest: yesterday, - Latest: now.UnixNano(), - }, + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("a.example.com"), + identifier.NewDNS("B.example.com"), } - counts, err := sa.CountCertificatesByNames(ctx, req) - test.AssertNotError(t, err, "Error counting certs.") - test.AssertEquals(t, len(counts.Counts), 1) - test.AssertEquals(t, counts.Counts["example.com"], int64(0)) - // Add the test cert and query for its names. 
- reg := createWorkingRegistration(t, sa) - issued := sa.clk.Now() - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: reg.Id, - Issued: issued.UnixNano(), - }) - test.AssertNotError(t, err, "Couldn't add test-cert.der") - - // Time range including now should find the cert - counts, err = sa.CountCertificatesByNames(ctx, req) - test.AssertNotError(t, err, "sa.CountCertificatesByName failed") - test.AssertEquals(t, len(counts.Counts), 1) - test.AssertEquals(t, counts.Counts["example.com"], int64(1)) - - // Time range between two days ago and yesterday should not. - req.Range.Earliest = twoDaysAgo - req.Range.Latest = yesterday - counts, err = sa.CountCertificatesByNames(ctx, req) - test.AssertNotError(t, err, "Error counting certs.") - test.AssertEquals(t, len(counts.Counts), 1) - test.AssertEquals(t, counts.Counts["example.com"], int64(0)) - - // Time range between now and tomorrow also should not (time ranges are - // inclusive at the tail end, but not the beginning end). - req.Range.Earliest = now.UnixNano() - req.Range.Latest = tomorrow - counts, err = sa.CountCertificatesByNames(ctx, req) - test.AssertNotError(t, err, "Error counting certs.") - test.AssertEquals(t, len(counts.Counts), 1) - test.AssertEquals(t, counts.Counts["example.com"], int64(0)) - - // Add a second test cert (for example.co.bn) and query for multiple names. - names := []string{"example.com", "foo.com", "example.co.bn"} - - // Override countCertificatesByName with an implementation of certCountFunc - // that will block forever if it's called in serial, but will succeed if - // called in parallel. - var interlocker sync.WaitGroup - interlocker.Add(len(names)) - sa.parallelismPerRPC = len(names) - oldCertCountFunc := sa.countCertificatesByName - sa.countCertificatesByName = func(sel db.Selector, domain string, timeRange *sapb.Range) (int64, error) { - interlocker.Done() - interlocker.Wait() - return oldCertCountFunc(sel, domain, timeRange) - } - - certDER2, err := ioutil.ReadFile("test-cert2.der") - test.AssertNotError(t, err, "Couldn't read test-cert2.der") - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER2, - RegID: reg.Id, - Issued: issued.UnixNano(), - }) - test.AssertNotError(t, err, "Couldn't add test-cert2.der") - req.Names = names - req.Range.Earliest = yesterday - req.Range.Latest = now.Add(10000 * time.Hour).UnixNano() - counts, err = sa.CountCertificatesByNames(ctx, req) - test.AssertNotError(t, err, "Error counting certs.") - test.AssertEquals(t, len(counts.Counts), 3) - - expected := map[string]int64{ - "example.co.bn": 1, - "foo.com": 0, - "example.com": 1, - } - for name, count := range counts.Counts { - domain := name - actualCount := count - expectedCount := expected[domain] - test.AssertEquals(t, actualCount, expectedCount) - } -} - -func TestCountRegistrationsByIP(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() - - contact := []string{"mailto:foo@example.com"} - - // Create one IPv4 registration - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("43.34.43.34").MarshalText() - _, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - // Create two IPv6 registrations, both within the same /48 - key, _ = jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(2), E: 1}}.MarshalJSON() - initialIP, _ = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9652").MarshalText() - test.AssertNotError(t, err, 
"Couldn't insert registration") - _, err = sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - test.AssertNotError(t, err, "Couldn't insert registration") - key, _ = jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(3), E: 1}}.MarshalJSON() - initialIP, _ = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9653").MarshalText() - _, err = sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - test.AssertNotError(t, err, "Couldn't insert registration") - - req := &sapb.CountRegistrationsByIPRequest{ - Ip: net.ParseIP("1.1.1.1"), - Range: &sapb.Range{ - Earliest: fc.Now().Add(-time.Hour * 24).UnixNano(), - Latest: fc.Now().UnixNano(), - }, - } - - // There should be 0 registrations for an IPv4 address we didn't add - // a registration for - count, err := sa.CountRegistrationsByIP(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(0)) - // There should be 1 registration for the IPv4 address we did add - // a registration for. - req.Ip = net.ParseIP("43.34.43.34") - count, err = sa.CountRegistrationsByIP(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(1)) - // There should be 1 registration for the first IPv6 address we added - // a registration for - req.Ip = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9652") - count, err = sa.CountRegistrationsByIP(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(1)) - // There should be 1 registration for the second IPv6 address we added - // a registration for as well - req.Ip = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9653") - count, err = sa.CountRegistrationsByIP(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(1)) - // There should be 0 registrations for an IPv6 address in the same /48 as the - // two IPv6 addresses with registrations - req.Ip = net.ParseIP("2001:cdba:1234:0000:0000:0000:0000:0000") - count, err = sa.CountRegistrationsByIP(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(0)) -} + // Invalid Window + req := &sapb.CountFQDNSetsRequest{ + Identifiers: idents.ToProtoSlice(), + Window: nil, + } + _, err = sa.FQDNSetTimestampsForWindow(ctx, req) + test.AssertErrorIs(t, err, errIncompleteRequest) -func TestCountRegistrationsByIPRange(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() - - contact := []string{"mailto:foo@example.com"} - - // Create one IPv4 registration - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("43.34.43.34").MarshalText() - _, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - // Create two IPv6 registrations, both within the same /48 - key, _ = jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(2), E: 1}}.MarshalJSON() - initialIP, _ = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9652").MarshalText() - test.AssertNotError(t, err, "Couldn't insert registration") - _, err = sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - test.AssertNotError(t, err, "Couldn't insert registration") - key, _ = jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(3), E: 1}}.MarshalJSON() - 
initialIP, _ = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9653").MarshalText() - _, err = sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - Contact: contact, - }) - test.AssertNotError(t, err, "Couldn't insert registration") - - req := &sapb.CountRegistrationsByIPRequest{ - Ip: net.ParseIP("1.1.1.1"), - Range: &sapb.Range{ - Earliest: fc.Now().Add(-time.Hour * 24).UnixNano(), - Latest: fc.Now().UnixNano(), - }, - } - - // There should be 0 registrations in the range for an IPv4 address we didn't - // add a registration for - req.Ip = net.ParseIP("1.1.1.1") - count, err := sa.CountRegistrationsByIPRange(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(0)) - // There should be 1 registration in the range for the IPv4 address we did - // add a registration for - req.Ip = net.ParseIP("43.34.43.34") - count, err = sa.CountRegistrationsByIPRange(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(1)) - // There should be 2 registrations in the range for the first IPv6 address we added - // a registration for because it's in the same /48 - req.Ip = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9652") - count, err = sa.CountRegistrationsByIPRange(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(2)) - // There should be 2 registrations in the range for the second IPv6 address - // we added a registration for as well, because it too is in the same /48 - req.Ip = net.ParseIP("2001:cdba:1234:5678:9101:1121:3257:9653") - count, err = sa.CountRegistrationsByIPRange(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(2)) - // There should also be 2 registrations in the range for an arbitrary IPv6 address in - // the same /48 as the registrations we added - req.Ip = net.ParseIP("2001:cdba:1234:0000:0000:0000:0000:0000") - count, err = sa.CountRegistrationsByIPRange(ctx, req) - test.AssertNotError(t, err, "Failed to count registrations") - test.AssertEquals(t, count.Count, int64(2)) -} + window := time.Hour * 3 + req = &sapb.CountFQDNSetsRequest{ + Identifiers: idents.ToProtoSlice(), + Window: durationpb.New(window), + } -func TestFQDNSets(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + // Ensure zero issuance has occurred for names. + resp, err := sa.FQDNSetTimestampsForWindow(ctx, req) + test.AssertNotError(t, err, "Failed to count name sets") + test.AssertEquals(t, len(resp.Timestamps), 0) - tx, err := sa.dbMap.Begin() - test.AssertNotError(t, err, "Failed to open transaction") - names := []string{"a.example.com", "B.example.com"} + // Add an issuance for names inside the window. expires := fc.Now().Add(time.Hour * 2).UTC() - issued := fc.Now() - err = addFQDNSet(tx, names, "serial", issued, expires) + firstIssued := fc.Now() + err = addFQDNSet(ctx, tx, idents, "serial", firstIssued, expires) test.AssertNotError(t, err, "Failed to add name set") test.AssertNotError(t, tx.Commit(), "Failed to commit transaction") - threeHours := time.Hour * 3 - req := &sapb.CountFQDNSetsRequest{ - Domains: names, - Window: threeHours.Nanoseconds(), - } - // only one valid - count, err := sa.CountFQDNSets(ctx, req) + // Ensure there's 1 issuance timestamp for names inside the window. 
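+ // (This issuance was recorded at the current fake-clock time, comfortably + // inside the three-hour lookback.)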
+ resp, err = sa.FQDNSetTimestampsForWindow(ctx, req) test.AssertNotError(t, err, "Failed to count name sets") - test.AssertEquals(t, count.Count, int64(1)) + test.AssertEquals(t, len(resp.Timestamps), 1) + test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime()) - // check hash isn't affected by changing name order/casing - req.Domains = []string{"b.example.com", "A.example.COM"} - count, err = sa.CountFQDNSets(ctx, req) + // Ensure that the hash isn't affected by changing name order/casing. + req.Identifiers = []*corepb.Identifier{ + identifier.NewDNS("b.example.com").ToProto(), + identifier.NewDNS("A.example.COM").ToProto(), + } + resp, err = sa.FQDNSetTimestampsForWindow(ctx, req) test.AssertNotError(t, err, "Failed to count name sets") - test.AssertEquals(t, count.Count, int64(1)) + test.AssertEquals(t, len(resp.Timestamps), 1) + test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime()) - // add another valid set - tx, err = sa.dbMap.Begin() + // Add another issuance for names inside the window. + tx, err = sa.dbMap.BeginTx(ctx) test.AssertNotError(t, err, "Failed to open transaction") - err = addFQDNSet(tx, names, "anotherSerial", issued, expires) + err = addFQDNSet(ctx, tx, idents, "anotherSerial", firstIssued, expires) test.AssertNotError(t, err, "Failed to add name set") test.AssertNotError(t, tx.Commit(), "Failed to commit transaction") - // only two valid - req.Domains = names - count, err = sa.CountFQDNSets(ctx, req) + // Ensure there are two issuance timestamps for names inside the window. + req.Identifiers = idents.ToProtoSlice() + resp, err = sa.FQDNSetTimestampsForWindow(ctx, req) test.AssertNotError(t, err, "Failed to count name sets") - test.AssertEquals(t, count.Count, int64(2)) + test.AssertEquals(t, len(resp.Timestamps), 2) + test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime()) - // add an expired set - tx, err = sa.dbMap.Begin() + // Add another issuance for names but just outside the window. + tx, err = sa.dbMap.BeginTx(ctx) test.AssertNotError(t, err, "Failed to open transaction") - err = addFQDNSet( - tx, - names, - "yetAnotherSerial", - issued.Add(-threeHours), - expires.Add(-threeHours), - ) + err = addFQDNSet(ctx, tx, idents, "yetAnotherSerial", firstIssued.Add(-window), expires) test.AssertNotError(t, err, "Failed to add name set") test.AssertNotError(t, tx.Commit(), "Failed to commit transaction") - // only two valid - count, err = sa.CountFQDNSets(ctx, req) + // Ensure there are still only two issuance timestamps in the window. 
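+ // The third set was issued one full window before now, putting it just + // outside the lookback.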
+ resp, err = sa.FQDNSetTimestampsForWindow(ctx, req) test.AssertNotError(t, err, "Failed to count name sets") - test.AssertEquals(t, count.Count, int64(2)) + test.AssertEquals(t, len(resp.Timestamps), 2) + test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime()) + + resp, err = sa.FQDNSetTimestampsForWindow(ctx, &sapb.CountFQDNSetsRequest{ + Identifiers: idents.ToProtoSlice(), + Window: durationpb.New(window), + Limit: 1, + }) + test.AssertNotError(t, err, "Failed to count name sets") + test.AssertEquals(t, len(resp.Timestamps), 1) + test.AssertEquals(t, firstIssued, resp.Timestamps[len(resp.Timestamps)-1].AsTime()) } -func TestFQDNSetsExists(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() +func TestFQDNSetExists(t *testing.T) { + sa, fc := initSA(t) + + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("a.example.com"), + identifier.NewDNS("B.example.com"), + } - names := []string{"a.example.com", "B.example.com"} - exists, err := sa.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Domains: names}) + exists, err := sa.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Identifiers: idents.ToProtoSlice()}) test.AssertNotError(t, err, "Failed to check FQDN set existence") test.Assert(t, !exists.Exists, "FQDN set shouldn't exist") - tx, err := sa.dbMap.Begin() + tx, err := sa.dbMap.BeginTx(ctx) test.AssertNotError(t, err, "Failed to open transaction") expires := fc.Now().Add(time.Hour * 2).UTC() issued := fc.Now() - err = addFQDNSet(tx, names, "serial", issued, expires) + err = addFQDNSet(ctx, tx, idents, "serial", issued, expires) test.AssertNotError(t, err, "Failed to add name set") test.AssertNotError(t, tx.Commit(), "Failed to commit transaction") - exists, err = sa.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Domains: names}) + exists, err = sa.FQDNSetExists(ctx, &sapb.FQDNSetExistsRequest{Identifiers: idents.ToProtoSlice()}) test.AssertNotError(t, err, "Failed to check FQDN set existence") test.Assert(t, exists.Exists, "FQDN set does exist") } type execRecorder struct { - query string - args []interface{} + valuesPerRow int + query string + args []any } -func (e *execRecorder) Exec(query string, args ...interface{}) (sql.Result, error) { +func (e *execRecorder) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { e.query = query e.args = args - return nil, nil + return rowsResult{int64(len(args) / e.valuesPerRow)}, nil +} + +type rowsResult struct { + rowsAffected int64 +} + +func (r rowsResult) LastInsertId() (int64, error) { + return r.rowsAffected, nil +} + +func (r rowsResult) RowsAffected() (int64, error) { + return r.rowsAffected, nil } func TestAddIssuedNames(t *testing.T) { serial := big.NewInt(1) expectedSerial := "000000000000000000000000000000000001" - notBefore := time.Date(2018, 2, 14, 12, 0, 0, 0, time.UTC) - placeholdersPerName := "(?, ?, ?, ?)" - baseQuery := "INSERT INTO issuedNames (reversedName, serial, notBefore, renewal) VALUES" + notBefore := mustTime("2018-02-14 12:00") + expectedNotBefore := notBefore.Truncate(24 * time.Hour) + placeholdersPerName := "(?,?,?,?)" + baseQuery := "INSERT INTO issuedNames (reversedName,serial,notBefore,renewal) VALUES" testCases := []struct { Name string @@ -672,7 +741,7 @@ func TestAddIssuedNames(t *testing.T) { SerialNumber *big.Int NotBefore time.Time Renewal bool - ExpectedArgs []interface{} + ExpectedArgs []any }{ { Name: "One domain, not a renewal", @@ -680,10 +749,10 @@ func TestAddIssuedNames(t *testing.T) { SerialNumber: serial, NotBefore: notBefore, 
Renewal: false, - ExpectedArgs: []interface{}{ + ExpectedArgs: []any{ "uk.co.example", expectedSerial, - notBefore, + expectedNotBefore, false, }, }, @@ -693,14 +762,14 @@ func TestAddIssuedNames(t *testing.T) { SerialNumber: serial, NotBefore: notBefore, Renewal: false, - ExpectedArgs: []interface{}{ + ExpectedArgs: []any{ "uk.co.example", expectedSerial, - notBefore, + expectedNotBefore, false, "xyz.example", expectedSerial, - notBefore, + expectedNotBefore, false, }, }, @@ -710,10 +779,10 @@ func TestAddIssuedNames(t *testing.T) { SerialNumber: serial, NotBefore: notBefore, Renewal: true, - ExpectedArgs: []interface{}{ + ExpectedArgs: []any{ "uk.co.example", expectedSerial, - notBefore, + expectedNotBefore, true, }, }, @@ -723,14 +792,14 @@ func TestAddIssuedNames(t *testing.T) { SerialNumber: serial, NotBefore: notBefore, Renewal: true, - ExpectedArgs: []interface{}{ + ExpectedArgs: []any{ "uk.co.example", expectedSerial, - notBefore, + expectedNotBefore, true, "xyz.example", expectedSerial, - notBefore, + expectedNotBefore, true, }, }, @@ -738,8 +807,9 @@ func TestAddIssuedNames(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - var e execRecorder + e := execRecorder{valuesPerRow: 4} err := addIssuedNames( + ctx, &e, &x509.Certificate{ DNSNames: tc.IssuedNames, @@ -749,10 +819,10 @@ func TestAddIssuedNames(t *testing.T) { tc.Renewal) test.AssertNotError(t, err, "addIssuedNames failed") expectedPlaceholders := placeholdersPerName - for i := 0; i < len(tc.IssuedNames)-1; i++ { - expectedPlaceholders = fmt.Sprintf("%s, %s", expectedPlaceholders, placeholdersPerName) + for range len(tc.IssuedNames) - 1 { + expectedPlaceholders = fmt.Sprintf("%s,%s", expectedPlaceholders, placeholdersPerName) } - expectedQuery := fmt.Sprintf("%s %s;", baseQuery, expectedPlaceholders) + expectedQuery := fmt.Sprintf("%s %s", baseQuery, expectedPlaceholders) test.AssertEquals(t, e.query, expectedQuery) if !reflect.DeepEqual(e.args, tc.ExpectedArgs) { t.Errorf("Wrong args: got\n%#v, expected\n%#v", e.args, tc.ExpectedArgs) @@ -761,92 +831,58 @@ func TestAddIssuedNames(t *testing.T) { } } -func TestPreviousCertificateExists(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() +func TestDeactivateAuthorization2(t *testing.T) { + sa, fc := initSA(t) reg := createWorkingRegistration(t, sa) - // An example cert taken from EFF's website - certDER, err := ioutil.ReadFile("www.eff.org.der") - test.AssertNotError(t, err, "reading cert DER") - - issued := sa.clk.Now() - _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - Issued: issued.UnixNano(), - RegID: reg.Id, - IssuerID: 1, - }) - test.AssertNotError(t, err, "Failed to add precertificate") - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: reg.Id, - Issued: issued.UnixNano(), - }) - test.AssertNotError(t, err, "calling AddCertificate") - - cases := []struct { - name string - domain string - regID int64 - expected bool - }{ - {"matches", "www.eff.org", reg.Id, true}, - {"wrongDomain", "wwoof.org", reg.Id, false}, - {"wrongAccount", "www.eff.org", 3333, false}, - } - - for _, testCase := range cases { - t.Run(testCase.name, func(t *testing.T) { - exists, err := sa.PreviousCertificateExists(context.Background(), - &sapb.PreviousCertificateExistsRequest{ - Domain: testCase.domain, - RegID: testCase.regID, - }) - test.AssertNotError(t, err, "calling PreviousCertificateExists") - if exists.Exists != testCase.expected { - t.Errorf("wanted %v got %v", 
testCase.expected, exists.Exists) - } - }) - } -} - -func TestDeactivateAuthorization2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() - // deactivate a pending authorization expires := fc.Now().Add(time.Hour).UTC() attemptedAt := fc.Now() - authzID := createPendingAuthorization(t, sa, "example.com", expires) + authzID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires) _, err := sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) test.AssertNotError(t, err, "sa.DeactivateAuthorization2 failed") - // deactivate a valid authorization" - authzID = createFinalizedAuthorization(t, sa, "example.com", expires, "valid", attemptedAt) + // deactivate a valid authorization + authzID = createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires, "valid", attemptedAt) _, err = sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) test.AssertNotError(t, err, "sa.DeactivateAuthorization2 failed") } func TestDeactivateAccount(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() + sa, _ := initSA(t) reg := createWorkingRegistration(t, sa) - _, err := sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id}) + // An incomplete request should be rejected. + _, err := sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{}) + test.AssertError(t, err, "Incomplete request should fail") + test.AssertContains(t, err.Error(), "incomplete") + + // Deactivating should work, and return the same account but with updated + // status and cleared contacts. + got, err := sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id}) test.AssertNotError(t, err, "DeactivateRegistration failed") + test.AssertEquals(t, got.Id, reg.Id) + test.AssertEquals(t, core.AcmeStatus(got.Status), core.StatusDeactivated) - dbReg, err := sa.GetRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id}) + // Double-check that the DeactivateRegistration method returned the right + // thing, by fetching the same account ourselves. + got, err = sa.GetRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id}) test.AssertNotError(t, err, "GetRegistration failed") - test.AssertEquals(t, core.AcmeStatus(dbReg.Status), core.StatusDeactivated) + test.AssertEquals(t, got.Id, reg.Id) + test.AssertEquals(t, core.AcmeStatus(got.Status), core.StatusDeactivated) + + // Attempting to deactivate it a second time should fail, since it is already + // deactivated. 
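+ // Deactivation is not idempotent: the second call reports an error rather + // than silently succeeding.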
+ _, err = sa.DeactivateRegistration(context.Background(), &sapb.RegistrationID{Id: reg.Id}) + test.AssertError(t, err, "Deactivating an already-deactivated account should fail") } -func TestReverseName(t *testing.T) { +func TestReverseFQDN(t *testing.T) { testCases := []struct { - inputDomain string - inputReversed string + fqdn string + reversed string }{ {"", ""}, {"...", "..."}, @@ -857,139 +893,365 @@ func TestReverseName(t *testing.T) { } for _, tc := range testCases { - output := ReverseName(tc.inputDomain) - test.AssertEquals(t, output, tc.inputReversed) + output := reverseFQDN(tc.fqdn) + test.AssertEquals(t, output, tc.reversed) + + output = reverseFQDN(tc.reversed) + test.AssertEquals(t, output, tc.fqdn) } } -func TestNewOrder(t *testing.T) { - sa, _, cleanup := initSA(t) - defer cleanup() - - // Create a test registration to reference - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() - reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - }) - test.AssertNotError(t, err, "Couldn't create test registration") - - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: 1, - Names: []string{"example.com", "just.another.example.com"}, - V2Authorizations: []int64{1, 2, 3}, - }) - test.AssertNotError(t, err, "sa.NewOrder failed") - test.AssertEquals(t, order.Id, int64(1)) +func TestEncodeIssuedName(t *testing.T) { + testCases := []struct { + issuedName string + reversed string + oneWay bool + }{ + // Empty strings and bare separators/TLDs should be unchanged. + {"", "", false}, + {"...", "...", false}, + {"com", "com", false}, + // FQDNs should be reversed. + {"example.com", "com.example", false}, + {"www.example.com", "com.example.www", false}, + {"world.wide.web.example.com", "com.example.web.wide.world", false}, + // IP addresses should stay the same. + {"1.2.3.4", "1.2.3.4", false}, + {"2602:ff3a:1:abad:c0f:fee:abad:cafe", "2602:ff3a:1:abad:c0f:fee:abad:cafe", false}, + // Tricksy FQDNs that look like IPv6 addresses should be parsed as FQDNs. + {"2602.ff3a.1.abad.c0f.fee.abad.cafe", "cafe.abad.fee.c0f.abad.1.ff3a.2602", false}, + {"2602.ff3a.0001.abad.0c0f.0fee.abad.cafe", "cafe.abad.0fee.0c0f.abad.0001.ff3a.2602", false}, + // IPv6 addresses should be returned in RFC 5952 format. 
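+ // RFC 5952 canonicalization drops leading zeros within each group, so the + // original spelling cannot be recovered; such cases are marked oneWay. For + // instance, Go's net/netip package formats addresses this way: parsing + // "2602:ff3a:0001:abad:0c0f:0fee:abad:cafe" and calling String() yields + // "2602:ff3a:1:abad:c0f:fee:abad:cafe".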
+ {"2602:ff3a:0001:abad:0c0f:0fee:abad:cafe", "2602:ff3a:1:abad:c0f:fee:abad:cafe", true}, + } - var authzIDs []int64 - _, err = sa.dbMap.Select(&authzIDs, "SELECT authzID FROM orderToAuthz2 WHERE orderID = ?;", order.Id) - test.AssertNotError(t, err, "Failed to count orderToAuthz entries") - test.AssertEquals(t, len(authzIDs), 3) - test.AssertDeepEquals(t, authzIDs, []int64{1, 2, 3}) + for _, tc := range testCases { + output := EncodeIssuedName(tc.issuedName) + test.AssertEquals(t, output, tc.reversed) - names, err := sa.namesForOrder(context.Background(), order.Id) - test.AssertNotError(t, err, "namesForOrder errored") - test.AssertEquals(t, len(names), 2) - test.AssertDeepEquals(t, names, []string{"com.example", "com.example.another.just"}) + if !tc.oneWay { + output = EncodeIssuedName(tc.reversed) + test.AssertEquals(t, output, tc.issuedName) + } + } } func TestNewOrderAndAuthzs(t *testing.T) { - sa, _, cleanup := initSA(t) - defer cleanup() + sa, _ := initSA(t) - // Create a test registration to reference - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() - reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - }) - test.AssertNotError(t, err, "Couldn't create test registration") + reg := createWorkingRegistration(t, sa) // Insert two pre-existing authorizations to reference - idA := createPendingAuthorization(t, sa, "a.com", sa.clk.Now().Add(time.Hour)) - idB := createPendingAuthorization(t, sa, "b.com", sa.clk.Now().Add(time.Hour)) - test.AssertEquals(t, idA, int64(1)) - test.AssertEquals(t, idB, int64(2)) + idA := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("a.com"), sa.clk.Now().Add(time.Hour)) + idB := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("b.com"), sa.clk.Now().Add(time.Hour)) - order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + nowC := sa.clk.Now().Add(time.Hour) + nowD := sa.clk.Now().Add(time.Hour) + expires := sa.clk.Now().Add(2 * time.Hour) + req := &sapb.NewOrderAndAuthzsRequest{ // Insert an order for four names, two of which already have authzs NewOrder: &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: 1, - Names: []string{"a.com", "b.com", "c.com", "d.com"}, - V2Authorizations: []int64{1, 2}, + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + identifier.NewDNS("c.com").ToProto(), + identifier.NewDNS("d.com").ToProto(), + }, + V2Authorizations: []int64{idA, idB}, }, // And add new authorizations for the other two names. 
- NewAuthzs: []*corepb.Authorization{ + NewAuthzs: []*sapb.NewAuthzRequest{ { - Identifier: "c.com", + Identifier: &corepb.Identifier{Type: "dns", Value: "c.com"}, RegistrationID: reg.Id, - Expires: sa.clk.Now().Add(time.Hour).UnixNano(), - Status: "pending", - Challenges: []*corepb.Challenge{{Token: core.NewToken()}}, + Expires: timestamppb.New(nowC), + ChallengeTypes: []string{string(core.ChallengeTypeHTTP01)}, + Token: core.NewToken(), }, { - Identifier: "d.com", + Identifier: &corepb.Identifier{Type: "dns", Value: "d.com"}, RegistrationID: reg.Id, - Expires: sa.clk.Now().Add(time.Hour).UnixNano(), - Status: "pending", - Challenges: []*corepb.Challenge{{Token: core.NewToken()}}, + Expires: timestamppb.New(nowD), + ChallengeTypes: []string{string(core.ChallengeTypeHTTP01)}, + Token: core.NewToken(), }, }, - }) - test.AssertNotError(t, err, "sa.NewOrder failed") - test.AssertEquals(t, order.Id, int64(1)) - test.AssertDeepEquals(t, order.V2Authorizations, []int64{1, 2, 3, 4}) - - var authzIDs []int64 - _, err = sa.dbMap.Select(&authzIDs, "SELECT authzID FROM orderToAuthz2 WHERE orderID = ?;", order.Id) - test.AssertNotError(t, err, "Failed to count orderToAuthz entries") - test.AssertEquals(t, len(authzIDs), 4) - test.AssertDeepEquals(t, authzIDs, []int64{1, 2, 3, 4}) - - names, err := sa.namesForOrder(context.Background(), order.Id) - test.AssertNotError(t, err, "namesForOrder errored") - test.AssertEquals(t, len(names), 4) - test.AssertDeepEquals(t, names, []string{"com.a", "com.b", "com.c", "com.d"}) + } + order, err := sa.NewOrderAndAuthzs(context.Background(), req) + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") + test.Assert(t, order.Id != 0, "order ID should be non-zero") + test.AssertEquals(t, len(order.V2Authorizations), 4) + test.AssertSliceContains(t, order.V2Authorizations, idA) + test.AssertSliceContains(t, order.V2Authorizations, idB) + // Ensure that two new authzs were created. 
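+ // Their IDs were assigned by the database, so filter out the two reused + // IDs rather than asserting exact values.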
+ var newAuthzIDs []int64 + for _, id := range order.V2Authorizations { + if id != idA && id != idB { + newAuthzIDs = append(newAuthzIDs, id) + } + } + test.AssertEquals(t, len(newAuthzIDs), 2) + test.Assert(t, newAuthzIDs[0] != newAuthzIDs[1], "expected distinct new authz IDs") } -func TestSetOrderProcessing(t *testing.T) { - sa, fc, cleanup := initSA(t) - defer cleanup() - - // Create a test registration to reference - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() - reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - }) - test.AssertNotError(t, err, "Couldn't create test registration") +func TestNewOrderAndAuthzs_ReuseOnly(t *testing.T) { + sa, fc := initSA(t) - // Add one valid authz - expires := fc.Now().Add(time.Hour) - attemptedAt := fc.Now() - authzID := createFinalizedAuthorization(t, sa, "example.com", expires, "valid", attemptedAt) + reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(2 * time.Hour) - order := &corepb.Order{ - RegistrationID: reg.Id, - Expires: sa.clk.Now().Add(365 * 24 * time.Hour).UnixNano(), - Names: []string{"example.com"}, - V2Authorizations: []int64{authzID}, + // Insert two pre-existing authorizations to reference + idA := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("a.com"), sa.clk.Now().Add(time.Hour)) + idB := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("b.com"), sa.clk.Now().Add(time.Hour)) + req := &sapb.NewOrderAndAuthzsRequest{ + // Insert an order for two names, both of which already have authzs + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + }, + V2Authorizations: []int64{idA, idB}, + }, + } + order, err := sa.NewOrderAndAuthzs(context.Background(), req) + if err != nil { + t.Fatal("sa.NewOrderAndAuthzs:", err) + } + if !reflect.DeepEqual(order.V2Authorizations, []int64{idA, idB}) { + t.Errorf("sa.NewOrderAndAuthzs().V2Authorizations: want [%d, %d], got %v", idA, idB, order.V2Authorizations) + } +} + +func TestNewOrderAndAuthzs_CreateOnly(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(2 * time.Hour) + + // Insert two pre-existing authorizations which the request deliberately + // does not reference + _ = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("a.com"), sa.clk.Now().Add(time.Hour)) + _ = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("b.com"), sa.clk.Now().Add(time.Hour)) + req := &sapb.NewOrderAndAuthzsRequest{ + // Insert an order for two names, reusing no authzs and creating a new + // authz for only one of the names + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.com").ToProto(), + identifier.NewDNS("b.com").ToProto(), + }, + }, + NewAuthzs: []*sapb.NewAuthzRequest{ + { + Identifier: &corepb.Identifier{Type: "dns", Value: "a.com"}, + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + ChallengeTypes: []string{string(core.ChallengeTypeDNS01)}, + Token: core.NewToken(), + }, + }, + } + order, err := sa.NewOrderAndAuthzs(context.Background(), req) + if err != nil { + t.Fatal("sa.NewOrderAndAuthzs:", err) + } + if len(order.V2Authorizations) != 1 { + t.Fatalf("len(sa.NewOrderAndAuthzs().V2Authorizations): want 1, got %v", len(order.V2Authorizations)) + } + gotAuthz,
err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: order.V2Authorizations[0]}) + if err != nil { + t.Fatalf("retrieving inserted authz: %s", err) + } + if gotAuthz.Identifier.Value != "a.com" { + t.Errorf("New order authz identifier = %v, want %v", gotAuthz.Identifier.Value, "a.com") + } +} + +func TestNewOrderAndAuthzs_NoAuthzsError(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(2 * time.Hour) + + // Construct a request with no identifiers and no new authzs + req := &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: nil, + }, + NewAuthzs: nil, + } + _, err := sa.NewOrderAndAuthzs(context.Background(), req) + if err != errIncompleteRequest { + t.Errorf("sa.NewOrderAndAuthzs with no authzs: want %v, got %v", errIncompleteRequest, err) + } +} + +// TestNewOrderAndAuthzs_NonNilInnerOrder verifies that a +// sapb.NewOrderAndAuthzsRequest with a nil NewOrder object returns an error. +func TestNewOrderAndAuthzs_NonNilInnerOrder(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + + expires := fc.Now().Add(2 * time.Hour) + _, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewAuthzs: []*sapb.NewAuthzRequest{ + { + Identifier: &corepb.Identifier{Type: "dns", Value: "c.com"}, + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + ChallengeTypes: []string{string(core.ChallengeTypeDNS01)}, + Token: core.NewToken(), + }, + }, + }) + test.AssertErrorIs(t, err, errIncompleteRequest) +} + +func TestNewOrderAndAuthzs_MismatchedRegID(t *testing.T) { + sa, _ := initSA(t) + + _, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: 1, + }, + NewAuthzs: []*sapb.NewAuthzRequest{ + { + RegistrationID: 2, + }, + }, + }) + test.AssertError(t, err, "mismatched regIDs should fail") + test.AssertContains(t, err.Error(), "same account") +} + +func TestNewOrderAndAuthzs_NewAuthzExpectedFields(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(time.Hour) + domain := "a.com" + + // Create an authz that does not yet exist in the database; the SA itself + // must fill in the model fields that the request cannot carry. + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewAuthzs: []*sapb.NewAuthzRequest{ + { + Identifier: &corepb.Identifier{Type: "dns", Value: domain}, + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + ChallengeTypes: []string{string(core.ChallengeTypeHTTP01)}, + Token: core.NewToken(), + }, + }, + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{identifier.NewDNS(domain).ToProto()}, + }, + }) + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") + + // Safely get the authz for the order we created above. + obj, err := sa.dbReadOnlyMap.Get(ctx, authzModel{}, order.V2Authorizations[0]) + test.AssertNotError(t, err, fmt.Sprintf("authorization %d not found", order.V2Authorizations[0])) + + // To access the data stored in obj with static typing, we type-assert obj + // into a pointer to an authzModel.
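+ // (Get returns the row wrapped in an interface value, so the assertion + // recovers the concrete *authzModel.)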
+ am, ok := obj.(*authzModel) + test.Assert(t, ok, "Could not type assert obj into authzModel") + + // A brand new authz should always be stored with the pending status; the + // request has no way to override it. + test.AssertEquals(t, am.Status, statusUint(core.StatusPending)) + + // Testing for the existence of these boxed nils is a definite break from + // our paradigm of avoiding passing around boxed nils whenever possible. + // However, the existence of these boxed nils in relation to this test is + // actually expected. If these assertions fail, then a possible SA refactor + // or RA bug placed incorrect data into brand new authz input fields. + test.AssertBoxedNil(t, am.Attempted, "am.Attempted should be nil") + test.AssertBoxedNil(t, am.AttemptedAt, "am.AttemptedAt should be nil") + test.AssertBoxedNil(t, am.ValidationError, "am.ValidationError should be nil") + test.AssertBoxedNil(t, am.ValidationRecord, "am.ValidationRecord should be nil") +} + +func TestNewOrderAndAuthzs_Profile(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(time.Hour) + + // Create an order and authz while specifying a profile. + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + CertificateProfileName: "test", + }, + NewAuthzs: []*sapb.NewAuthzRequest{ + { + Identifier: &corepb.Identifier{Type: "dns", Value: "example.com"}, + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + ChallengeTypes: []string{string(core.ChallengeTypeHTTP01)}, + Token: core.NewToken(), + }, + }, + }) + if err != nil { + t.Fatalf("inserting order and authzs: %s", err) + } + + // Retrieve the order and check that the profile is correct. + gotOrder, err := sa.GetOrder(context.Background(), &sapb.OrderRequest{Id: order.Id}) + if err != nil { + t.Fatalf("retrieving inserted order: %s", err) + } + if gotOrder.CertificateProfileName != "test" { + t.Errorf("order.CertificateProfileName = %v, want %v", gotOrder.CertificateProfileName, "test") + } + + // Retrieve the authz and check that the profile is correct.
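+ // (The profile name is persisted on the newly created authz as well as on + // the order itself.)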
+ gotAuthz, err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: order.V2Authorizations[0]}) + if err != nil { + t.Fatalf("retrieving inserted authz: %s", err) + } + if gotAuthz.CertificateProfileName != "test" { + t.Errorf("authz.CertificateProfileName = %v, want %v", gotAuthz.CertificateProfileName, "test") } +} + +func TestSetOrderProcessing(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + + // Add one valid authz + expires := fc.Now().Add(time.Hour) + attemptedAt := fc.Now() + authzID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires, "valid", attemptedAt) // Add a new order in pending status with no certificate serial - order, err = sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: order.RegistrationID, - Expires: order.Expires, - Names: order.Names, - V2Authorizations: order.V2Authorizations, + expires1Year := sa.clk.Now().Add(365 * 24 * time.Hour) + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires1Year), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + V2Authorizations: []int64{authzID}, + }, }) - test.AssertNotError(t, err, "NewOrder failed") + test.AssertNotError(t, err, "NewOrderAndAuthzs failed") // Set the order to be processing _, err = sa.SetOrderProcessing(context.Background(), &sapb.OrderRequest{Id: order.Id}) @@ -1011,38 +1273,24 @@ func TestSetOrderProcessing(t *testing.T) { } func TestFinalizeOrder(t *testing.T) { - sa, fc, cleanup := initSA(t) - defer cleanup() - - // Create a test registration to reference - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() - reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - }) - test.AssertNotError(t, err, "Couldn't create test registration") + sa, fc := initSA(t) - // Add one valid authz + reg := createWorkingRegistration(t, sa) expires := fc.Now().Add(time.Hour) attemptedAt := fc.Now() - authzID := createFinalizedAuthorization(t, sa, "example.com", expires, "valid", attemptedAt) - - order := &corepb.Order{ - RegistrationID: reg.Id, - Expires: sa.clk.Now().Add(365 * 24 * time.Hour).UnixNano(), - Names: []string{"example.com"}, - V2Authorizations: []int64{authzID}, - } + authzID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires, "valid", attemptedAt) - // Add a new order with an empty certificate serial - order, err = sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: order.RegistrationID, - Expires: order.Expires, - Names: order.Names, - V2Authorizations: order.V2Authorizations, + // Add a new order in pending status with no certificate serial + expires1Year := sa.clk.Now().Add(365 * 24 * time.Hour) + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires1Year), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + V2Authorizations: []int64{authzID}, + }, }) - test.AssertNotError(t, err, "NewOrder failed") + test.AssertNotError(t, err, "NewOrderAndAuthzs failed") // Set the order to processing so it can be finalized _, err = sa.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: order.Id}) @@ 
-1063,51 +1311,50 @@ func TestFinalizeOrder(t *testing.T) { test.AssertEquals(t, updatedOrder.Status, string(core.StatusValid)) } -func TestOrder(t *testing.T) { - sa, fc, cleanup := initSA(t) - defer cleanup() - - // Create a test registration to reference - key, _ := jose.JSONWebKey{Key: &rsa.PublicKey{N: big.NewInt(1), E: 1}}.MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() - reg, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, - }) - test.AssertNotError(t, err, "Couldn't create test registration") +// TestGetOrder tests that round-tripping a simple order through +// NewOrderAndAuthzs and GetOrder has the expected result. +func TestGetOrder(t *testing.T) { + sa, fc := initSA(t) + reg := createWorkingRegistration(t, sa) + ident := identifier.NewDNS("example.com") authzExpires := fc.Now().Add(time.Hour) - authzID := createPendingAuthorization(t, sa, "example.com", authzExpires) + authzID := createPendingAuthorization(t, sa, reg.Id, ident, authzExpires) // Set the order to expire in two hours - expires := fc.Now().Add(2 * time.Hour).UnixNano() + expires := fc.Now().Add(2 * time.Hour) inputOrder := &corepb.Order{ RegistrationID: reg.Id, - Expires: expires, - Names: []string{"example.com"}, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{ident.ToProto()}, V2Authorizations: []int64{authzID}, } // Create the order - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: inputOrder.RegistrationID, - Expires: inputOrder.Expires, - Names: inputOrder.Names, - V2Authorizations: inputOrder.V2Authorizations, + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: inputOrder.RegistrationID, + Expires: inputOrder.Expires, + Identifiers: inputOrder.Identifiers, + V2Authorizations: inputOrder.V2Authorizations, + }, }) - test.AssertNotError(t, err, "sa.NewOrder failed") + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") - // The Order from GetOrder should match the following expected order - expectedOrder := &corepb.Order{ - // The registration ID, authorizations, expiry, and names should match the - // input to NewOrder + // Fetch the order by its ID and make sure it matches the expected + storedOrder, err := sa.GetOrder(context.Background(), &sapb.OrderRequest{Id: order.Id}) + test.AssertNotError(t, err, "sa.GetOrder failed") + created := sa.clk.Now() + test.AssertDeepEquals(t, storedOrder, &corepb.Order{ + // The registration ID, authorizations, expiry, and identifiers should match the + // input to NewOrderAndAuthzs RegistrationID: inputOrder.RegistrationID, V2Authorizations: inputOrder.V2Authorizations, - Names: inputOrder.Names, + Identifiers: inputOrder.Identifiers, Expires: inputOrder.Expires, // The ID should have been set to 1 by the SA - Id: 1, + Id: storedOrder.Id, // The status should be pending Status: string(core.StatusPending), // The serial should be empty since this is a pending order @@ -1115,20 +1362,72 @@ func TestOrder(t *testing.T) { // We should not be processing it BeganProcessing: false, // The created timestamp should have been set to the current time - Created: sa.clk.Now().UnixNano(), + Created: timestamppb.New(created), + }) +} + +// TestGetOrderWithProfile tests that round-tripping a simple order through +// NewOrderAndAuthzs and GetOrder has the expected result. 
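+// Unlike TestGetOrder, it sets a certificate profile name on the new order +// and expects GetOrder to round-trip it unchanged.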
+func TestGetOrderWithProfile(t *testing.T) { + sa, fc := initSA(t) + + reg := createWorkingRegistration(t, sa) + ident := identifier.NewDNS("example.com") + authzExpires := fc.Now().Add(time.Hour) + authzID := createPendingAuthorization(t, sa, reg.Id, ident, authzExpires) + + // Set the order to expire in two hours + expires := fc.Now().Add(2 * time.Hour) + + inputOrder := &corepb.Order{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + Identifiers: []*corepb.Identifier{ident.ToProto()}, + V2Authorizations: []int64{authzID}, + CertificateProfileName: "tbiapb", } + // Create the order + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: inputOrder.RegistrationID, + Expires: inputOrder.Expires, + Identifiers: inputOrder.Identifiers, + V2Authorizations: inputOrder.V2Authorizations, + CertificateProfileName: inputOrder.CertificateProfileName, + }, + }) + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") + // Fetch the order by its ID and make sure it matches the expected storedOrder, err := sa.GetOrder(context.Background(), &sapb.OrderRequest{Id: order.Id}) test.AssertNotError(t, err, "sa.GetOrder failed") - test.AssertDeepEquals(t, storedOrder, expectedOrder) + created := sa.clk.Now() + test.AssertDeepEquals(t, storedOrder, &corepb.Order{ + // The registration ID, authorizations, expiry, and identifiers should + // match the input to NewOrderAndAuthzs + RegistrationID: inputOrder.RegistrationID, + V2Authorizations: inputOrder.V2Authorizations, + Identifiers: inputOrder.Identifiers, + Expires: inputOrder.Expires, + // The ID is assigned by the SA + Id: storedOrder.Id, + // The status should be pending + Status: string(core.StatusPending), + // The serial should be empty since this is a pending order + CertificateSerial: "", + // We should not be processing it + BeganProcessing: false, + // The created timestamp should have been set to the current time + Created: timestamppb.New(created), + CertificateProfileName: "tbiapb", + }) } // TestGetAuthorization2NoRows ensures that the GetAuthorization2 function returns // the correct error when there are no results for the provided ID. func TestGetAuthorization2NoRows(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() + sa, _ := initSA(t) // An empty authz ID should result in a not found berror. id := int64(123) @@ -1137,192 +1436,78 @@ test.AssertErrorIs(t, err, berrors.NotFound) } -func TestGetAuthorizations2(t *testing.T) { - sa, fc, cleanup := initSA(t) - defer cleanup() - - reg := createWorkingRegistration(t, sa) - exp := fc.Now().AddDate(0, 0, 10).UTC() - attemptedAt := fc.Now() - - identA := "aaa" - identB := "bbb" - identC := "ccc" - identD := "ddd" - idents := []string{identA, identB, identC} - - authzIDA := createFinalizedAuthorization(t, sa, "aaa", exp, "valid", attemptedAt) - authzIDB := createPendingAuthorization(t, sa, "bbb", exp) - nearbyExpires := fc.Now().UTC().Add(time.Hour) - authzIDC := createPendingAuthorization(t, sa, "ccc", nearbyExpires) - - // Associate authorizations with an order so that GetAuthorizations2 thinks - // they are WFE2 authorizations.
- err := sa.dbMap.Insert(&orderToAuthzModel{ - OrderID: 1, - AuthzID: authzIDA, - }) - test.AssertNotError(t, err, "sa.dbMap.Insert failed") - err = sa.dbMap.Insert(&orderToAuthzModel{ - OrderID: 1, - AuthzID: authzIDB, - }) - test.AssertNotError(t, err, "sa.dbMap.Insert failed") - err = sa.dbMap.Insert(&orderToAuthzModel{ - OrderID: 1, - AuthzID: authzIDC, - }) - test.AssertNotError(t, err, "sa.dbMap.Insert failed") - - // Set an expiry cut off of 1 day in the future similar to `RA.NewOrder`. This - // should exclude pending authorization C based on its nearbyExpires expiry - // value. - expiryCutoff := fc.Now().AddDate(0, 0, 1).UnixNano() - // Get authorizations for the names used above. - authz, err := sa.GetAuthorizations2(context.Background(), &sapb.GetAuthorizationsRequest{ - RegistrationID: reg.Id, - Domains: idents, - Now: expiryCutoff, - }) - // It should not fail - test.AssertNotError(t, err, "sa.GetAuthorizations2 failed") - // We should get back two authorizations since one of the three authorizations - // created above expires too soon. - test.AssertEquals(t, len(authz.Authz), 2) - - // Get authorizations for the names used above, and one name that doesn't exist - authz, err = sa.GetAuthorizations2(context.Background(), &sapb.GetAuthorizationsRequest{ - RegistrationID: reg.Id, - Domains: append(idents, identD), - Now: expiryCutoff, - }) - // It should not fail - test.AssertNotError(t, err, "sa.GetAuthorizations2 failed") - // It should still return only two authorizations - test.AssertEquals(t, len(authz.Authz), 2) -} - -func TestCountOrders(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - now := sa.clk.Now() - expires := now.Add(24 * time.Hour) - - req := &sapb.CountOrdersRequest{ - AccountID: 12345, - Range: &sapb.Range{ - Earliest: now.Add(-time.Hour).UnixNano(), - Latest: now.Add(time.Second).UnixNano(), - }, - } - - // Counting new orders for a reg ID that doesn't exist should return 0 - count, err := sa.CountOrders(ctx, req) - test.AssertNotError(t, err, "Couldn't count new orders for fake reg ID") - test.AssertEquals(t, count.Count, int64(0)) - - // Add a pending authorization - authzID := createPendingAuthorization(t, sa, "example.com", expires) - - // Add one pending order - order, err := sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: expires.UnixNano(), - Names: []string{"example.com"}, - V2Authorizations: []int64{authzID}, - }) - test.AssertNotError(t, err, "Couldn't create new pending order") - - // Counting new orders for the reg ID should now yield 1 - req.AccountID = reg.Id - count, err = sa.CountOrders(ctx, req) - test.AssertNotError(t, err, "Couldn't count new orders for reg ID") - test.AssertEquals(t, count.Count, int64(1)) - - // Moving the count window to after the order was created should return the - // count to 0 - earliest := time.Unix(0, order.Created).Add(time.Minute) - req.Range.Earliest = earliest.UnixNano() - req.Range.Latest = earliest.Add(time.Hour).UnixNano() - count, err = sa.CountOrders(ctx, req) - test.AssertNotError(t, err, "Couldn't count new orders for reg ID") - test.AssertEquals(t, count.Count, int64(0)) -} - func TestFasterGetOrderForNames(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) - domain := "example.com" + ident := identifier.NewDNS("example.com") expires := fc.Now().Add(time.Hour) key, _ := goodTestJWK().MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() reg, err := 
sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, + Key: key, }) test.AssertNotError(t, err, "Couldn't create test registration") - authzIDs := createPendingAuthorization(t, sa, domain, expires) + authzIDs := createPendingAuthorization(t, sa, reg.Id, ident, expires) - expiresNano := expires.UnixNano() - _, err = sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: expiresNano, - V2Authorizations: []int64{authzIDs}, - Names: []string{domain}, + _, err = sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + V2Authorizations: []int64{authzIDs}, + Identifiers: []*corepb.Identifier{ident.ToProto()}, + }, }) - test.AssertNotError(t, err, "sa.NewOrder failed") + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") - _, err = sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: expiresNano, - V2Authorizations: []int64{authzIDs}, - Names: []string{domain}, + _, err = sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(expires), + V2Authorizations: []int64{authzIDs}, + Identifiers: []*corepb.Identifier{ident.ToProto()}, + }, }) - test.AssertNotError(t, err, "sa.NewOrder failed") + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") _, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: reg.Id, - Names: []string{domain}, + AcctID: reg.Id, + Identifiers: []*corepb.Identifier{ident.ToProto()}, }) test.AssertNotError(t, err, "sa.GetOrderForNames failed") } func TestGetOrderForNames(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) // Give the order we create a short lifetime orderLifetime := time.Hour - expires := fc.Now().Add(orderLifetime).UnixNano() + expires := fc.Now().Add(orderLifetime) // Create two test registrations to associate with orders key, _ := goodTestJWK().MarshalJSON() - initialIP, _ := net.ParseIP("42.42.42.42").MarshalText() regA, err := sa.NewRegistration(ctx, &corepb.Registration{ - Key: key, - InitialIP: initialIP, + Key: key, }) test.AssertNotError(t, err, "Couldn't create test registration") // Add one pending authz for the first name for regA and one // pending authz for the second name for regA authzExpires := fc.Now().Add(time.Hour) - authzIDA := createPendingAuthorization(t, sa, "example.com", authzExpires) - authzIDB := createPendingAuthorization(t, sa, "just.another.example.com", authzExpires) + authzIDA := createPendingAuthorization(t, sa, regA.Id, identifier.NewDNS("example.com"), authzExpires) + authzIDB := createPendingAuthorization(t, sa, regA.Id, identifier.NewDNS("just.another.example.com"), authzExpires) ctx := context.Background() - names := []string{"example.com", "just.another.example.com"} + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("example.com"), + identifier.NewDNS("just.another.example.com"), + } // Call GetOrderForNames for a set of names we haven't created an order for // yet result, err := sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regA.Id, - Names: names, + AcctID: regA.Id, + Identifiers: idents.ToProtoSlice(), }) // We expect the result to return an error test.AssertError(t, err, "sa.GetOrderForNames did not return an error for an empty result") @@ -1332,22 +1517,24 @@ func TestGetOrderForNames(t *testing.T) { test.Assert(t, result == nil, "sa.GetOrderForNames for 
non-existent order returned non-nil result") // Add a new order for a set of names - order, err := sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: regA.Id, - Expires: expires, - V2Authorizations: []int64{authzIDA, authzIDB}, - Names: names, + order, err := sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: regA.Id, + Expires: timestamppb.New(expires), + V2Authorizations: []int64{authzIDA, authzIDB}, + Identifiers: idents.ToProtoSlice(), + }, }) // It shouldn't error - test.AssertNotError(t, err, "sa.NewOrder failed") + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") // The order ID shouldn't be nil - test.AssertNotNil(t, order.Id, "NewOrder returned with a nil Id") + test.AssertNotNil(t, order.Id, "NewOrderAndAuthzs returned with a nil Id") // Call GetOrderForNames with the same account ID and set of names as the - // above NewOrder call + // above NewOrderAndAuthzs call result, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regA.Id, - Names: names, + AcctID: regA.Id, + Identifiers: idents.ToProtoSlice(), }) // It shouldn't error test.AssertNotError(t, err, "sa.GetOrderForNames failed") @@ -1355,11 +1542,11 @@ test.AssertNotNil(t, result, "Returned order was nil") test.AssertEquals(t, result.Id, order.Id) - // Call GetOrderForNames with a different account ID from the NewOrder call + // Call GetOrderForNames with a different account ID from the NewOrderAndAuthzs call regB := int64(1337) result, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regB, - Names: names, + AcctID: regB, + Identifiers: idents.ToProtoSlice(), }) // It should error test.AssertError(t, err, "sa.GetOrderForNames did not return an error for an empty result") @@ -1372,10 +1559,10 @@ fc.Add(2 * orderLifetime) // Call GetOrderForNames again with the same account ID and set of names as - // the initial NewOrder call + // the initial NewOrderAndAuthzs call result, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regA.Id, - Names: names, + AcctID: regA.Id, + Identifiers: idents.ToProtoSlice(), }) // It should error since there is no result test.AssertError(t, err, "sa.GetOrderForNames did not return an error for an empty result") @@ -1388,32 +1575,34 @@ // Create two valid authorizations authzExpires = fc.Now().Add(time.Hour) attemptedAt := fc.Now() - authzIDC := createFinalizedAuthorization(t, sa, "zombo.com", authzExpires, "valid", attemptedAt) - authzIDD := createFinalizedAuthorization(t, sa, "welcome.to.zombo.com", authzExpires, "valid", attemptedAt) + authzIDC := createFinalizedAuthorization(t, sa, regA.Id, identifier.NewDNS("zombo.com"), authzExpires, "valid", attemptedAt) + authzIDD := createFinalizedAuthorization(t, sa, regA.Id, identifier.NewDNS("welcome.to.zombo.com"), authzExpires, "valid", attemptedAt) // Add a fresh order that uses the authorizations created above - names = []string{"zombo.com", "welcome.to.zombo.com"} - order, err = sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: regA.Id, - Expires: fc.Now().Add(orderLifetime).UnixNano(), - V2Authorizations: []int64{authzIDC, authzIDD}, - Names: names, + idents = identifier.ACMEIdentifiers{ + identifier.NewDNS("zombo.com"), + identifier.NewDNS("welcome.to.zombo.com"), + } + expires = fc.Now().Add(orderLifetime) + order, err = sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: regA.Id, + Expires: timestamppb.New(expires), +
V2Authorizations: []int64{authzIDC, authzIDD}, + Identifiers: idents.ToProtoSlice(), + }, }) // It shouldn't error - test.AssertNotError(t, err, "sa.NewOrder failed") + test.AssertNotError(t, err, "sa.NewOrderAndAuthzs failed") // The order ID shouldn't be nil - test.AssertNotNil(t, order.Id, "NewOrder returned with a nil Id") + test.AssertNotNil(t, order.Id, "NewOrderAndAuthzs returned with a nil Id") // Call GetOrderForNames with the same account ID and set of names as - // the earlier NewOrder call + // the earlier NewOrderAndAuthzs call result, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regA.Id, - Names: names, + AcctID: regA.Id, + Identifiers: idents.ToProtoSlice(), }) // It should not error since a ready order can be reused. test.AssertNotError(t, err, "sa.GetOrderForNames returned an unexpected error for ready order reuse") // The order returned should have the same ID as the order we created above - test.AssertEquals(t, result != nil, true) + test.AssertNotNil(t, result, "sa.GetOrderForNames returned nil result") test.AssertEquals(t, result.Id, order.Id) // Set the order processing so it can be finalized @@ -1426,10 +1615,10 @@ func TestGetOrderForNames(t *testing.T) { test.AssertNotError(t, err, "sa.FinalizeOrder failed") // Call GetOrderForNames with the same account ID and set of names as - // the earlier NewOrder call + // the earlier NewOrderAndAuthzs call result, err = sa.GetOrderForNames(ctx, &sapb.GetOrderForNamesRequest{ - AcctID: regA.Id, - Names: names, + AcctID: regA.Id, + Identifiers: idents.ToProtoSlice(), }) // It should error since a valid order should not be reused. test.AssertError(t, err, "sa.GetOrderForNames did not return an error for an empty result") @@ -1441,12 +1630,10 @@ func TestGetOrderForNames(t *testing.T) { } func TestStatusForOrder(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) ctx := context.Background() expires := fc.Now().Add(time.Hour) - expiresNano := expires.UnixNano() alreadyExpired := expires.Add(-2 * time.Hour) attemptedAt := fc.Now() @@ -1455,76 +1642,93 @@ func TestStatusForOrder(t *testing.T) { // Create a pending authz, an expired authz, an invalid authz, a deactivated authz, // and a valid authz - pendingID := createPendingAuthorization(t, sa, "pending.your.order.is.up", expires) - expiredID := createPendingAuthorization(t, sa, "expired.your.order.is.up", alreadyExpired) - invalidID := createFinalizedAuthorization(t, sa, "invalid.your.order.is.up", expires, "invalid", attemptedAt) - validID := createFinalizedAuthorization(t, sa, "valid.your.order.is.up", expires, "valid", attemptedAt) - deactivatedID := createPendingAuthorization(t, sa, "deactivated.your.order.is.up", expires) + pendingID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("pending.your.order.is.up"), expires) + expiredID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("expired.your.order.is.up"), alreadyExpired) + invalidID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("invalid.your.order.is.up"), expires, "invalid", attemptedAt) + validID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("valid.your.order.is.up"), expires, "valid", attemptedAt) + deactivatedID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("deactivated.your.order.is.up"), expires) _, err := sa.DeactivateAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: deactivatedID}) test.AssertNotError(t, err, "sa.DeactivateAuthorization2 failed") testCases := 
[]struct { Name string AuthorizationIDs []int64 - OrderNames []string - OrderExpires int64 + OrderIdents identifier.ACMEIdentifiers + OrderExpires *timestamppb.Timestamp ExpectedStatus string SetProcessing bool Finalize bool }{ { - Name: "Order with an invalid authz", - OrderNames: []string{"pending.your.order.is.up", "invalid.your.order.is.up", "deactivated.your.order.is.up", "valid.your.order.is.up"}, + Name: "Order with an invalid authz", + OrderIdents: identifier.ACMEIdentifiers{ + identifier.NewDNS("pending.your.order.is.up"), + identifier.NewDNS("invalid.your.order.is.up"), + identifier.NewDNS("deactivated.your.order.is.up"), + identifier.NewDNS("valid.your.order.is.up"), + }, AuthorizationIDs: []int64{pendingID, invalidID, deactivatedID, validID}, ExpectedStatus: string(core.StatusInvalid), }, { - Name: "Order with an expired authz", - OrderNames: []string{"pending.your.order.is.up", "expired.your.order.is.up", "deactivated.your.order.is.up", "valid.your.order.is.up"}, + Name: "Order with an expired authz", + OrderIdents: identifier.ACMEIdentifiers{ + identifier.NewDNS("pending.your.order.is.up"), + identifier.NewDNS("expired.your.order.is.up"), + identifier.NewDNS("deactivated.your.order.is.up"), + identifier.NewDNS("valid.your.order.is.up"), + }, AuthorizationIDs: []int64{pendingID, expiredID, deactivatedID, validID}, ExpectedStatus: string(core.StatusInvalid), }, { - Name: "Order with a deactivated authz", - OrderNames: []string{"pending.your.order.is.up", "deactivated.your.order.is.up", "valid.your.order.is.up"}, + Name: "Order with a deactivated authz", + OrderIdents: identifier.ACMEIdentifiers{ + identifier.NewDNS("pending.your.order.is.up"), + identifier.NewDNS("deactivated.your.order.is.up"), + identifier.NewDNS("valid.your.order.is.up"), + }, AuthorizationIDs: []int64{pendingID, deactivatedID, validID}, ExpectedStatus: string(core.StatusInvalid), }, { - Name: "Order with a pending authz", - OrderNames: []string{"valid.your.order.is.up", "pending.your.order.is.up"}, + Name: "Order with a pending authz", + OrderIdents: identifier.ACMEIdentifiers{ + identifier.NewDNS("valid.your.order.is.up"), + identifier.NewDNS("pending.your.order.is.up"), + }, AuthorizationIDs: []int64{validID, pendingID}, ExpectedStatus: string(core.StatusPending), }, { Name: "Order with only valid authzs, not yet processed or finalized", - OrderNames: []string{"valid.your.order.is.up"}, + OrderIdents: identifier.ACMEIdentifiers{identifier.NewDNS("valid.your.order.is.up")}, AuthorizationIDs: []int64{validID}, ExpectedStatus: string(core.StatusReady), }, { Name: "Order with only valid authzs, set processing", - OrderNames: []string{"valid.your.order.is.up"}, + OrderIdents: identifier.ACMEIdentifiers{identifier.NewDNS("valid.your.order.is.up")}, AuthorizationIDs: []int64{validID}, SetProcessing: true, ExpectedStatus: string(core.StatusProcessing), }, { Name: "Order with only valid authzs, not yet processed or finalized, OrderReadyStatus feature flag", - OrderNames: []string{"valid.your.order.is.up"}, + OrderIdents: identifier.ACMEIdentifiers{identifier.NewDNS("valid.your.order.is.up")}, AuthorizationIDs: []int64{validID}, ExpectedStatus: string(core.StatusReady), }, { Name: "Order with only valid authzs, set processing", - OrderNames: []string{"valid.your.order.is.up"}, + OrderIdents: identifier.ACMEIdentifiers{identifier.NewDNS("valid.your.order.is.up")}, AuthorizationIDs: []int64{validID}, SetProcessing: true, ExpectedStatus: string(core.StatusProcessing), }, { Name: "Order with only valid authzs, set 
processing and finalized", - OrderNames: []string{"valid.your.order.is.up"}, + OrderIdents: identifier.ACMEIdentifiers{identifier.NewDNS("valid.your.order.is.up")}, AuthorizationIDs: []int64{validID}, SetProcessing: true, Finalize: true, @@ -1537,16 +1741,19 @@ func TestStatusForOrder(t *testing.T) { // If the testcase doesn't specify an order expiry use a default timestamp // in the near future. orderExpiry := tc.OrderExpires - if orderExpiry == 0 { - orderExpiry = expiresNano + if !orderExpiry.IsValid() { + orderExpiry = timestamppb.New(expires) } - newOrder, err := sa.NewOrder(ctx, &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: orderExpiry, - V2Authorizations: tc.AuthorizationIDs, - Names: tc.OrderNames, + + newOrder, err := sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: orderExpiry, + V2Authorizations: tc.AuthorizationIDs, + Identifiers: tc.OrderIdents.ToProtoSlice(), + }, }) - test.AssertNotError(t, err, "NewOrder errored unexpectedly") + test.AssertNotError(t, err, "NewOrderAndAuthzs errored unexpectedly") // If requested, set the order to processing if tc.SetProcessing { _, err := sa.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: newOrder.Id}) @@ -1571,15 +1778,15 @@ func TestStatusForOrder(t *testing.T) { } func TestUpdateChallengesDeleteUnused(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) expires := fc.Now().Add(time.Hour) ctx := context.Background() attemptedAt := fc.Now() // Create a valid authz - authzID := createFinalizedAuthorization(t, sa, "example.com", expires, "valid", attemptedAt) + reg := createWorkingRegistration(t, sa) + authzID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires, "valid", attemptedAt) result, err := sa.GetAuthorization2(ctx, &sapb.AuthorizationID2{Id: authzID}) test.AssertNotError(t, err, "sa.GetAuthorization2 failed") @@ -1596,23 +1803,26 @@ func TestUpdateChallengesDeleteUnused(t *testing.T) { } func TestRevokeCertificate(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) reg := createWorkingRegistration(t, sa) // Add a cert to the DB to test with. 
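The TestStatusForOrder table above pins down how the SA derives an order's status from the states of its authorizations plus the order's own processing/finalization flags. As a reading aid, a minimal sketch of that precedence (a hypothetical helper mirroring the test's expectations, not the production implementation; deactivated and expired authzs are folded into the invalid tally):

	// deriveOrderStatus reduces authorization tallies to an order status,
	// in the same precedence order the test table exercises.
	func deriveOrderStatus(numInvalid, numPending int, processing, finalized bool) core.AcmeStatus {
		switch {
		case numInvalid > 0: // any invalid, expired, or deactivated authz
			return core.StatusInvalid
		case numPending > 0: // otherwise, any still-pending authz
			return core.StatusPending
		case finalized: // all authzs valid and a certificate was issued
			return core.StatusValid
		case processing: // all authzs valid, finalization in flight
			return core.StatusProcessing
		default: // all authzs valid, finalization not yet begun
			return core.StatusReady
		}
	}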
- certDER, err := ioutil.ReadFile("www.eff.org.der") - test.AssertNotError(t, err, "Couldn't read example cert DER") + serial, testCert := test.ThrowAwayCert(t, fc) + issuedTime := sa.clk.Now() + _, err := sa.AddSerial(ctx, &sapb.AddSerialRequest{ + RegID: reg.Id, + Serial: core.SerialToString(testCert.SerialNumber), + Created: timestamppb.New(testCert.NotBefore), + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertNotError(t, err, "failed to add test serial") _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: reg.Id, - Ocsp: nil, - Issued: sa.clk.Now().UnixNano(), - IssuerID: 1, + Der: testCert.Raw, + RegID: reg.Id, + Issued: timestamppb.New(issuedTime), + IssuerNameID: 1, }) - test.AssertNotError(t, err, "Couldn't add www.eff.org.der") - - serial := "000000000000000000000000000000021bd4" + test.AssertNotError(t, err, "Couldn't add test cert") status, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) test.AssertNotError(t, err, "GetCertificateStatus failed") @@ -1622,70 +1832,130 @@ func TestRevokeCertificate(t *testing.T) { now := fc.Now() reason := int64(1) - response := []byte{1, 2, 3} + _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: now.UnixNano(), + Date: timestamppb.New(now), Reason: reason, - Response: response, + ShardIdx: 1, }) - test.AssertNotError(t, err, "RevokeCertificate failed") + test.AssertNotError(t, err, "RevokeCertificate with no OCSP response should succeed") status, err = sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) test.AssertNotError(t, err, "GetCertificateStatus failed") test.AssertEquals(t, core.OCSPStatus(status.Status), core.OCSPStatusRevoked) test.AssertEquals(t, status.RevokedReason, reason) - test.AssertEquals(t, status.RevokedDate, now.UnixNano()) - test.AssertEquals(t, status.OcspLastUpdated, now.UnixNano()) - test.AssertDeepEquals(t, status.OcspResponse, response) + test.AssertEquals(t, status.RevokedDate.AsTime(), now) + test.AssertEquals(t, status.OcspLastUpdated.AsTime(), now) _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: now.UnixNano(), + Date: timestamppb.New(now), Reason: reason, - Response: response, }) test.AssertError(t, err, "RevokeCertificate should've failed when certificate already revoked") } -func TestUpdateRevokedCertificate(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() +func TestRevokeCertificateWithShard(t *testing.T) { + sa, fc := initSA(t) // Add a cert to the DB to test with. 
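Much of this hunk is the mechanical migration of sapb request fields from int64 UnixNano values to google.protobuf.Timestamp. For reference, the conversion idioms used throughout are the standard timestamppb APIs rather than Boulder-specific helpers:

	ts := timestamppb.New(fc.Now()) // time.Time -> *timestamppb.Timestamp
	tm := ts.AsTime()               // *timestamppb.Timestamp -> time.Time, always UTC
	ok := ts.IsValid()              // replaces the old `== 0` sentinel checks on UnixNano fields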
reg := createWorkingRegistration(t, sa) - certDER, err := ioutil.ReadFile("www.eff.org.der") - serial := "000000000000000000000000000000021bd4" - issuedTime := fc.Now().UnixNano() - test.AssertNotError(t, err, "Couldn't read example cert DER") + eeCert, err := core.LoadCert("../test/hierarchy/ee-e1.cert.pem") + test.AssertNotError(t, err, "failed to load test cert") + _, err = sa.AddSerial(ctx, &sapb.AddSerialRequest{ + RegID: reg.Id, + Serial: core.SerialToString(eeCert.SerialNumber), + Created: timestamppb.New(eeCert.NotBefore), + Expires: timestamppb.New(eeCert.NotAfter), + }) + test.AssertNotError(t, err, "failed to add test serial") _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: reg.Id, - Ocsp: nil, - Issued: issuedTime, + Der: eeCert.Raw, + RegID: reg.Id, + Issued: timestamppb.New(eeCert.NotBefore), + IssuerNameID: 1, + }) + test.AssertNotError(t, err, "failed to add test cert") + + serial := core.SerialToString(eeCert.SerialNumber) + fc.Add(1 * time.Hour) + now := fc.Now() + reason := int64(1) + + _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ IssuerID: 1, + ShardIdx: 9, + Serial: serial, + Date: timestamppb.New(now), + Reason: reason, + }) + test.AssertNotError(t, err, "RevokeCertificate with no OCSP response should succeed") + + status, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) + test.AssertNotError(t, err, "GetCertificateStatus failed") + test.AssertEquals(t, core.OCSPStatus(status.Status), core.OCSPStatusRevoked) + test.AssertEquals(t, status.RevokedReason, reason) + test.AssertEquals(t, status.RevokedDate.AsTime(), now) + test.AssertEquals(t, status.OcspLastUpdated.AsTime(), now) + test.AssertEquals(t, status.NotAfter.AsTime(), eeCert.NotAfter) + + var result revokedCertModel + err = sa.dbMap.SelectOne( + ctx, &result, `SELECT * FROM revokedCertificates WHERE serial = ?`, core.SerialToString(eeCert.SerialNumber)) + test.AssertNotError(t, err, "should be exactly one row in revokedCertificates") + test.AssertEquals(t, result.ShardIdx, int64(9)) + test.AssertEquals(t, result.RevokedReason, revocation.KeyCompromise) +} + +func TestUpdateRevokedCertificate(t *testing.T) { + sa, fc := initSA(t) + + // Add a cert to the DB to test with. 
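TestRevokeCertificateWithShard above revokes with Reason: 1 and then asserts that the stored row equals revocation.KeyCompromise, which only holds because Boulder's revocation.Reason values follow the RFC 5280 CRLReason code points (the same numeric values as the x/crypto/ocsp constants this diff replaces). Assuming that mapping, the reasons these tests exercise are:

	// revocation.Unspecified          == 0
	// revocation.KeyCompromise        == 1
	// revocation.CessationOfOperation == 5
	// (RFC 5280 CRLReason code points)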
+ reg := createWorkingRegistration(t, sa) + serial, testCert := test.ThrowAwayCert(t, fc) + issuedTime := fc.Now() + _, err := sa.AddSerial(ctx, &sapb.AddSerialRequest{ + RegID: reg.Id, + Serial: core.SerialToString(testCert.SerialNumber), + Created: timestamppb.New(testCert.NotBefore), + Expires: timestamppb.New(testCert.NotAfter), + }) + test.AssertNotError(t, err, "failed to add test serial") + _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ + Der: testCert.Raw, + RegID: reg.Id, + Issued: timestamppb.New(issuedTime), + IssuerNameID: 1, }) - test.AssertNotError(t, err, "Couldn't add www.eff.org.der") + test.AssertNotError(t, err, "Couldn't add test cert") fc.Add(1 * time.Hour) // Try to update it before its been revoked + now := fc.Now() _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: fc.Now().UnixNano(), - Backdate: fc.Now().UnixNano(), - Reason: ocsp.KeyCompromise, + Date: timestamppb.New(now), + Backdate: timestamppb.New(now), + Reason: int64(revocation.KeyCompromise), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertError(t, err, "UpdateRevokedCertificate should have failed") test.AssertContains(t, err.Error(), "no certificate with serial") // Now revoke it, so we can update it. - revokedTime := fc.Now().UnixNano() + revokedTime := fc.Now() _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: revokedTime, - Reason: ocsp.CessationOfOperation, + Date: timestamppb.New(revokedTime), + Reason: int64(revocation.CessationOfOperation), Response: []byte{1, 2, 3}, + ShardIdx: 1, }) test.AssertNotError(t, err, "RevokeCertificate failed") @@ -1693,463 +1963,409 @@ func TestUpdateRevokedCertificate(t *testing.T) { status, err := sa.GetCertificateStatus(ctx, &sapb.Serial{Serial: serial}) test.AssertNotError(t, err, "GetCertificateStatus failed") test.AssertEquals(t, core.OCSPStatus(status.Status), core.OCSPStatusRevoked) - test.AssertEquals(t, int(status.RevokedReason), ocsp.CessationOfOperation) + test.AssertEquals(t, revocation.Reason(status.RevokedReason), revocation.CessationOfOperation) fc.Add(1 * time.Hour) // Try to update its revocation info with no backdate + now = fc.Now() _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: fc.Now().UnixNano(), - Reason: ocsp.KeyCompromise, + Date: timestamppb.New(now), + Reason: int64(revocation.KeyCompromise), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertError(t, err, "UpdateRevokedCertificate should have failed") test.AssertContains(t, err.Error(), "incomplete") // Try to update its revocation info for a reason other than keyCompromise _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: fc.Now().UnixNano(), - Backdate: revokedTime, - Reason: ocsp.Unspecified, + Date: timestamppb.New(now), + Backdate: timestamppb.New(revokedTime), + Reason: int64(revocation.Unspecified), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertError(t, err, "UpdateRevokedCertificate should have failed") test.AssertContains(t, err.Error(), "cannot update revocation for any reason other than keyCompromise") // Try to update the revocation info of the wrong certificate _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: "000000000000000000000000000000021bd5", - Date: 
fc.Now().UnixNano(), - Backdate: revokedTime, - Reason: ocsp.KeyCompromise, + Date: timestamppb.New(now), + Backdate: timestamppb.New(revokedTime), + Reason: int64(revocation.KeyCompromise), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertError(t, err, "UpdateRevokedCertificate should have failed") test.AssertContains(t, err.Error(), "no certificate with serial") // Try to update its revocation info with the wrong backdate _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: fc.Now().UnixNano(), - Backdate: fc.Now().UnixNano(), - Reason: ocsp.KeyCompromise, + Date: timestamppb.New(now), + Backdate: timestamppb.New(now), + Reason: int64(revocation.KeyCompromise), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertError(t, err, "UpdateRevokedCertificate should have failed") test.AssertContains(t, err.Error(), "no certificate with serial") - // Try to update its revocation info correctly + // Try to update its revocation info with the wrong shard _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, + Serial: serial, + Date: timestamppb.New(now), + Backdate: timestamppb.New(revokedTime), + Reason: int64(revocation.KeyCompromise), + Response: []byte{4, 5, 6}, + ShardIdx: 2, + }) + test.AssertError(t, err, "UpdateRevokedCertificate should have failed") + test.AssertContains(t, err.Error(), "mismatched shard index") + + // Try to update its revocation info correctly + _, err = sa.UpdateRevokedCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, Serial: serial, - Date: fc.Now().UnixNano(), - Backdate: revokedTime, - Reason: ocsp.KeyCompromise, + Date: timestamppb.New(now), + Backdate: timestamppb.New(revokedTime), + Reason: int64(revocation.KeyCompromise), Response: []byte{4, 5, 6}, + ShardIdx: 1, }) test.AssertNotError(t, err, "UpdateRevokedCertificate failed") } func TestAddCertificateRenewalBit(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) reg := createWorkingRegistration(t, sa) - // An example cert taken from EFF's website - certDER, err := ioutil.ReadFile("www.eff.org.der") - test.AssertNotError(t, err, "Unexpected error reading www.eff.org.der test file") - cert, err := x509.ParseCertificate(certDER) - test.AssertNotError(t, err, "Unexpected error parsing www.eff.org.der test file") - names := cert.DNSNames - - expires := fc.Now().Add(time.Hour * 2).UTC() - issued := fc.Now() - serial := "thrilla" - - // Add a FQDN set for the names so that it will be considered a renewal - tx, err := sa.dbMap.Begin() - test.AssertNotError(t, err, "Failed to open transaction") - err = addFQDNSet(tx, names, serial, issued, expires) - test.AssertNotError(t, err, "Failed to add name set") - test.AssertNotError(t, tx.Commit(), "Failed to commit transaction") - - // Add the certificate with the same names. 
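Taken together, the TestUpdateRevokedCertificate cases above reduce to a small set of preconditions for amending an existing revocation. A hedged sketch of that rule set (illustrative names only; the real checks live in the SA's SQL layer and surface as the error substrings asserted above):

	// canUpdateRevocation mirrors the failure cases exercised above.
	func canUpdateRevocation(req *sapb.RevokeCertificateRequest, revokedAt time.Time, shard int64) error {
		switch {
		case req.Backdate == nil:
			return errors.New("incomplete request: missing backdate")
		case revocation.Reason(req.Reason) != revocation.KeyCompromise:
			return errors.New("cannot update revocation for any reason other than keyCompromise")
		case !req.Backdate.AsTime().Equal(revokedAt):
			return errors.New("no certificate with serial revoked at the given time")
		case req.ShardIdx != shard:
			return errors.New("mismatched shard index")
		}
		return nil
	}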
- _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - Issued: issued.UnixNano(), - RegID: reg.Id, - IssuerID: 1, - }) - test.AssertNotError(t, err, "Failed to add precertificate") - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - RegID: reg.Id, - Issued: issued.UnixNano(), - }) - test.AssertNotError(t, err, "Failed to add certificate") - - assertIsRenewal := func(t *testing.T, name string, expected bool) { + assertIsRenewal := func(t *testing.T, issuedName string, expected bool) { t.Helper() var count int err := sa.dbMap.SelectOne( + ctx, &count, - `SELECT COUNT(1) FROM issuedNames + `SELECT COUNT(*) FROM issuedNames WHERE reversedName = ? AND renewal = ?`, - ReverseName(name), + issuedName, expected, ) test.AssertNotError(t, err, "Unexpected error from SelectOne on issuedNames") test.AssertEquals(t, count, 1) } - // All of the names should have a issuedNames row marking it as a renewal. - for _, name := range names { - assertIsRenewal(t, name, true) - } + // Add a certificate with never-before-seen identifiers. + _, testCert := test.ThrowAwayCert(t, fc) + _, err := sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ + Der: testCert.Raw, + Issued: timestamppb.New(testCert.NotBefore), + RegID: reg.Id, + IssuerNameID: 1, + }) + test.AssertNotError(t, err, "Failed to add precertificate") + _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ + Der: testCert.Raw, + RegID: reg.Id, + Issued: timestamppb.New(testCert.NotBefore), + }) + test.AssertNotError(t, err, "Failed to add certificate") - // Add a certificate with different names. - certDER, err = ioutil.ReadFile("test-cert.der") - test.AssertNotError(t, err, "Unexpected error reading test-cert.der test file") - cert, err = x509.ParseCertificate(certDER) - test.AssertNotError(t, err, "Unexpected error parsing test-cert.der test file") - names = cert.DNSNames + // No identifier should have an issuedNames row marking it as a renewal. + for _, name := range testCert.DNSNames { + assertIsRenewal(t, reverseFQDN(name), false) + } + for _, ip := range testCert.IPAddresses { + assertIsRenewal(t, ip.String(), false) + } + // Make a new cert and add its FQDN set to the db so it will be considered a + // renewal + serial, testCert := test.ThrowAwayCert(t, fc) + err = addFQDNSet(ctx, sa.dbMap, identifier.FromCert(testCert), serial, testCert.NotBefore, testCert.NotAfter) + test.AssertNotError(t, err, "Failed to add identifier set") _, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, - Issued: issued.UnixNano(), - RegID: reg.Id, - IssuerID: 1, + Der: testCert.Raw, + Issued: timestamppb.New(testCert.NotBefore), + RegID: reg.Id, + IssuerNameID: 1, }) test.AssertNotError(t, err, "Failed to add precertificate") _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certDER, + Der: testCert.Raw, RegID: reg.Id, - Issued: issued.UnixNano(), + Issued: timestamppb.New(testCert.NotBefore), }) test.AssertNotError(t, err, "Failed to add certificate") - // None of the names should have a issuedNames row marking it as a renewal. - for _, name := range names { - assertIsRenewal(t, name, false) + // Each identifier should have an issuedNames row marking it as a renewal. 
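The issuedNames assertions above key DNS names through reverseFQDN and store IP identifiers verbatim via ip.String(). A sketch of the reversal, assuming the stored form simply flips label order so that subtree lookups become prefix scans (the real reverseFQDN may normalize further):

	func reverseFQDNSketch(fqdn string) string {
		labels := strings.Split(fqdn, ".")
		slices.Reverse(labels) // "www.example.com" -> ["com", "example", "www"]
		return strings.Join(labels, ".")
	}

	// reverseFQDNSketch("www.example.com") == "com.example.www"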
+ for _, name := range testCert.DNSNames { + assertIsRenewal(t, reverseFQDN(name), true) + } + for _, ip := range testCert.IPAddresses { + assertIsRenewal(t, ip.String(), true) } } -func TestCountCertificatesRenewalBit(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() +func TestFinalizeAuthorization2(t *testing.T) { + sa, fc := initSA(t) + + fc.Set(mustTime("2021-01-01 00:00")) - // Create a test registration reg := createWorkingRegistration(t, sa) + authzID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + expires := fc.Now().Add(time.Hour * 2).UTC() + attemptedAt := fc.Now() + ip, _ := netip.MustParseAddr("1.1.1.1").MarshalText() - // Create a small throw away key for the test certificates. - testKey, err := rsa.GenerateKey(rand.Reader, 512) - test.AssertNotError(t, err, "error generating test key") - - // Create an initial test certificate for a set of domain names, issued an - // hour ago. - template := &x509.Certificate{ - SerialNumber: big.NewInt(1337), - DNSNames: []string{"www.not-example.com", "not-example.com", "admin.not-example.com"}, - NotBefore: fc.Now().Add(-time.Hour), - BasicConstraintsValid: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - } - certADER, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey) - test.AssertNotError(t, err, "Failed to create test cert A") - certA, _ := x509.ParseCertificate(certADER) - - // Update the template with a new serial number and a not before of now and - // create a second test cert for the same names. This will be a renewal. - template.SerialNumber = big.NewInt(7331) - template.NotBefore = fc.Now() - certBDER, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey) - test.AssertNotError(t, err, "Failed to create test cert B") - certB, _ := x509.ParseCertificate(certBDER) - - // Update the template with a third serial number and a partially overlapping - // set of names. This will not be a renewal but will help test the exact name - // counts. - template.SerialNumber = big.NewInt(0xC0FFEE) - template.DNSNames = []string{"www.not-example.com"} - certCDER, err := x509.CreateCertificate(rand.Reader, template, template, testKey.Public(), testKey) - test.AssertNotError(t, err, "Failed to create test cert C") - - countName := func(t *testing.T, expectedName string) int64 { - req := &sapb.CountCertificatesByNamesRequest{ - Names: []string{expectedName}, - Range: &sapb.Range{ - Earliest: fc.Now().Add(-5 * time.Hour).UnixNano(), - Latest: fc.Now().Add(5 * time.Hour).UnixNano(), + _, err := sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ + { + Hostname: "example.com", + Port: "80", + Url: "http://example.com", + AddressUsed: ip, + ResolverAddrs: []string{"resolver:5353"}, }, - } - counts, err := sa.CountCertificatesByNames(context.Background(), req) - test.AssertNotError(t, err, "Unexpected err from CountCertificatesByNames") - for name, count := range counts.Counts { - if name == expectedName { - return count - } - } - return 0 - } - - // Add the first certificate - it won't be considered a renewal. 
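The FinalizeAuthorization2 tests above build AddressUsed from netip.MustParseAddr("1.1.1.1").MarshalText(), part of this diff's wider move from net.IP to net/netip. The textual encoding is what the protobuf bytes field carries:

	addr := netip.MustParseAddr("1.1.1.1")
	ip, _ := addr.MarshalText() // []byte("1.1.1.1"); the error is nil for a valid Addr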
- issued := certA.NotBefore - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certADER, - RegID: reg.Id, - Issued: issued.UnixNano(), + }, + Status: string(core.StatusValid), + Expires: timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), }) - test.AssertNotError(t, err, "Failed to add CertA test certificate") - - // The count for the base domain should be 1 - just certA has been added. - test.AssertEquals(t, countName(t, "not-example.com"), int64(1)) + test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") - // Add the second certificate - it should be considered a renewal - issued = certB.NotBefore - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certBDER, - RegID: reg.Id, - Issued: issued.UnixNano(), - }) - test.AssertNotError(t, err, "Failed to add CertB test certificate") + dbVer, err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertNotError(t, err, "sa.GetAuthorization2 failed") + test.AssertEquals(t, dbVer.Status, string(core.StatusValid)) + test.AssertEquals(t, dbVer.Expires.AsTime(), expires) + test.AssertEquals(t, dbVer.Challenges[0].Status, string(core.StatusValid)) + test.AssertEquals(t, len(dbVer.Challenges[0].Validationrecords), 1) + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].Hostname, "example.com") + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].Port, "80") + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].ResolverAddrs[0], "resolver:5353") + test.AssertEquals(t, dbVer.Challenges[0].Validated.AsTime(), attemptedAt) - // The count for the base domain should still be 1, just certA. CertB should - // be ignored. - test.AssertEquals(t, countName(t, "not-example.com"), int64(1)) + authzID = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + prob := bgrpc.ProblemDetailsToPB(probs.Connection("it went bad captain")) - // Add the third certificate - it should not be considered a renewal - _, err = sa.AddCertificate(ctx, &sapb.AddCertificateRequest{ - Der: certCDER, - RegID: reg.Id, - Issued: issued.UnixNano(), + _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ + { + Hostname: "example.com", + Port: "80", + Url: "http://example.com", + AddressUsed: ip, + ResolverAddrs: []string{"resolver:5353"}, + }, + }, + ValidationError: prob, + Status: string(core.StatusInvalid), + Attempted: string(core.ChallengeTypeHTTP01), + Expires: timestamppb.New(expires), }) - test.AssertNotError(t, err, "Failed to add CertC test certificate") + test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") - // The count for the base domain should be 2 now: certA and certC. - // CertB should be ignored. 
- test.AssertEquals(t, countName(t, "not-example.com"), int64(2)) + dbVer, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertNotError(t, err, "sa.GetAuthorization2 failed") + test.AssertEquals(t, dbVer.Status, string(core.StatusInvalid)) + test.AssertEquals(t, dbVer.Challenges[0].Status, string(core.StatusInvalid)) + test.AssertEquals(t, len(dbVer.Challenges[0].Validationrecords), 1) + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].Hostname, "example.com") + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].Port, "80") + test.AssertEquals(t, dbVer.Challenges[0].Validationrecords[0].ResolverAddrs[0], "resolver:5353") + test.AssertDeepEquals(t, dbVer.Challenges[0].Error, prob) } -func TestNewAuthorizations2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() +func TestFinalizeAuthorization2_Race(t *testing.T) { + // Attempting to finalize the same authorization twice, e.g. because two + // requests to validate one of its challenges arrived in rapid succession and + // both succeeded, should result in a NotFound error for the second attempt. + sa, fc := initSA(t) + fc.Set(mustTime("2021-01-01 00:00")) reg := createWorkingRegistration(t, sa) - expires := fc.Now().Add(time.Hour).UTC().UnixNano() - apbA := &corepb.Authorization{ - Identifier: "aaa", - RegistrationID: reg.Id, - Status: string(core.StatusPending), - Expires: expires, - Challenges: []*corepb.Challenge{ + authzID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + + _, err := sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ { - Status: string(core.StatusPending), - Type: string(core.ChallengeTypeDNS01), - Token: "YXNkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Hostname: "example.com", + Port: "80", + Url: "http://example.com", + AddressUsed: []byte("1.1.1.1"), + ResolverAddrs: []string{"resolver:5353"}, }, }, + Status: string(core.StatusValid), + Expires: timestamppb.New(fc.Now().Add(time.Hour * 24)), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(fc.Now()), + }) + if err != nil { + t.Fatalf("FinalizeAuthorization2() = %#v, but want success", err) } - apbB := &corepb.Authorization{ - Identifier: "aaa", - RegistrationID: reg.Id, - Status: string(core.StatusPending), - Expires: expires, - Challenges: []*corepb.Challenge{ + + _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ { - Status: string(core.StatusPending), - Type: string(core.ChallengeTypeDNS01), - Token: "ZmdoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Hostname: "example.com", + Port: "80", + Url: "http://example.com", + AddressUsed: []byte("2.2.2.2"), + ResolverAddrs: []string{"resolver:5354"}, }, }, - } - req := &sapb.AddPendingAuthorizationsRequest{Authz: []*corepb.Authorization{apbA, apbB}} - ids, err := sa.NewAuthorizations2(context.Background(), req) - test.AssertNotError(t, err, "sa.NewAuthorizations failed") - test.AssertEquals(t, len(ids.Ids), 2) - for i, id := range ids.Ids { - dbVer, err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: id}) - test.AssertNotError(t, err, "sa.GetAuthorization2 failed") - - // Everything but ID should match. 
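TestFinalizeAuthorization2_Race, begun above, depends on the second finalization attempt surfacing as berrors.NotFound. A plausible shape for that guard, sketched here as an assumption about the implementation rather than the actual query:

	// Illustrative guard: the UPDATE only matches rows still in the pending
	// state, so a repeat finalize affects zero rows, and the SA converts
	// "zero rows affected" into a berrors.NotFound.
	//
	//   UPDATE authz2 SET status = ?, attempted = ?, attemptedAt = ?
	//   WHERE id = ? AND status = ?  -- bound to the pending status code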
- req.Authz[i].Id = dbVer.Id - test.AssertDeepEquals(t, req.Authz[i], dbVer) + Status: string(core.StatusInvalid), + Expires: timestamppb.New(fc.Now().Add(time.Hour * 2)), + Attempted: string(core.ChallengeTypeTLSALPN01), + AttemptedAt: timestamppb.New(fc.Now()), + }) + if !errors.Is(err, berrors.NotFound) { + t.Fatalf("FinalizeAuthorization2(repeat ID) = %s, but want NotFound error", err) } } -func TestNewAuthorizations2_100(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() - - reg := createWorkingRegistration(t, sa) - expires := fc.Now().Add(time.Hour).UnixNano() - - allAuthz := make([]*corepb.Authorization, 100) - for i := 0; i < 100; i++ { - allAuthz[i] = &corepb.Authorization{ - Identifier: fmt.Sprintf("%08x", i), - RegistrationID: reg.Id, - Status: string(core.StatusPending), - Expires: expires, - Challenges: []*corepb.Challenge{ - { - Status: string(core.StatusPending), - Type: string(core.ChallengeTypeDNS01), - Token: core.NewToken(), - }, - }, - } - } - - req := &sapb.AddPendingAuthorizationsRequest{Authz: allAuthz} - ids, err := sa.NewAuthorizations2(context.Background(), req) - test.AssertNotError(t, err, "sa.NewAuthorizations failed") - test.AssertEquals(t, len(ids.Ids), 100) - for i, id := range ids.Ids { - dbVer, err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: id}) - test.AssertNotError(t, err, "sa.GetAuthorization2 failed") - // Everything but the ID should match. - req.Authz[i].Id = dbVer.Id - test.AssertDeepEquals(t, req.Authz[i], dbVer) - } -} +func TestRehydrateHostPort(t *testing.T) { + sa, fc := initSA(t) -func TestFinalizeAuthorization2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + fc.Set(mustTime("2021-01-01 00:00")) reg := createWorkingRegistration(t, sa) + expires := fc.Now().Add(time.Hour * 2).UTC() + attemptedAt := fc.Now() + ip, _ := netip.MustParseAddr("1.1.1.1").MarshalText() - expires := fc.Now().Add(time.Hour).UTC().UnixNano() - apb := &corepb.Authorization{ - Identifier: "aaa", - RegistrationID: reg.Id, - Status: string(core.StatusPending), - Expires: expires, - Challenges: []*corepb.Challenge{ + // Implicit good port with good scheme + authzID := createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + _, err := sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ { - Status: string(core.StatusPending), - Type: string(core.ChallengeTypeDNS01), - Token: "YXNkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Hostname: "example.com", + Port: "80", + Url: "http://example.com", + AddressUsed: ip, }, }, - } - ids, err := sa.NewAuthorizations2(context.Background(), &sapb.AddPendingAuthorizationsRequest{Authz: []*corepb.Authorization{apb}}) - test.AssertNotError(t, err, "sa.NewAuthorization failed") - - fc.Set(time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)) - expires = fc.Now().Add(time.Hour * 2).UTC().UnixNano() - attemptedAt := fc.Now().UnixNano() + Status: string(core.StatusValid), + Expires: timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), + }) + test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") + _, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertNotError(t, err, "rehydration failed in some fun and interesting way") - ip, _ := net.ParseIP("1.1.1.1").MarshalText() + // Explicit good port with good scheme + authzID = 
createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ - Id: ids.Ids[0], + Id: authzID, ValidationRecords: []*corepb.ValidationRecord{ { - Hostname: "aaa", - Port: "123", - Url: "http://asd", + Hostname: "example.com", + Port: "80", + Url: "http://example.com:80", AddressUsed: ip, }, }, Status: string(core.StatusValid), - Expires: expires, - Attempted: string(core.ChallengeTypeDNS01), - AttemptedAt: attemptedAt, + Expires: timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), }) test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") + _, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertNotError(t, err, "rehydration failed in some fun and interesting way") - dbVer, err := sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: ids.Ids[0]}) - test.AssertNotError(t, err, "sa.GetAuthorization2 failed") - test.AssertEquals(t, dbVer.Status, string(core.StatusValid)) - test.AssertEquals(t, time.Unix(0, dbVer.Expires).UTC(), fc.Now().Add(time.Hour*2).UTC()) - test.AssertEquals(t, dbVer.Challenges[0].Status, string(core.StatusValid)) - test.AssertEquals(t, len(dbVer.Challenges[0].Validationrecords), 1) - test.AssertEquals(t, time.Unix(0, dbVer.Challenges[0].Validated).UTC(), fc.Now().UTC()) - - apb2 := &corepb.Authorization{ - Identifier: "aaa", - RegistrationID: reg.Id, - Status: string(core.StatusPending), - Expires: expires, - Challenges: []*corepb.Challenge{ + // Explicit bad port with good scheme + authzID = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ { - Status: string(core.StatusPending), - Type: string(core.ChallengeTypeDNS01), - Token: "ZmdoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + Hostname: "example.com", + Port: "444", + Url: "http://example.com:444", + AddressUsed: ip, }, }, - } - ids, err = sa.NewAuthorizations2(context.Background(), &sapb.AddPendingAuthorizationsRequest{Authz: []*corepb.Authorization{apb2}}) - test.AssertNotError(t, err, "sa.NewAuthorization failed") - prob, _ := bgrpc.ProblemDetailsToPB(probs.ConnectionFailure("it went bad captain")) + Status: string(core.StatusValid), + Expires: timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), + }) + test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") + _, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertError(t, err, "only ports 80/tcp and 443/tcp are allowed in URL \"http://example.com:444\"") + + // Explicit bad port with bad scheme + authzID = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ - Id: ids.Ids[0], + Id: authzID, ValidationRecords: []*corepb.ValidationRecord{ { - Hostname: "aaa", - Port: "123", - Url: "http://asd", + Hostname: "example.com", + Port: "80", + Url: "httpx://example.com", AddressUsed: ip, }, }, - ValidationError: prob, - Status: string(core.StatusInvalid), - Attempted: string(core.ChallengeTypeDNS01), - Expires: expires, + Status: string(core.StatusValid), + Expires: 
timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), }) test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") + _, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertError(t, err, "unknown scheme \"httpx\" in URL \"httpx://example.com\"") - dbVer, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: ids.Ids[0]}) - test.AssertNotError(t, err, "sa.GetAuthorization2 failed") - test.AssertEquals(t, dbVer.Status, string(core.StatusInvalid)) - test.AssertEquals(t, dbVer.Challenges[0].Status, string(core.StatusInvalid)) - test.AssertEquals(t, len(dbVer.Challenges[0].Validationrecords), 1) - test.AssertDeepEquals(t, dbVer.Challenges[0].Error, prob) -} - -func TestGetPendingAuthorization2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() - - domain := "example.com" - expiresA := fc.Now().Add(time.Hour).UTC() - expiresB := fc.Now().Add(time.Hour * 3).UTC() - authzIDA := createPendingAuthorization(t, sa, domain, expiresA) - authzIDB := createPendingAuthorization(t, sa, domain, expiresB) - - regID := int64(1) - validUntil := fc.Now().Add(time.Hour * 2).UTC().UnixNano() - dbVer, err := sa.GetPendingAuthorization2(context.Background(), &sapb.GetPendingAuthorizationRequest{ - RegistrationID: regID, - IdentifierValue: domain, - ValidUntil: validUntil, - }) - test.AssertNotError(t, err, "sa.GetPendingAuthorization2 failed") - test.AssertEquals(t, fmt.Sprintf("%d", authzIDB), dbVer.Id) - - validUntil = fc.Now().UTC().UnixNano() - dbVer, err = sa.GetPendingAuthorization2(context.Background(), &sapb.GetPendingAuthorizationRequest{ - RegistrationID: regID, - IdentifierValue: domain, - ValidUntil: validUntil, + // Missing URL field + authzID = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("aaa"), fc.Now().Add(time.Hour)) + _, err = sa.FinalizeAuthorization2(context.Background(), &sapb.FinalizeAuthorizationRequest{ + Id: authzID, + ValidationRecords: []*corepb.ValidationRecord{ + { + Hostname: "example.com", + Port: "80", + AddressUsed: ip, + }, + }, + Status: string(core.StatusValid), + Expires: timestamppb.New(expires), + Attempted: string(core.ChallengeTypeHTTP01), + AttemptedAt: timestamppb.New(attemptedAt), }) - test.AssertNotError(t, err, "sa.GetPendingAuthorization2 failed") - test.AssertEquals(t, fmt.Sprintf("%d", authzIDA), dbVer.Id) + test.AssertNotError(t, err, "sa.FinalizeAuthorization2 failed") + _, err = sa.GetAuthorization2(context.Background(), &sapb.AuthorizationID2{Id: authzID}) + test.AssertError(t, err, "URL field cannot be empty") } func TestCountPendingAuthorizations2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) + reg := createWorkingRegistration(t, sa) expiresA := fc.Now().Add(time.Hour).UTC() expiresB := fc.Now().Add(time.Hour * 3).UTC() - _ = createPendingAuthorization(t, sa, "example.com", expiresA) - _ = createPendingAuthorization(t, sa, "example.com", expiresB) + _ = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expiresA) + _ = createPendingAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expiresB) // Registration has two new style pending authorizations - regID := int64(1) + regID := reg.Id count, err := sa.CountPendingAuthorizations2(context.Background(), &sapb.RegistrationID{ Id: regID, }) @@ -2165,7 +2381,7 @@ func TestCountPendingAuthorizations2(t *testing.T) { test.AssertEquals(t, count.Count, int64(1)) // Registration 
with no authorizations should be 0 - noReg := int64(20) + noReg := reg.Id + 100 count, err = sa.CountPendingAuthorizations2(context.Background(), &sapb.RegistrationID{ Id: noReg, }) @@ -2175,8 +2391,8 @@ func TestCountPendingAuthorizations2(t *testing.T) { func TestAuthzModelMapToPB(t *testing.T) { baseExpires := time.Now() - input := map[string]authzModel{ - "example.com": { + input := map[identifier.ACMEIdentifier]authzModel{ + identifier.NewDNS("example.com"): { ID: 123, IdentifierType: 0, IdentifierValue: "example.com", @@ -2185,7 +2401,7 @@ func TestAuthzModelMapToPB(t *testing.T) { Expires: baseExpires, Challenges: 4, }, - "www.example.com": { + identifier.NewDNS("www.example.com"): { ID: 124, IdentifierType: 0, IdentifierValue: "www.example.com", @@ -2194,7 +2410,7 @@ func TestAuthzModelMapToPB(t *testing.T) { Expires: baseExpires, Challenges: 1, }, - "other.example.net": { + identifier.NewDNS("other.example.net"): { ID: 125, IdentifierType: 0, IdentifierValue: "other.example.net", @@ -2203,6 +2419,15 @@ func TestAuthzModelMapToPB(t *testing.T) { Expires: baseExpires, Challenges: 3, }, + identifier.NewIP(netip.MustParseAddr("10.10.10.10")): { + ID: 126, + IdentifierType: 1, + IdentifierValue: "10.10.10.10", + RegistrationID: 77, + Status: 1, + Expires: baseExpires, + Challenges: 5, + }, } out, err := authzModelMapToPB(input) @@ -2210,35 +2435,35 @@ func TestAuthzModelMapToPB(t *testing.T) { t.Fatal(err) } - for _, el := range out.Authz { - model, ok := input[el.Domain] + for _, authzPB := range out.Authzs { + model, ok := input[identifier.FromProto(authzPB.Identifier)] if !ok { - t.Errorf("output had element for %q, a hostname not present in input", el.Domain) + t.Errorf("output had element for %q, an identifier not present in input", authzPB.Identifier.Value) } - authzPB := el.Authz test.AssertEquals(t, authzPB.Id, fmt.Sprintf("%d", model.ID)) - test.AssertEquals(t, authzPB.Identifier, model.IdentifierValue) + test.AssertEquals(t, authzPB.Identifier.Type, string(uintToIdentifierType[model.IdentifierType])) + test.AssertEquals(t, authzPB.Identifier.Value, model.IdentifierValue) test.AssertEquals(t, authzPB.RegistrationID, model.RegistrationID) test.AssertEquals(t, authzPB.Status, string(uintToStatus[model.Status])) - gotTime := time.Unix(0, authzPB.Expires).UTC() + gotTime := authzPB.Expires.AsTime() if !model.Expires.Equal(gotTime) { - t.Errorf("Times didn't match. Got %s, expected %s (%d)", gotTime, model.Expires, authzPB.Expires) + t.Errorf("Times didn't match. 
Got %s, expected %s (%s)", gotTime, model.Expires, authzPB.Expires.AsTime()) } - if len(el.Authz.Challenges) != bits.OnesCount(uint(model.Challenges)) { - t.Errorf("wrong number of challenges for %q: got %d, expected %d", el.Domain, - len(el.Authz.Challenges), bits.OnesCount(uint(model.Challenges))) + if len(authzPB.Challenges) != bits.OnesCount(uint(model.Challenges)) { + t.Errorf("wrong number of challenges for %q: got %d, expected %d", authzPB.Identifier.Value, + len(authzPB.Challenges), bits.OnesCount(uint(model.Challenges))) } switch model.Challenges { case 1: - test.AssertEquals(t, el.Authz.Challenges[0].Type, "http-01") + test.AssertEquals(t, authzPB.Challenges[0].Type, "http-01") case 3: - test.AssertEquals(t, el.Authz.Challenges[0].Type, "http-01") - test.AssertEquals(t, el.Authz.Challenges[1].Type, "dns-01") + test.AssertEquals(t, authzPB.Challenges[0].Type, "http-01") + test.AssertEquals(t, authzPB.Challenges[1].Type, "dns-01") case 4: - test.AssertEquals(t, el.Authz.Challenges[0].Type, "tls-alpn-01") + test.AssertEquals(t, authzPB.Challenges[0].Type, "tls-alpn-01") } - delete(input, el.Domain) + delete(input, identifier.FromProto(authzPB.Identifier)) } for k := range input { @@ -2247,136 +2472,277 @@ func TestAuthzModelMapToPB(t *testing.T) { } func TestGetValidOrderAuthorizations2(t *testing.T) { - sa, fc, cleanup := initSA(t) - defer cleanup() + sa, fc := initSA(t) - // Create two new valid authorizations + // Create three new valid authorizations reg := createWorkingRegistration(t, sa) - identA := "a.example.com" - identB := "b.example.com" + identA := identifier.NewDNS("a.example.com") + identB := identifier.NewDNS("b.example.com") + identC := identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")) expires := fc.Now().Add(time.Hour * 24 * 7).UTC() attemptedAt := fc.Now() - authzIDA := createFinalizedAuthorization(t, sa, identA, expires, "valid", attemptedAt) - authzIDB := createFinalizedAuthorization(t, sa, identB, expires, "valid", attemptedAt) + authzIDA := createFinalizedAuthorization(t, sa, reg.Id, identA, expires, "valid", attemptedAt) + authzIDB := createFinalizedAuthorization(t, sa, reg.Id, identB, expires, "valid", attemptedAt) + authzIDC := createFinalizedAuthorization(t, sa, reg.Id, identC, expires, "valid", attemptedAt) - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: fc.Now().Truncate(time.Second).UnixNano(), - Names: []string{"a.example.com", "b.example.com"}, - V2Authorizations: []int64{authzIDA, authzIDB}, + orderExpr := fc.Now().Truncate(time.Second) + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(orderExpr), + Identifiers: []*corepb.Identifier{ + identifier.NewDNS("a.example.com").ToProto(), + identifier.NewDNS("b.example.com").ToProto(), + identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")).ToProto(), + }, + V2Authorizations: []int64{authzIDA, authzIDB, authzIDC}, + }, }) test.AssertNotError(t, err, "AddOrder failed") - authzMap, err := sa.GetValidOrderAuthorizations2( + authzPBs, err := sa.GetValidOrderAuthorizations2( context.Background(), &sapb.GetValidOrderAuthorizationsRequest{ Id: order.Id, AcctID: reg.Id, }) test.AssertNotError(t, err, "sa.GetValidOrderAuthorizations failed") - test.AssertNotNil(t, authzMap, "sa.GetValidOrderAuthorizations result was nil") - test.AssertEquals(t, len(authzMap.Authz), 2) + 
test.AssertNotNil(t, authzPBs, "sa.GetValidOrderAuthorizations result was nil") + test.AssertEquals(t, len(authzPBs.Authzs), 3) - namesToCheck := map[string]int64{"a.example.com": authzIDA, "b.example.com": authzIDB} - for _, a := range authzMap.Authz { - if fmt.Sprintf("%d", namesToCheck[a.Authz.Identifier]) != a.Authz.Id { - t.Fatalf("incorrect identifier %q with id %s", a.Authz.Identifier, a.Authz.Id) + identsToCheck := map[identifier.ACMEIdentifier]int64{ + identifier.NewDNS("a.example.com"): authzIDA, + identifier.NewDNS("b.example.com"): authzIDB, + identifier.NewIP(netip.MustParseAddr("3fff:aaa:aaaa:aaaa:abad:0ff1:cec0:ffee")): authzIDC, + } + for _, a := range authzPBs.Authzs { + ident := identifier.ACMEIdentifier{Type: identifier.IdentifierType(a.Identifier.Type), Value: a.Identifier.Value} + if fmt.Sprintf("%d", identsToCheck[ident]) != a.Id { + t.Fatalf("incorrect identifier %q with id %s", a.Identifier.Value, a.Id) } - test.AssertEquals(t, a.Authz.Expires, expires.UnixNano()) - delete(namesToCheck, a.Authz.Identifier) + test.AssertEquals(t, a.Expires.AsTime(), expires) + delete(identsToCheck, ident) } // Getting the order authorizations for an order that doesn't exist should return nothing missingID := int64(0xC0FFEEEEEEE) - authzMap, err = sa.GetValidOrderAuthorizations2( + authzPBs, err = sa.GetValidOrderAuthorizations2( context.Background(), &sapb.GetValidOrderAuthorizationsRequest{ Id: missingID, AcctID: reg.Id, }) test.AssertNotError(t, err, "sa.GetValidOrderAuthorizations failed") - test.AssertEquals(t, len(authzMap.Authz), 0) - - // Getting the order authorizations for an order that does exist, but for the - // wrong acct ID should return nothing - wrongAcctID := int64(0xDEADDA7ABA5E) - authzMap, err = sa.GetValidOrderAuthorizations2( - context.Background(), - &sapb.GetValidOrderAuthorizationsRequest{ - Id: order.Id, - AcctID: wrongAcctID, - }) - test.AssertNotError(t, err, "sa.GetValidOrderAuthorizations failed") - test.AssertEquals(t, len(authzMap.Authz), 0) + test.AssertEquals(t, len(authzPBs.Authzs), 0) } func TestCountInvalidAuthorizations2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) - // Create two authorizations, one pending, one invalid fc.Add(time.Hour) reg := createWorkingRegistration(t, sa) - ident := "aaa" - expiresA := fc.Now().Add(time.Hour).UTC() - expiresB := fc.Now().Add(time.Hour * 3).UTC() - attemptedAt := fc.Now() - _ = createFinalizedAuthorization(t, sa, ident, expiresA, "invalid", attemptedAt) - _ = createPendingAuthorization(t, sa, ident, expiresB) - - earliest, latest := fc.Now().Add(-time.Hour).UTC().UnixNano(), fc.Now().Add(time.Hour*5).UTC().UnixNano() - count, err := sa.CountInvalidAuthorizations2(context.Background(), &sapb.CountInvalidAuthorizationsRequest{ - RegistrationID: reg.Id, - Hostname: ident, - Range: &sapb.Range{ - Earliest: earliest, - Latest: latest, - }, - }) - test.AssertNotError(t, err, "sa.CountInvalidAuthorizations2 failed") - test.AssertEquals(t, count.Count, int64(1)) + idents := identifier.ACMEIdentifiers{ + identifier.NewDNS("aaa"), + identifier.NewIP(netip.MustParseAddr("10.10.10.10")), + } + for _, ident := range idents { + // Create two authorizations, one pending, one invalid + expiresA := fc.Now().Add(time.Hour).UTC() + expiresB := fc.Now().Add(time.Hour * 3).UTC() + attemptedAt := fc.Now() + _ = createFinalizedAuthorization(t, sa, reg.Id, ident, expiresA, "invalid", attemptedAt) + _ = createPendingAuthorization(t, sa, reg.Id, ident, expiresB) + + earliest := 
fc.Now().Add(-time.Hour).UTC() + latest := fc.Now().Add(time.Hour * 5).UTC() + count, err := sa.CountInvalidAuthorizations2(context.Background(), &sapb.CountInvalidAuthorizationsRequest{ + RegistrationID: reg.Id, + Identifier: ident.ToProto(), + Range: &sapb.Range{ + Earliest: timestamppb.New(earliest), + Latest: timestamppb.New(latest), + }, + }) + test.AssertNotError(t, err, "sa.CountInvalidAuthorizations2 failed") + test.AssertEquals(t, count.Count, int64(1)) + } } func TestGetValidAuthorizations2(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) + + var aaa int64 + { + tokenStr := core.NewToken() + token, err := base64.RawURLEncoding.DecodeString(tokenStr) + test.AssertNotError(t, err, "computing test authorization challenge token") + + profile := "test" + attempted := challTypeToUint[string(core.ChallengeTypeHTTP01)] + attemptedAt := fc.Now() + vr, _ := json.Marshal([]core.ValidationRecord{}) + + am := authzModel{ + IdentifierType: identifierTypeToUint[string(identifier.TypeDNS)], + IdentifierValue: "aaa", + RegistrationID: 1, + CertificateProfileName: &profile, + Status: statusToUint[core.StatusValid], + Expires: fc.Now().Add(24 * time.Hour), + Challenges: 1 << challTypeToUint[string(core.ChallengeTypeHTTP01)], + Attempted: &attempted, + AttemptedAt: &attemptedAt, + Token: token, + ValidationError: nil, + ValidationRecord: vr, + } - // Create a valid authorization - ident := "aaa" - expires := fc.Now().Add(time.Hour).UTC() - attemptedAt := fc.Now() - authzID := createFinalizedAuthorization(t, sa, ident, expires, "valid", attemptedAt) + err = sa.dbMap.Insert(context.Background(), &am) + test.AssertNotError(t, err, "failed to insert valid authz") + + aaa = am.ID + } + + var dac int64 + { + tokenStr := core.NewToken() + token, err := base64.RawURLEncoding.DecodeString(tokenStr) + test.AssertNotError(t, err, "computing test authorization challenge token") + + profile := "test" + attempted := challTypeToUint[string(core.ChallengeTypeDNSAccount01)] + attemptedAt := fc.Now() + vr, _ := json.Marshal([]core.ValidationRecord{}) + + am := authzModel{ + IdentifierType: identifierTypeToUint[string(identifier.TypeDNS)], + IdentifierValue: "aaa", + RegistrationID: 3, + CertificateProfileName: &profile, + Status: statusToUint[core.StatusValid], + Expires: fc.Now().Add(24 * time.Hour), + Challenges: 1 << challTypeToUint[string(core.ChallengeTypeDNSAccount01)], + Attempted: &attempted, + AttemptedAt: &attemptedAt, + Token: token, + ValidationError: nil, + ValidationRecord: vr, + } + err = sa.dbMap.Insert(context.Background(), &am) + test.AssertNotError(t, err, "failed to insert valid authz with dns-account-01") + dac = am.ID + } - now := fc.Now().UTC().UnixNano() - regID := int64(1) - authzs, err := sa.GetValidAuthorizations2(context.Background(), &sapb.GetValidAuthorizationsRequest{ - Domains: []string{ - "aaa", - "bbb", + for _, tc := range []struct { + name string + regID int64 + identifiers []*corepb.Identifier + profile string + validUntil time.Time + wantIDs []int64 + }{ + { + name: "happy path, DNS identifier", + regID: 1, + identifiers: []*corepb.Identifier{identifier.NewDNS("aaa").ToProto()}, + profile: "test", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{aaa}, }, - RegistrationID: regID, - Now: now, - }) - test.AssertNotError(t, err, "sa.GetValidAuthorizations2 failed") - test.AssertEquals(t, len(authzs.Authz), 1) - test.AssertEquals(t, authzs.Authz[0].Domain, ident) - test.AssertEquals(t, authzs.Authz[0].Authz.Id, fmt.Sprintf("%d", authzID)) 
+ { + name: "happy path, dns-account-01 challenge", + regID: 3, + identifiers: []*corepb.Identifier{identifier.NewDNS("aaa").ToProto()}, + profile: "test", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{dac}, + }, + { + name: "different identifier type", + regID: 1, + identifiers: []*corepb.Identifier{identifier.NewIP(netip.MustParseAddr("10.10.10.10")).ToProto()}, + profile: "test", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{}, + }, + { + name: "different regID", + regID: 2, + identifiers: []*corepb.Identifier{identifier.NewDNS("aaa").ToProto()}, + profile: "test", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{}, + }, + { + name: "different DNS identifier", + regID: 1, + identifiers: []*corepb.Identifier{identifier.NewDNS("bbb").ToProto()}, + profile: "test", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{}, + }, + { + name: "different profile", + regID: 1, + identifiers: []*corepb.Identifier{identifier.NewDNS("aaa").ToProto()}, + profile: "other", + validUntil: fc.Now().Add(time.Hour), + wantIDs: []int64{}, + }, + { + name: "too-far-out validUntil", + regID: 2, + identifiers: []*corepb.Identifier{identifier.NewDNS("aaa").ToProto()}, + profile: "test", + validUntil: fc.Now().Add(25 * time.Hour), + wantIDs: []int64{}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + got, err := sa.GetValidAuthorizations2(context.Background(), &sapb.GetValidAuthorizationsRequest{ + RegistrationID: tc.regID, + Identifiers: tc.identifiers, + Profile: tc.profile, + ValidUntil: timestamppb.New(tc.validUntil), + }) + if err != nil { + t.Fatalf("GetValidAuthorizations2 got error %q, want success", err) + } + + var gotIDs []int64 + for _, authz := range got.Authzs { + id, err := strconv.Atoi(authz.Id) + if err != nil { + t.Fatalf("parsing authz id: %s", err) + } + gotIDs = append(gotIDs, int64(id)) + } + + slices.Sort(gotIDs) + slices.Sort(tc.wantIDs) + if !slices.Equal(gotIDs, tc.wantIDs) { + t.Errorf("GetValidAuthorizations2() = %+v, want %+v", gotIDs, tc.wantIDs) + } + }) + } } func TestGetOrderExpired(t *testing.T) { - sa, fc, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) fc.Add(time.Hour * 5) + now := fc.Now() reg := createWorkingRegistration(t, sa) - order, err := sa.NewOrder(context.Background(), &sapb.NewOrderRequest{ - RegistrationID: reg.Id, - Expires: fc.Now().Add(-time.Hour).UnixNano(), - Names: []string{"example.com"}, - V2Authorizations: []int64{666}, + order, err := sa.NewOrderAndAuthzs(context.Background(), &sapb.NewOrderAndAuthzsRequest{ + NewOrder: &sapb.NewOrderRequest{ + RegistrationID: reg.Id, + Expires: timestamppb.New(now.Add(-time.Hour)), + Identifiers: []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()}, + V2Authorizations: []int64{666}, + }, }) - test.AssertNotError(t, err, "NewOrder failed") + test.AssertNotError(t, err, "NewOrderAndAuthzs failed") _, err = sa.GetOrder(context.Background(), &sapb.OrderRequest{ Id: order.Id, }) @@ -2385,25 +2751,24 @@ func TestGetOrderExpired(t *testing.T) { } func TestBlockedKey(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() + sa, _ := initSA(t) hashA := make([]byte, 32) hashA[0] = 1 hashB := make([]byte, 32) hashB[0] = 2 - added := time.Now().UnixNano() + added := time.Now() source := "API" _, err := sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ KeyHash: hashA, - Added: added, + Added: timestamppb.New(added), Source: source, }) test.AssertNotError(t, err, "AddBlockedKey failed") _, err = sa.AddBlockedKey(context.Background(), 
&sapb.AddBlockedKeyRequest{ KeyHash: hashA, - Added: added, + Added: timestamppb.New(added), Source: source, }) test.AssertNotError(t, err, "AddBlockedKey failed with duplicate insert") @@ -2411,25 +2776,25 @@ func TestBlockedKey(t *testing.T) { comment := "testing comments" _, err = sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ KeyHash: hashB, - Added: added, + Added: timestamppb.New(added), Source: source, Comment: comment, }) test.AssertNotError(t, err, "AddBlockedKey failed") - exists, err := sa.KeyBlocked(context.Background(), &sapb.KeyBlockedRequest{ + exists, err := sa.KeyBlocked(context.Background(), &sapb.SPKIHash{ KeyHash: hashA, }) test.AssertNotError(t, err, "KeyBlocked failed") test.Assert(t, exists != nil, "*sapb.Exists is nil") test.Assert(t, exists.Exists, "KeyBlocked returned false for blocked key") - exists, err = sa.KeyBlocked(context.Background(), &sapb.KeyBlockedRequest{ + exists, err = sa.KeyBlocked(context.Background(), &sapb.SPKIHash{ KeyHash: hashB, }) test.AssertNotError(t, err, "KeyBlocked failed") test.Assert(t, exists != nil, "*sapb.Exists is nil") test.Assert(t, exists.Exists, "KeyBlocked returned false for blocked key") - exists, err = sa.KeyBlocked(context.Background(), &sapb.KeyBlockedRequest{ + exists, err = sa.KeyBlocked(context.Background(), &sapb.SPKIHash{ KeyHash: []byte{5}, }) test.AssertNotError(t, err, "KeyBlocked failed") @@ -2438,12 +2803,11 @@ func TestBlockedKey(t *testing.T) { } func TestAddBlockedKeyUnknownSource(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() + sa, fc := initSA(t) _, err := sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ KeyHash: []byte{1, 2, 3}, - Added: 1, + Added: timestamppb.New(fc.Now()), Source: "heyo", }) test.AssertError(t, err, "AddBlockedKey didn't fail with unknown source") @@ -2451,52 +2815,1749 @@ func TestAddBlockedKeyUnknownSource(t *testing.T) { } func TestBlockedKeyRevokedBy(t *testing.T) { - sa, _, cleanUp := initSA(t) - defer cleanUp() - - err := features.Set(map[string]bool{"StoreRevokerInfo": true}) - test.AssertNotError(t, err, "failed to set features") - defer features.Reset() + sa, fc := initSA(t) - _, err = sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ + now := fc.Now() + _, err := sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ KeyHash: []byte{1}, - Added: 1, + Added: timestamppb.New(now), Source: "API", }) test.AssertNotError(t, err, "AddBlockedKey failed") _, err = sa.AddBlockedKey(context.Background(), &sapb.AddBlockedKeyRequest{ KeyHash: []byte{2}, - Added: 1, + Added: timestamppb.New(now), Source: "API", RevokedBy: 1, }) test.AssertNotError(t, err, "AddBlockedKey failed") } -func TestHashNames(t *testing.T) { - // Test that it is deterministic - h1 := HashNames([]string{"a"}) - h2 := HashNames([]string{"a"}) - test.AssertByteEquals(t, h1, h2) +func TestIncidentsForSerial(t *testing.T) { + sa, _ := initSA(t) + + testSADbMap, err := DBMapForTest(vars.DBConnSAFullPerms) + test.AssertNotError(t, err, "Couldn't create test dbMap") + + testIncidentsDbMap, err := DBMapForTest(vars.DBConnIncidentsFullPerms) + test.AssertNotError(t, err, "Couldn't create test dbMap") + defer test.ResetIncidentsTestDatabase(t) + + weekAgo := sa.clk.Now().Add(-time.Hour * 24 * 7) + + // Add a disabled incident. 
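In the blocked-key tests above, KeyHash is an SPKI digest; this hunk even renames the lookup request message to sapb.SPKIHash. A sketch of producing such a hash, assuming the convention is a SHA-256 digest of the DER-encoded SubjectPublicKeyInfo:

	import (
		"crypto"
		"crypto/sha256"
		"crypto/x509"
	)

	func spkiHash(pub crypto.PublicKey) ([32]byte, error) {
		der, err := x509.MarshalPKIXPublicKey(pub) // DER SubjectPublicKeyInfo
		if err != nil {
			return [32]byte{}, err
		}
		return sha256.Sum256(der), nil
	}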
+	err = testSADbMap.Insert(ctx, &incidentModel{
+		SerialTable: "incident_foo",
+		URL:         "https://example.com/foo-incident",
+		RenewBy:     sa.clk.Now().Add(time.Hour * 24 * 7),
+		Enabled:     false,
+	})
+	test.AssertNotError(t, err, "Failed to insert disabled incident")
+
+	// The only incident is disabled, so this should return no incidents.
+	result, err := sa.IncidentsForSerial(context.Background(), &sapb.Serial{Serial: "1337"})
+	test.AssertNotError(t, err, "fetching from no incidents")
+	test.AssertEquals(t, len(result.Incidents), 0)
+
+	// Add an enabled incident.
+	err = testSADbMap.Insert(ctx, &incidentModel{
+		SerialTable: "incident_bar",
+		URL:         "https://example.com/test-incident",
+		RenewBy:     sa.clk.Now().Add(time.Hour * 24 * 7),
+		Enabled:     true,
+	})
+	test.AssertNotError(t, err, "Failed to insert enabled incident")
+
+	// Add a row to the incident table with serial '1338'.
+	one := int64(1)
+	affectedCertA := incidentSerialModel{
+		Serial:         "1338",
+		RegistrationID: &one,
+		OrderID:        &one,
+		LastNoticeSent: &weekAgo,
+	}
+	_, err = testIncidentsDbMap.ExecContext(ctx,
+		fmt.Sprintf("INSERT INTO incident_bar (%s) VALUES ('%s', %d, %d, '%s')",
+			"serial, registrationID, orderID, lastNoticeSent",
+			affectedCertA.Serial,
+			affectedCertA.RegistrationID,
+			affectedCertA.OrderID,
+			affectedCertA.LastNoticeSent.Format(time.DateTime),
+		),
+	)
+	test.AssertNotError(t, err, "Error while inserting row for '1338' into incident table")
+
+	// The incident table should not contain a row with serial '1337'.
+	result, err = sa.IncidentsForSerial(context.Background(), &sapb.Serial{Serial: "1337"})
+	test.AssertNotError(t, err, "fetching from one incident")
+	test.AssertEquals(t, len(result.Incidents), 0)
+
+	// Add a row to the incident table with serial '1337'.
+	two := int64(2)
+	affectedCertB := incidentSerialModel{
+		Serial:         "1337",
+		RegistrationID: &two,
+		OrderID:        &two,
+		LastNoticeSent: &weekAgo,
+	}
+	_, err = testIncidentsDbMap.ExecContext(ctx,
+		fmt.Sprintf("INSERT INTO incident_bar (%s) VALUES ('%s', %d, %d, '%s')",
+			"serial, registrationID, orderID, lastNoticeSent",
+			affectedCertB.Serial,
+			affectedCertB.RegistrationID,
+			affectedCertB.OrderID,
+			affectedCertB.LastNoticeSent.Format(time.DateTime),
+		),
+	)
+	test.AssertNotError(t, err, "Error while inserting row for '1337' into incident table")
+
+	// The incident table should now contain a row with serial '1337'.
+	result, err = sa.IncidentsForSerial(context.Background(), &sapb.Serial{Serial: "1337"})
+	test.AssertNotError(t, err, "Failed to retrieve incidents for serial")
+	test.AssertEquals(t, len(result.Incidents), 1)
+}
+
+func TestSerialsForIncident(t *testing.T) {
+	sa, _ := initSA(t)
+
+	testIncidentsDbMap, err := DBMapForTest(vars.DBConnIncidentsFullPerms)
+	test.AssertNotError(t, err, "Couldn't create test dbMap")
+	defer test.ResetIncidentsTestDatabase(t)
+
+	// Request serials from a malformed incident table name.
+	mockServerStream := &fakeServerStream[sapb.IncidentSerial]{}
+	err = sa.SerialsForIncident(
+		&sapb.SerialsForIncidentRequest{
+			IncidentTable: "incidesnt_Baz",
+		},
+		mockServerStream,
+	)
+	test.AssertError(t, err, "Expected error for malformed table name")
+	test.AssertContains(t, err.Error(), "malformed table name \"incidesnt_Baz\"")
+
+	// Request serials from another malformed incident table name.
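+	// (This one exceeds the 100-character limit enforced by validIncidentTableRegexp.)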
+	mockServerStream = &fakeServerStream[sapb.IncidentSerial]{}
+	longTableName := "incident_l" + strings.Repeat("o", 1000) + "ng"
+	err = sa.SerialsForIncident(
+		&sapb.SerialsForIncidentRequest{
+			IncidentTable: longTableName,
+		},
+		mockServerStream,
+	)
+	test.AssertError(t, err, "Expected error for long table name")
+	test.AssertContains(t, err.Error(), fmt.Sprintf("malformed table name %q", longTableName))
+
+	// Request serials for an incident table which doesn't exist.
+	mockServerStream = &fakeServerStream[sapb.IncidentSerial]{}
+	err = sa.SerialsForIncident(
+		&sapb.SerialsForIncidentRequest{
+			IncidentTable: "incident_baz",
+		},
+		mockServerStream,
+	)
+	test.AssertError(t, err, "Expected error for nonexistent table name")
+
+	// Assert that the error is a MySQL error so we can inspect the error code.
+	var mysqlErr *mysql.MySQLError
+	if errors.As(err, &mysqlErr) {
+		// We expect the error code to be 1146 (ER_NO_SUCH_TABLE):
+		// https://mariadb.com/kb/en/mariadb-error-codes/
+		test.AssertEquals(t, mysqlErr.Number, uint16(1146))
+	} else {
+		t.Fatalf("Expected MySQL Error 1146 (ER_NO_SUCH_TABLE) from Recv(), got %q", err)
+	}

-	// Test that it differentiates
-	h1 = HashNames([]string{"a"})
-	h2 = HashNames([]string{"b"})
-	test.Assert(t, !bytes.Equal(h1, h2), "Should have been different")
+	// Request serials from table 'incident_foo', which we expect to exist but
+	// be empty.
+	stream := make(chan *sapb.IncidentSerial)
+	mockServerStream = &fakeServerStream[sapb.IncidentSerial]{output: stream}
+	go func() {
+		err = sa.SerialsForIncident(
+			&sapb.SerialsForIncidentRequest{
+				IncidentTable: "incident_foo",
+			},
+			mockServerStream,
+		)
+		close(stream) // Let our main test thread continue.
+	}()
+	for range stream {
+		t.Fatal("No serials should have been written to this stream")
+	}
+	test.AssertNotError(t, err, "Error calling SerialsForIncident on empty table")

-	// Test that it is not subject to ordering
-	h1 = HashNames([]string{"a", "b"})
-	h2 = HashNames([]string{"b", "a"})
-	test.AssertByteEquals(t, h1, h2)
+	// Add 4 rows of incident serials to 'incident_foo'.
+	expectedSerials := map[string]bool{
+		"1335": true, "1336": true, "1337": true, "1338": true,
+	}
+	for i := range expectedSerials {
+		randInt := func() int64 { return mrand.Int64() }
+		_, err := testIncidentsDbMap.ExecContext(ctx,
+			fmt.Sprintf("INSERT INTO incident_foo (%s) VALUES ('%s', %d, %d, '%s')",
+				"serial, registrationID, orderID, lastNoticeSent",
+				i,
+				randInt(),
+				randInt(),
+				sa.clk.Now().Add(time.Hour*24*7).Format(time.DateTime),
+			),
+		)
+		test.AssertNotError(t, err, fmt.Sprintf("Error while inserting row for '%s' into incident table", i))
+	}

-	// Test that it is not subject to case
-	h1 = HashNames([]string{"a", "b"})
-	h2 = HashNames([]string{"A", "B"})
-	test.AssertByteEquals(t, h1, h2)
+	// Request all 4 serials from the incident table we just added entries to.
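+	// Each serial should arrive as its own message on the fake server stream;
+	// the loop below also verifies that no serial is delivered twice.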
+	stream = make(chan *sapb.IncidentSerial)
+	mockServerStream = &fakeServerStream[sapb.IncidentSerial]{output: stream}
+	go func() {
+		err = sa.SerialsForIncident(
+			&sapb.SerialsForIncidentRequest{
+				IncidentTable: "incident_foo",
+			},
+			mockServerStream,
+		)
+		close(stream)
+	}()
+	receivedSerials := make(map[string]bool)
+	for serial := range stream {
+		if len(receivedSerials) > 4 {
+			t.Fatal("Received too many serials")
+		}
+		if _, ok := receivedSerials[serial.Serial]; ok {
+			t.Fatalf("Received serial %q more than once", serial.Serial)
+		}
+		receivedSerials[serial.Serial] = true
+	}
+	test.AssertDeepEquals(t, receivedSerials, map[string]bool{
+		"1335": true, "1336": true, "1337": true, "1338": true,
+	})
+	test.AssertNotError(t, err, "Error getting serials for incident")
+}
+
+func TestGetRevokedCertsByShard(t *testing.T) {
+	sa, _ := initSA(t)
+
+	// Add a cert to the DB to test with. We use AddPrecertificate because it sets
+	// up the certificateStatus row we need. This particular cert has a notAfter
+	// date of Mar 6 2023, and we lie about its IssuerNameID to make things easy.
+	reg := createWorkingRegistration(t, sa)
+	eeCert, err := core.LoadCert("../test/hierarchy/ee-e1.cert.pem")
+	test.AssertNotError(t, err, "failed to load test cert")
+	_, err = sa.AddSerial(ctx, &sapb.AddSerialRequest{
+		RegID:   reg.Id,
+		Serial:  core.SerialToString(eeCert.SerialNumber),
+		Created: timestamppb.New(eeCert.NotBefore),
+		Expires: timestamppb.New(eeCert.NotAfter),
+	})
+	test.AssertNotError(t, err, "failed to add test serial")
+	_, err = sa.AddPrecertificate(ctx, &sapb.AddCertificateRequest{
+		Der:          eeCert.Raw,
+		RegID:        reg.Id,
+		Issued:       timestamppb.New(eeCert.NotBefore),
+		IssuerNameID: 1,
+	})
+	test.AssertNotError(t, err, "failed to add test cert")
+
+	// Check that it worked.
+	status, err := sa.GetCertificateStatus(
+		ctx, &sapb.Serial{Serial: core.SerialToString(eeCert.SerialNumber)})
+	test.AssertNotError(t, err, "GetCertificateStatus failed")
+	test.AssertEquals(t, core.OCSPStatus(status.Status), core.OCSPStatusGood)
+
+	// Here's a little helper func we'll use to call GetRevokedCertsByShard and count
+	// how many results it returns.
+	countRevokedCerts := func(req *sapb.GetRevokedCertsByShardRequest) (int, error) {
+		stream := make(chan *corepb.CRLEntry)
+		mockServerStream := &fakeServerStream[corepb.CRLEntry]{output: stream}
+		var err error
+		go func() {
+			err = sa.GetRevokedCertsByShard(req, mockServerStream)
+			close(stream)
+		}()
+		entriesReceived := 0
+		for range stream {
+			entriesReceived++
+		}
+		return entriesReceived, err
+	}
+
+	// The basic request covers a time range and shard that should include this certificate.
+	basicRequest := &sapb.GetRevokedCertsByShardRequest{
+		IssuerNameID:  1,
+		ShardIdx:      9,
+		ExpiresAfter:  mustTimestamp("2023-03-01 00:00"),
+		RevokedBefore: mustTimestamp("2023-04-01 00:00"),
+	}
+
+	// Nothing's been revoked yet. Count should be zero.
+	count, err := countRevokedCerts(basicRequest)
+	test.AssertNotError(t, err, "zero rows shouldn't result in error")
+	test.AssertEquals(t, count, 0)
+
+	// Revoke the certificate, providing the ShardIdx so it gets written into
+	// both the certificateStatus and revokedCertificates tables.
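+	// (The revocation date used below, 2023-01-01, falls before the
+	// RevokedBefore cutoff of 2023-04-01 in basicRequest.)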
+ _, err = sa.RevokeCertificate(context.Background(), &sapb.RevokeCertificateRequest{ + IssuerID: 1, + Serial: core.SerialToString(eeCert.SerialNumber), + Date: mustTimestamp("2023-01-01 00:00"), + Reason: 1, + Response: []byte{1, 2, 3}, + ShardIdx: 9, + }) + test.AssertNotError(t, err, "failed to revoke test cert") + + // Check that it worked in the most basic way. + c, err := sa.dbMap.SelectNullInt( + ctx, "SELECT count(*) FROM revokedCertificates") + test.AssertNotError(t, err, "SELECT from revokedCertificates failed") + test.Assert(t, c.Valid, "SELECT from revokedCertificates got no result") + test.AssertEquals(t, c.Int64, int64(1)) + + // Asking for revoked certs now should return one result. + count, err = countRevokedCerts(basicRequest) + test.AssertNotError(t, err, "normal usage shouldn't result in error") + test.AssertEquals(t, count, 1) + + // Asking for revoked certs from a different issuer should return zero results. + count, err = countRevokedCerts(&sapb.GetRevokedCertsByShardRequest{ + IssuerNameID: 5678, + ShardIdx: basicRequest.ShardIdx, + ExpiresAfter: basicRequest.ExpiresAfter, + RevokedBefore: basicRequest.RevokedBefore, + }) + test.AssertNotError(t, err, "zero rows shouldn't result in error") + test.AssertEquals(t, count, 0) + + // Asking for revoked certs from a different shard should return zero results. + count, err = countRevokedCerts(&sapb.GetRevokedCertsByShardRequest{ + IssuerNameID: basicRequest.IssuerNameID, + ShardIdx: 8, + ExpiresAfter: basicRequest.ExpiresAfter, + RevokedBefore: basicRequest.RevokedBefore, + }) + test.AssertNotError(t, err, "zero rows shouldn't result in error") + test.AssertEquals(t, count, 0) + + // Asking for revoked certs with an old RevokedBefore should return no results. + count, err = countRevokedCerts(&sapb.GetRevokedCertsByShardRequest{ + IssuerNameID: basicRequest.IssuerNameID, + ShardIdx: basicRequest.ShardIdx, + ExpiresAfter: basicRequest.ExpiresAfter, + RevokedBefore: mustTimestamp("2020-03-01 00:00"), + }) + test.AssertNotError(t, err, "zero rows shouldn't result in error") + test.AssertEquals(t, count, 0) +} + +func TestLeaseOldestCRLShard(t *testing.T) { + sa, clk := initSA(t) + + // Create 8 shards: 4 for each of 2 issuers. For each issuer, one shard is + // currently leased, three are available, and one of those failed to update. + _, err := sa.dbMap.ExecContext(ctx, + `INSERT INTO crlShards (issuerID, idx, thisUpdate, nextUpdate, leasedUntil) VALUES + (1, 0, ?, ?, ?), + (1, 1, ?, ?, ?), + (1, 2, ?, ?, ?), + (1, 3, NULL, NULL, ?), + (2, 0, ?, ?, ?), + (2, 1, ?, ?, ?), + (2, 2, ?, ?, ?), + (2, 3, NULL, NULL, ?);`, + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + ) + test.AssertNotError(t, err, "setting up test shards") + + until := clk.Now().Add(time.Hour).Truncate(time.Second).UTC() + var untilModel struct { + LeasedUntil time.Time `db:"leasedUntil"` + } + + // Leasing from a fully-leased subset should fail. 
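+	// (Shard 0 of issuer 1 was set up above with a leasedUntil an hour in the future.)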
+ _, err = sa.leaseOldestCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 0, + MaxShardIdx: 0, + Until: timestamppb.New(until), + }, + ) + test.AssertError(t, err, "leasing when all shards are leased") + + // Leasing any known shard should return the never-before-leased one (3). + res, err := sa.leaseOldestCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 0, + MaxShardIdx: 3, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing available shard") + test.AssertEquals(t, res.IssuerNameID, int64(1)) + test.AssertEquals(t, res.ShardIdx, int64(3)) + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") + + // Leasing any known shard *again* should now return the oldest one (1). + res, err = sa.leaseOldestCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 0, + MaxShardIdx: 3, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing available shard") + test.AssertEquals(t, res.IssuerNameID, int64(1)) + test.AssertEquals(t, res.ShardIdx, int64(1)) + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") + + // Leasing from a superset of known shards should succeed and return one of + // the previously-unknown shards. + res, err = sa.leaseOldestCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 2, + MinShardIdx: 0, + MaxShardIdx: 7, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing available shard") + test.AssertEquals(t, res.IssuerNameID, int64(2)) + test.Assert(t, res.ShardIdx >= 4, "checking leased index") + test.Assert(t, res.ShardIdx <= 7, "checking leased index") + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") +} + +func TestLeaseSpecificCRLShard(t *testing.T) { + sa, clk := initSA(t) + + // Create 8 shards: 4 for each of 2 issuers. For each issuer, one shard is + // currently leased, three are available, and one of those failed to update. 
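+	// (This is the same layout used by TestLeaseOldestCRLShard above.)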
+ _, err := sa.dbMap.ExecContext(ctx, + `INSERT INTO crlShards (issuerID, idx, thisUpdate, nextUpdate, leasedUntil) VALUES + (1, 0, ?, ?, ?), + (1, 1, ?, ?, ?), + (1, 2, ?, ?, ?), + (1, 3, NULL, NULL, ?), + (2, 0, ?, ?, ?), + (2, 1, ?, ?, ?), + (2, 2, ?, ?, ?), + (2, 3, NULL, NULL, ?);`, + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + ) + test.AssertNotError(t, err, "setting up test shards") + + until := clk.Now().Add(time.Hour).Truncate(time.Second).UTC() + var untilModel struct { + LeasedUntil time.Time `db:"leasedUntil"` + } + + // Leasing an unleased shard should work. + res, err := sa.leaseSpecificCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 1, + MaxShardIdx: 1, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing available shard") + test.AssertEquals(t, res.IssuerNameID, int64(1)) + test.AssertEquals(t, res.ShardIdx, int64(1)) + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") + + // Leasing a never-before-leased shard should work. + res, err = sa.leaseSpecificCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 2, + MinShardIdx: 3, + MaxShardIdx: 3, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing available shard") + test.AssertEquals(t, res.IssuerNameID, int64(2)) + test.AssertEquals(t, res.ShardIdx, int64(3)) + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") + + // Leasing a previously-unknown specific shard should work (to ease the + // transition into using leasing). + res, err = sa.leaseSpecificCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 9, + MaxShardIdx: 9, + Until: timestamppb.New(until), + }, + ) + test.AssertNotError(t, err, "leasing unknown shard") + + err = sa.dbMap.SelectOne( + ctx, + &untilModel, + `SELECT leasedUntil FROM crlShards WHERE issuerID = ? AND idx = ? LIMIT 1`, + res.IssuerNameID, + res.ShardIdx, + ) + test.AssertNotError(t, err, "getting updated lease timestamp") + test.Assert(t, untilModel.LeasedUntil.Equal(until), "checking updated lease timestamp") + + // Leasing a leased shard should fail. 
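+	// (Shard 0 of issuer 1 was given a leasedUntil an hour in the future above.)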
+ _, err = sa.leaseSpecificCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 0, + MaxShardIdx: 0, + Until: timestamppb.New(until), + }, + ) + test.AssertError(t, err, "leasing unavailable shard") + + // Leasing more than one shard should fail. + _, err = sa.leaseSpecificCRLShard( + context.Background(), + &sapb.LeaseCRLShardRequest{ + IssuerNameID: 1, + MinShardIdx: 1, + MaxShardIdx: 2, + Until: timestamppb.New(until), + }, + ) + test.AssertError(t, err, "did not lease one specific shard") +} + +func TestUpdateCRLShard(t *testing.T) { + sa, clk := initSA(t) + + // Create 8 shards: 4 for each of 2 issuers. For each issuer, one shard is + // currently leased, three are available, and one of those failed to update. + _, err := sa.dbMap.ExecContext(ctx, + `INSERT INTO crlShards (issuerID, idx, thisUpdate, nextUpdate, leasedUntil) VALUES + (1, 0, ?, ?, ?), + (1, 1, ?, ?, ?), + (1, 2, ?, ?, ?), + (1, 3, NULL, NULL, ?), + (2, 0, ?, ?, ?), + (2, 1, ?, ?, ?), + (2, 2, ?, ?, ?), + (2, 3, NULL, NULL, ?);`, + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + clk.Now().Add(-7*24*time.Hour), clk.Now().Add(3*24*time.Hour), clk.Now().Add(time.Hour), + clk.Now().Add(-6*24*time.Hour), clk.Now().Add(4*24*time.Hour), clk.Now().Add(-6*24*time.Hour), + clk.Now().Add(-5*24*time.Hour), clk.Now().Add(5*24*time.Hour), clk.Now().Add(-5*24*time.Hour), + clk.Now().Add(-4*24*time.Hour), + ) + test.AssertNotError(t, err, "setting up test shards") + + thisUpdate := clk.Now().Truncate(time.Second).UTC() + var crlModel struct { + ThisUpdate *time.Time + NextUpdate *time.Time + } + + // Updating a leased shard should work. + _, err = sa.UpdateCRLShard( + context.Background(), + &sapb.UpdateCRLShardRequest{ + IssuerNameID: 1, + ShardIdx: 0, + ThisUpdate: timestamppb.New(thisUpdate), + NextUpdate: timestamppb.New(thisUpdate.Add(10 * 24 * time.Hour)), + }, + ) + test.AssertNotError(t, err, "updating leased shard") + + err = sa.dbMap.SelectOne( + ctx, + &crlModel, + `SELECT thisUpdate FROM crlShards WHERE issuerID = 1 AND idx = 0 LIMIT 1`, + ) + test.AssertNotError(t, err, "getting updated thisUpdate timestamp") + test.Assert(t, crlModel.ThisUpdate.Equal(thisUpdate), "checking updated thisUpdate timestamp") + + // Updating an unleased shard should work. + _, err = sa.UpdateCRLShard( + context.Background(), + &sapb.UpdateCRLShardRequest{ + IssuerNameID: 1, + ShardIdx: 1, + ThisUpdate: timestamppb.New(thisUpdate), + NextUpdate: timestamppb.New(thisUpdate.Add(10 * 24 * time.Hour)), + }, + ) + test.AssertNotError(t, err, "updating unleased shard") + + err = sa.dbMap.SelectOne( + ctx, + &crlModel, + `SELECT thisUpdate FROM crlShards WHERE issuerID = 1 AND idx = 1 LIMIT 1`, + ) + test.AssertNotError(t, err, "getting updated thisUpdate timestamp") + test.Assert(t, crlModel.ThisUpdate.Equal(thisUpdate), "checking updated thisUpdate timestamp") + + // Updating without supplying a NextUpdate should work. 
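+	// The row's nextUpdate column should end up NULL, as asserted below.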
+	_, err = sa.UpdateCRLShard(
+		context.Background(),
+		&sapb.UpdateCRLShardRequest{
+			IssuerNameID: 1,
+			ShardIdx:     3,
+			ThisUpdate:   timestamppb.New(thisUpdate.Add(time.Second)),
+		},
+	)
+	test.AssertNotError(t, err, "updating shard without NextUpdate")
+
+	err = sa.dbMap.SelectOne(
+		ctx,
+		&crlModel,
+		`SELECT nextUpdate FROM crlShards WHERE issuerID = 1 AND idx = 3 LIMIT 1`,
+	)
+	test.AssertNotError(t, err, "getting updated nextUpdate timestamp")
+	test.AssertBoxedNil(t, crlModel.NextUpdate, "checking updated nextUpdate timestamp")
+
+	// Updating a shard to an earlier time should fail.
+	_, err = sa.UpdateCRLShard(
+		context.Background(),
+		&sapb.UpdateCRLShardRequest{
+			IssuerNameID: 1,
+			ShardIdx:     1,
+			ThisUpdate:   timestamppb.New(thisUpdate.Add(-24 * time.Hour)),
+			NextUpdate:   timestamppb.New(thisUpdate.Add(9 * 24 * time.Hour)),
+		},
+	)
+	test.AssertError(t, err, "updating shard to an earlier time")
+
+	// Updating an unknown shard should fail.
+	_, err = sa.UpdateCRLShard(
+		context.Background(),
+		&sapb.UpdateCRLShardRequest{
+			IssuerNameID: 1,
+			ShardIdx:     4,
+			ThisUpdate:   timestamppb.New(thisUpdate),
+			NextUpdate:   timestamppb.New(thisUpdate.Add(10 * 24 * time.Hour)),
+		},
+	)
+	test.AssertError(t, err, "updating an unknown shard")
+}
+
+func TestReplacementOrderExists(t *testing.T) {
+	sa, fc := initSA(t)
+
+	oldCertSerial := "1234567890"
+
+	// Check that a non-existent replacement order does not exist.
+	exists, err := sa.ReplacementOrderExists(ctx, &sapb.Serial{Serial: oldCertSerial})
+	test.AssertNotError(t, err, "failed to check for replacement order")
+	test.Assert(t, !exists.Exists, "replacement for non-existent serial should not exist")
+
+	// Create a test registration to reference.
+	reg := createWorkingRegistration(t, sa)
+
+	// Add one valid authz.
+	expires := fc.Now().Add(time.Hour)
+	attemptedAt := fc.Now()
+	authzID := createFinalizedAuthorization(t, sa, reg.Id, identifier.NewDNS("example.com"), expires, "valid", attemptedAt)
+
+	// Add a new order in pending status with no certificate serial.
+	expires1Year := sa.clk.Now().Add(365 * 24 * time.Hour)
+	order, err := sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{
+		NewOrder: &sapb.NewOrderRequest{
+			RegistrationID:   reg.Id,
+			Expires:          timestamppb.New(expires1Year),
+			Identifiers:      []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
+			V2Authorizations: []int64{authzID},
+		},
+	})
+	test.AssertNotError(t, err, "NewOrderAndAuthzs failed")
+
+	// Set the order to processing so it can be finalized.
+	_, err = sa.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: order.Id})
+	test.AssertNotError(t, err, "SetOrderProcessing failed")
+
+	// Finalize the order with certificate serial oldCertSerial.
+	order.CertificateSerial = oldCertSerial
+	_, err = sa.FinalizeOrder(ctx, &sapb.FinalizeOrderRequest{Id: order.Id, CertificateSerial: order.CertificateSerial})
+	test.AssertNotError(t, err, "FinalizeOrder failed")
+
+	// Create a replacement order.
+	order, err = sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{
+		NewOrder: &sapb.NewOrderRequest{
+			RegistrationID:   reg.Id,
+			Expires:          timestamppb.New(expires1Year),
+			Identifiers:      []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
+			V2Authorizations: []int64{authzID},
+			ReplacesSerial:   oldCertSerial,
+		},
+	})
+	test.AssertNotError(t, err, "NewOrderAndAuthzs failed")
+
+	// Check that a pending replacement order exists.
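+	// (The checks below confirm it continues to exist as the order moves
+	// through processing and finalization.)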
+	exists, err = sa.ReplacementOrderExists(ctx, &sapb.Serial{Serial: oldCertSerial})
+	test.AssertNotError(t, err, "failed to check for replacement order")
+	test.Assert(t, exists.Exists, "replacement order should exist")
+
+	// Set the order to processing so it can be finalized.
+	_, err = sa.SetOrderProcessing(ctx, &sapb.OrderRequest{Id: order.Id})
+	test.AssertNotError(t, err, "SetOrderProcessing failed")
+
+	// Check that a replacement order in processing still exists.
+	exists, err = sa.ReplacementOrderExists(ctx, &sapb.Serial{Serial: oldCertSerial})
+	test.AssertNotError(t, err, "failed to check for replacement order")
+	test.Assert(t, exists.Exists, "replacement order in processing should still exist")
+
+	order.CertificateSerial = "0123456789"
+	_, err = sa.FinalizeOrder(ctx, &sapb.FinalizeOrderRequest{Id: order.Id, CertificateSerial: order.CertificateSerial})
+	test.AssertNotError(t, err, "FinalizeOrder failed")
+
+	// Check that a finalized replacement order still exists.
+	exists, err = sa.ReplacementOrderExists(ctx, &sapb.Serial{Serial: oldCertSerial})
+	test.AssertNotError(t, err, "failed to check for replacement order")
+	test.Assert(t, exists.Exists, "finalized replacement order should still exist")
+
+	// Try updating the replacement order.
+
+	// Create a second replacement order for the same serial.
+	newReplacementOrder, err := sa.NewOrderAndAuthzs(ctx, &sapb.NewOrderAndAuthzsRequest{
+		NewOrder: &sapb.NewOrderRequest{
+			RegistrationID:   reg.Id,
+			Expires:          timestamppb.New(expires1Year),
+			Identifiers:      []*corepb.Identifier{identifier.NewDNS("example.com").ToProto()},
+			V2Authorizations: []int64{authzID},
+			ReplacesSerial:   oldCertSerial,
+		},
+	})
+	test.AssertNotError(t, err, "NewOrderAndAuthzs failed")
+
+	// Fetch the replacement order so we can ensure it was updated.
+	var replacementRow replacementOrderModel
+	err = sa.dbReadOnlyMap.SelectOne(
+		ctx,
+		&replacementRow,
+		"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
+		oldCertSerial,
+	)
+	test.AssertNotError(t, err, "SELECT from replacementOrders failed")
+	test.AssertEquals(t, newReplacementOrder.Id, replacementRow.OrderID)
+	test.AssertEquals(t, newReplacementOrder.Expires.AsTime(), replacementRow.OrderExpires)
+}
+
+func TestGetSerialsByKey(t *testing.T) {
+	sa, fc := initSA(t)
+
+	// Insert four rows into keyHashToSerial: two that should match the query,
+	// one that should not match due to keyHash mismatch, and one that should not
+	// match due to being already expired.
+	expectedHash := make([]byte, 32)
+	expectedHash[0] = 1
+	differentHash := make([]byte, 32)
+	differentHash[0] = 2
+	inserts := []keyHashModel{
+		{
+			KeyHash:      expectedHash,
+			CertSerial:   "1",
+			CertNotAfter: fc.Now().Add(time.Hour),
+		},
+		{
+			KeyHash:      expectedHash,
+			CertSerial:   "2",
+			CertNotAfter: fc.Now().Add(2 * time.Hour),
+		},
+		{
+			KeyHash:      expectedHash,
+			CertSerial:   "3",
+			CertNotAfter: fc.Now().Add(-1 * time.Hour),
+		},
+		{
+			KeyHash:      differentHash,
+			CertSerial:   "4",
+			CertNotAfter: fc.Now().Add(time.Hour),
+		},
+	}
+
+	for _, row := range inserts {
+		err := sa.dbMap.Insert(context.Background(), &row)
+		test.AssertNotError(t, err, "inserting test keyHash")
+	}
+
+	// Expect the result stream to have two entries.
+	res := make(chan *sapb.Serial)
+	stream := &fakeServerStream[sapb.Serial]{output: res}
+	var err error
+	go func() {
+		err = sa.GetSerialsByKey(&sapb.SPKIHash{KeyHash: expectedHash}, stream)
+		close(res) // Let our main test thread continue.
+	}()
+
+	var seen []string
+	for serial := range res {
+		if !slices.Contains([]string{"1", "2"}, serial.Serial) {
+			t.Errorf("Received unexpected serial %q", serial.Serial)
+		}
+		if slices.Contains(seen, serial.Serial) {
+			t.Errorf("Received serial %q more than once", serial.Serial)
+		}
+		seen = append(seen, serial.Serial)
+	}
+	test.AssertNotError(t, err, "calling GetSerialsByKey")
+	test.AssertEquals(t, len(seen), 2)
+}
+
+func TestGetSerialsByAccount(t *testing.T) {
+	sa, fc := initSA(t)
+
+	expectedReg := createWorkingRegistration(t, sa)
+
+	// Insert three rows into the serials table: two that should match the query,
+	// and one that should not match due to being already expired. We do not test
+	// filtering on the regID itself here, because our test setup makes it very
+	// hard to insert two fake registration rows with different IDs.
+	inserts := []recordedSerialModel{
+		{
+			Serial:         "1",
+			RegistrationID: expectedReg.Id,
+			Created:        fc.Now().Add(-23 * time.Hour),
+			Expires:        fc.Now().Add(time.Hour),
+		},
+		{
+			Serial:         "2",
+			RegistrationID: expectedReg.Id,
+			Created:        fc.Now().Add(-22 * time.Hour),
+			Expires:        fc.Now().Add(2 * time.Hour),
+		},
+		{
+			Serial:         "3",
+			RegistrationID: expectedReg.Id,
+			Created:        fc.Now().Add(-23 * time.Hour),
+			Expires:        fc.Now().Add(-1 * time.Hour),
+		},
+	}
+
+	for _, row := range inserts {
+		err := sa.dbMap.Insert(context.Background(), &row)
+		test.AssertNotError(t, err, "inserting test serial")
+	}
+
+	// Expect the result stream to have two entries.
+	res := make(chan *sapb.Serial)
+	stream := &fakeServerStream[sapb.Serial]{output: res}
+	var err error
+	go func() {
+		err = sa.GetSerialsByAccount(&sapb.RegistrationID{Id: expectedReg.Id}, stream)
+		close(res) // Let our main test thread continue.
+	}()
+
+	var seen []string
+	for serial := range res {
+		if !slices.Contains([]string{"1", "2"}, serial.Serial) {
+			t.Errorf("Received unexpected serial %q", serial.Serial)
+		}
+		if slices.Contains(seen, serial.Serial) {
+			t.Errorf("Received serial %q more than once", serial.Serial)
+		}
+		seen = append(seen, serial.Serial)
+	}
+	test.AssertNotError(t, err, "calling GetSerialsByAccount")
+	test.AssertEquals(t, len(seen), 2)
+}
+
+func TestUnpauseAccount(t *testing.T) {
+	sa, _ := initSA(t)
+
+	reg := createWorkingRegistration(t, sa)
+
+	tests := []struct {
+		name  string
+		state []pausedModel
+		req   *sapb.RegistrationID
+	}{
+		{
+			name:  "UnpauseAccount with no paused identifiers",
+			state: nil,
+			req:   &sapb.RegistrationID{Id: reg.Id},
+		},
+		{
+			name: "UnpauseAccount with one paused identifier",
+			state: []pausedModel{
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.com",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+			},
+			req: &sapb.RegistrationID{Id: reg.Id},
+		},
+		{
+			name: "UnpauseAccount with multiple paused identifiers",
+			state: []pausedModel{
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.com",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.net",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.org",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+			},
+			req:
&sapb.RegistrationID{Id: reg.Id}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + _, err := sa.dbMap.ExecContext(ctx, "DELETE FROM paused WHERE 1 = 1") + test.AssertNotError(t, err, "cleaning up paused table") + }() + + // Setup table state. + for _, state := range tt.state { + err := sa.dbMap.Insert(ctx, &state) + test.AssertNotError(t, err, "inserting test identifier") + } + + _, err := sa.UnpauseAccount(ctx, tt.req) + test.AssertNotError(t, err, "Unexpected error for UnpauseAccount()") + + // Count the number of paused identifiers. + var count int + err = sa.dbReadOnlyMap.SelectOne( + ctx, + &count, + "SELECT COUNT(*) FROM paused WHERE registrationID = ? AND unpausedAt IS NULL", + tt.req.Id, + ) + test.AssertNotError(t, err, "SELECT COUNT(*) failed") + test.AssertEquals(t, count, 0) + }) + } +} + +func bulkInsertPausedIdentifiers(ctx context.Context, sa *SQLStorageAuthority, regID int64, count int) error { + const batchSize = 1000 + + values := make([]any, 0, batchSize*4) + now := sa.clk.Now().Add(-time.Hour) + batches := (count + batchSize - 1) / batchSize + + for batch := range batches { + query := ` + INSERT INTO paused (registrationID, identifierType, identifierValue, pausedAt) + VALUES` + + start := batch * batchSize + end := min(start+batchSize, count) + + for i := start; i < end; i++ { + if i > start { + query += "," + } + query += "(?, ?, ?, ?)" + values = append(values, regID, identifierTypeToUint[string(identifier.TypeDNS)], fmt.Sprintf("example%d.com", i), now) + } + + _, err := sa.dbMap.ExecContext(ctx, query, values...) + if err != nil { + return fmt.Errorf("bulk inserting paused identifiers: %w", err) + } + values = values[:0] + } + + return nil +} + +func TestUnpauseAccountWithTwoLoops(t *testing.T) { + sa, _ := initSA(t) + + reg := createWorkingRegistration(t, sa) + + err := bulkInsertPausedIdentifiers(ctx, sa, reg.Id, 12000) + test.AssertNotError(t, err, "bulk inserting paused identifiers") + + result, err := sa.UnpauseAccount(ctx, &sapb.RegistrationID{Id: reg.Id}) + test.AssertNotError(t, err, "Unexpected error for UnpauseAccount()") + test.AssertEquals(t, result.Count, int64(12000)) +} + +func TestUnpauseAccountWithMaxLoops(t *testing.T) { + sa, _ := initSA(t) + + reg := createWorkingRegistration(t, sa) + err := bulkInsertPausedIdentifiers(ctx, sa, reg.Id, 50001) + test.AssertNotError(t, err, "bulk inserting paused identifiers") + + result, err := sa.UnpauseAccount(ctx, &sapb.RegistrationID{Id: reg.Id}) + test.AssertNotError(t, err, "Unexpected error for UnpauseAccount()") + test.AssertEquals(t, result.Count, int64(50000)) +} + +func TestPauseIdentifiers(t *testing.T) { + sa, _ := initSA(t) + + reg := createWorkingRegistration(t, sa) + ptrTime := func(t time.Time) *time.Time { + return &t + } + + fourWeeksAgo := sa.clk.Now().Add(-4 * 7 * 24 * time.Hour) + threeWeeksAgo := sa.clk.Now().Add(-3 * 7 * 24 * time.Hour) + + tests := []struct { + name string + state []pausedModel + req *sapb.PauseRequest + want *sapb.PauseIdentifiersResponse + }{ + { + name: "An identifier which is not now or previously paused", + state: nil, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.PauseIdentifiersResponse{ + Paused: 1, + Repaused: 0, + }, + }, + { + name: "One unpaused entry which was previously paused", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: 
identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: fourWeeksAgo, + UnpausedAt: ptrTime(threeWeeksAgo), + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.PauseIdentifiersResponse{ + Paused: 0, + Repaused: 1, + }, + }, + { + name: "One unpaused entry which was previously paused and unpaused less than 2 weeks ago", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: fourWeeksAgo, + UnpausedAt: ptrTime(sa.clk.Now().Add(-13 * 24 * time.Hour)), + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.PauseIdentifiersResponse{ + Paused: 0, + Repaused: 0, + }, + }, + { + name: "An identifier which is currently paused", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: fourWeeksAgo, + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.PauseIdentifiersResponse{ + Paused: 0, + Repaused: 0, + }, + }, + { + name: "Two previously paused entries and one new entry", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: fourWeeksAgo, + UnpausedAt: ptrTime(threeWeeksAgo), + }, + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.net", + }, + PausedAt: fourWeeksAgo, + UnpausedAt: ptrTime(threeWeeksAgo), + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + { + Type: string(identifier.TypeDNS), + Value: "example.net", + }, + { + Type: string(identifier.TypeDNS), + Value: "example.org", + }, + }, + }, + want: &sapb.PauseIdentifiersResponse{ + Paused: 1, + Repaused: 2, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + _, err := sa.dbMap.ExecContext(ctx, "DELETE FROM paused WHERE 1 = 1") + test.AssertNotError(t, err, "cleaning up paused table") + }() + + // Setup table state. 
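+			// (Each case starts from an empty paused table; the deferred
+			// DELETE above cleans up whatever is inserted here.)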
+ for _, state := range tt.state { + err := sa.dbMap.Insert(ctx, &state) + test.AssertNotError(t, err, "inserting test identifier") + } + + got, err := sa.PauseIdentifiers(ctx, tt.req) + test.AssertNotError(t, err, "Unexpected error for PauseIdentifiers()") + test.AssertEquals(t, got.Paused, tt.want.Paused) + test.AssertEquals(t, got.Repaused, tt.want.Repaused) + }) + } +} + +func TestCheckIdentifiersPaused(t *testing.T) { + sa, _ := initSA(t) + + ptrTime := func(t time.Time) *time.Time { + return &t + } + + reg := createWorkingRegistration(t, sa) + tests := []struct { + name string + state []pausedModel + req *sapb.PauseRequest + want *sapb.Identifiers + }{ + { + name: "No paused identifiers", + state: nil, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.Identifiers{ + Identifiers: []*corepb.Identifier{}, + }, + }, + { + name: "One paused identifier", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + want: &sapb.Identifiers{ + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + }, + }, + }, + { + name: "Two paused identifiers, one unpaused", + state: []pausedModel{ + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + }, + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.net", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + }, + { + RegistrationID: reg.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.org", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + UnpausedAt: ptrTime(sa.clk.Now().Add(-time.Minute)), + }, + }, + req: &sapb.PauseRequest{ + RegistrationID: reg.Id, + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + { + Type: string(identifier.TypeDNS), + Value: "example.net", + }, + { + Type: string(identifier.TypeDNS), + Value: "example.org", + }, + }, + }, + want: &sapb.Identifiers{ + Identifiers: []*corepb.Identifier{ + { + Type: string(identifier.TypeDNS), + Value: "example.com", + }, + { + Type: string(identifier.TypeDNS), + Value: "example.net", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + _, err := sa.dbMap.ExecContext(ctx, "DELETE FROM paused WHERE 1 = 1") + test.AssertNotError(t, err, "cleaning up paused table") + }() + + // Setup table state. 
+			for _, state := range tt.state {
+				err := sa.dbMap.Insert(ctx, &state)
+				test.AssertNotError(t, err, "inserting test identifier")
+			}
+
+			got, err := sa.CheckIdentifiersPaused(ctx, tt.req)
+			test.AssertNotError(t, err, "Unexpected error for CheckIdentifiersPaused()")
+			test.AssertDeepEquals(t, got.Identifiers, tt.want.Identifiers)
+		})
+	}
+}
+
+func TestGetPausedIdentifiers(t *testing.T) {
+	sa, _ := initSA(t)
+
+	ptrTime := func(t time.Time) *time.Time {
+		return &t
+	}
+
+	reg := createWorkingRegistration(t, sa)
+
+	tests := []struct {
+		name  string
+		state []pausedModel
+		req   *sapb.RegistrationID
+		want  *sapb.Identifiers
+	}{
+		{
+			name:  "No paused identifiers",
+			state: nil,
+			req:   &sapb.RegistrationID{Id: reg.Id},
+			want: &sapb.Identifiers{
+				Identifiers: []*corepb.Identifier{},
+			},
+		},
+		{
+			name: "One paused identifier",
+			state: []pausedModel{
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.com",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+			},
+			req: &sapb.RegistrationID{Id: reg.Id},
+			want: &sapb.Identifiers{
+				Identifiers: []*corepb.Identifier{
+					{
+						Type:  string(identifier.TypeDNS),
+						Value: "example.com",
+					},
+				},
+			},
+		},
+		{
+			name: "Two paused identifiers, one unpaused",
+			state: []pausedModel{
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.com",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.net",
+					},
+					PausedAt: sa.clk.Now().Add(-time.Hour),
+				},
+				{
+					RegistrationID: reg.Id,
+					identifierModel: identifierModel{
+						Type:  identifierTypeToUint[string(identifier.TypeDNS)],
+						Value: "example.org",
+					},
+					PausedAt:   sa.clk.Now().Add(-time.Hour),
+					UnpausedAt: ptrTime(sa.clk.Now().Add(-time.Minute)),
+				},
+			},
+			req: &sapb.RegistrationID{Id: reg.Id},
+			want: &sapb.Identifiers{
+				Identifiers: []*corepb.Identifier{
+					{
+						Type:  string(identifier.TypeDNS),
+						Value: "example.com",
+					},
+					{
+						Type:  string(identifier.TypeDNS),
+						Value: "example.net",
+					},
+				},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			defer func() {
+				_, err := sa.dbMap.ExecContext(ctx, "DELETE FROM paused WHERE 1 = 1")
+				test.AssertNotError(t, err, "cleaning up paused table")
+			}()
+
+			// Setup table state.
+			for _, state := range tt.state {
+				err := sa.dbMap.Insert(ctx, &state)
+				test.AssertNotError(t, err, "inserting test identifier")
+			}
+
+			got, err := sa.GetPausedIdentifiers(ctx, tt.req)
+			test.AssertNotError(t, err, "Unexpected error for GetPausedIdentifiers()")
+			test.AssertDeepEquals(t, got.Identifiers, tt.want.Identifiers)
+		})
+	}
+}
+
+func TestGetPausedIdentifiersOnlyUnpausesOneAccount(t *testing.T) {
+	sa, _ := initSA(t)
+
+	reg1 := createWorkingRegistration(t, sa)
+	reg2, err := sa.NewRegistration(ctx, &corepb.Registration{
+		Key:       newAcctKey(t),
+		CreatedAt: mustTimestamp("2018-04-01 07:00"),
+		Status:    string(core.StatusValid),
+	})
+	test.AssertNotError(t, err, "creating second registration")
+
+	// Insert two paused identifiers for two different accounts.
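+	// Unpausing reg1 below must leave reg2's paused identifier untouched.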
+ err = sa.dbMap.Insert(ctx, &pausedModel{ + RegistrationID: reg1.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.com", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + }) + test.AssertNotError(t, err, "inserting test identifier") + + err = sa.dbMap.Insert(ctx, &pausedModel{ + RegistrationID: reg2.Id, + identifierModel: identifierModel{ + Type: identifierTypeToUint[string(identifier.TypeDNS)], + Value: "example.net", + }, + PausedAt: sa.clk.Now().Add(-time.Hour), + }) + test.AssertNotError(t, err, "inserting test identifier") + + // Unpause the first account. + _, err = sa.UnpauseAccount(ctx, &sapb.RegistrationID{Id: reg1.Id}) + test.AssertNotError(t, err, "UnpauseAccount failed") + + // Check that the second account's identifier is still paused. + idents, err := sa.GetPausedIdentifiers(ctx, &sapb.RegistrationID{Id: reg2.Id}) + test.AssertNotError(t, err, "GetPausedIdentifiers failed") + test.AssertEquals(t, len(idents.Identifiers), 1) + test.AssertEquals(t, idents.Identifiers[0].Value, "example.net") +} + +func newAcctKey(t *testing.T) []byte { + key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + jwk := &jose.JSONWebKey{Key: key.Public()} + acctKey, err := jwk.MarshalJSON() + test.AssertNotError(t, err, "failed to marshal account key") + return acctKey +} + +func TestUpdateRegistrationKey(t *testing.T) { + sa, _ := initSA(t) + + _, err := sa.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{}) + test.AssertError(t, err, "should not have been able to update registration key without a registration ID") + test.AssertContains(t, err.Error(), "incomplete gRPC request message") + + existingReg, err := sa.NewRegistration(ctx, &corepb.Registration{ + Key: newAcctKey(t), + }) + test.AssertNotError(t, err, "creating new registration") + + tests := []struct { + name string + newJwk []byte + expectedError string + }{ + { + name: "update a valid registration with a new account key", + newJwk: newAcctKey(t), + }, + { + name: "update a valid registration with a duplicate account key", + newJwk: existingReg.Key, + expectedError: "key is already in use for a different account", + }, + { + name: "update a valid registration with a malformed account key", + newJwk: []byte("Eat at Joe's"), + expectedError: "parsing JWK", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reg, err := sa.NewRegistration(ctx, &corepb.Registration{ + Key: newAcctKey(t), + }) + test.AssertNotError(t, err, "creating new registration") + + updatedReg, err := sa.UpdateRegistrationKey(ctx, &sapb.UpdateRegistrationKeyRequest{ + RegistrationID: reg.Id, + Jwk: tt.newJwk, + }) + if tt.expectedError != "" { + test.AssertError(t, err, "should have errored") + test.AssertContains(t, err.Error(), tt.expectedError) + } else { + test.AssertNotError(t, err, "unexpected error for UpdateRegistrationKey()") + test.AssertEquals(t, updatedReg.Id, reg.Id) + test.AssertDeepEquals(t, updatedReg.Key, tt.newJwk) + + refetchedReg, err := sa.GetRegistration(ctx, &sapb.RegistrationID{ + Id: reg.Id, + }) + test.AssertNotError(t, err, "retrieving registration") + test.AssertDeepEquals(t, refetchedReg.Key, tt.newJwk) + } + }) + } +} + +type mockRLOStream struct { + grpc.ServerStream + sent []*sapb.RateLimitOverrideResponse + ctx context.Context +} + +func newMockRLOStream() *mockRLOStream { + return &mockRLOStream{ctx: ctx} +} +func (m *mockRLOStream) Context() context.Context { return m.ctx } +func (m *mockRLOStream) RecvMsg(any) error { return 
io.EOF }
+func (m *mockRLOStream) Send(ov *sapb.RateLimitOverrideResponse) error {
+	m.sent = append(m.sent, ov)
+	return nil
+}
+
+func TestAddRateLimitOverrideInsertThenUpdate(t *testing.T) {
+	if os.Getenv("BOULDER_CONFIG_DIR") != "test/config-next" {
+		// TODO(#8147): Remove this skip.
+		t.Skip("skipping, the overrides table must exist for this test to run")
+	}
+
+	sa, _ := initSA(t)
+
+	expectBucketKey := core.RandomString(10)
+	ov := &sapb.RateLimitOverride{
+		LimitEnum: 1,
+		BucketKey: expectBucketKey,
+		Comment:   "insert",
+		Period:    durationpb.New(time.Hour),
+		Count:     100,
+		Burst:     100,
+	}
+
+	// Insert
+	resp, err := sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov})
+	test.AssertNotError(t, err, "expected successful insert, got error")
+	test.Assert(t, resp.Inserted && resp.Enabled, fmt.Sprintf("expected (Inserted=true, Enabled=true) for initial insert, got (%v,%v)", resp.Inserted, resp.Enabled))
+
+	// Update (change comment)
+	ov.Comment = "updated"
+	resp, err = sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov})
+	test.AssertNotError(t, err, "expected successful update, got error")
+	test.Assert(t, !resp.Inserted && resp.Enabled, fmt.Sprintf("expected (Inserted=false, Enabled=true) for update, got (%v, %v)", resp.Inserted, resp.Enabled))
+
+	got, err := sa.GetRateLimitOverride(ctx, &sapb.GetRateLimitOverrideRequest{LimitEnum: 1, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected GetRateLimitOverride to succeed, got error")
+	test.AssertEquals(t, got.Override.Comment, "updated")
+
+	// Disable
+	_, err = sa.DisableRateLimitOverride(ctx, &sapb.DisableRateLimitOverrideRequest{LimitEnum: 1, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected DisableRateLimitOverride to succeed, got error")
+
+	// Check that it's still disabled.
+	got, err = sa.GetRateLimitOverride(ctx, &sapb.GetRateLimitOverrideRequest{LimitEnum: 1, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected GetRateLimitOverride to succeed, got error")
+	test.Assert(t, !got.Enabled, fmt.Sprintf("expected Enabled=false after disable, got Enabled=%v", got.Enabled))
+
+	// Update (change period, count, and burst)
+	ov.Period = durationpb.New(2 * time.Hour)
+	ov.Count = 200
+	ov.Burst = 200
+	_, err = sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov})
+	test.AssertNotError(t, err, "expected successful update, got error")
+
+	got, err = sa.GetRateLimitOverride(ctx, &sapb.GetRateLimitOverrideRequest{LimitEnum: 1, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected GetRateLimitOverride to succeed, got error")
+	test.AssertEquals(t, got.Override.Period.AsDuration(), 2*time.Hour)
+	test.AssertEquals(t, got.Override.Count, int64(200))
+	test.AssertEquals(t, got.Override.Burst, int64(200))
+}
+
+func TestDisableEnableRateLimitOverride(t *testing.T) {
+	if os.Getenv("BOULDER_CONFIG_DIR") != "test/config-next" {
+		// TODO(#8147): Remove this skip.
+		t.Skip("skipping, the overrides table must exist for this test to run")
+	}
+
+	sa, _ := initSA(t)
+
+	expectBucketKey := core.RandomString(10)
+	ov := &sapb.RateLimitOverride{
+		LimitEnum: 2,
+		BucketKey: expectBucketKey,
+		Period:    durationpb.New(time.Hour),
+		Count:     1,
+		Burst:     1,
+		Comment:   "test",
+	}
+	_, _ = sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov})
+
+	// Disable
+	_, err := sa.DisableRateLimitOverride(ctx,
+		&sapb.DisableRateLimitOverrideRequest{LimitEnum: 2, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected DisableRateLimitOverride to succeed, got error")
+
+	st, _ := sa.GetRateLimitOverride(ctx,
+		&sapb.GetRateLimitOverrideRequest{LimitEnum: 2, BucketKey: expectBucketKey})
+	test.Assert(t, !st.Enabled,
+		fmt.Sprintf("expected Enabled=false after disable, got Enabled=%v", st.Enabled))
+
+	// Enable
+	_, err = sa.EnableRateLimitOverride(ctx,
+		&sapb.EnableRateLimitOverrideRequest{LimitEnum: 2, BucketKey: expectBucketKey})
+	test.AssertNotError(t, err, "expected EnableRateLimitOverride to succeed, got error")
+
+	st, _ = sa.GetRateLimitOverride(ctx,
+		&sapb.GetRateLimitOverrideRequest{LimitEnum: 2, BucketKey: expectBucketKey})
+	test.Assert(t, st.Enabled,
+		fmt.Sprintf("expected Enabled=true after enable, got Enabled=%v", st.Enabled))
+}
+
+func TestGetEnabledRateLimitOverrides(t *testing.T) {
+	if os.Getenv("BOULDER_CONFIG_DIR") != "test/config-next" {
+		// TODO(#8147): Remove this skip.
+		t.Skip("skipping, the overrides table must exist for this test to run")
+	}
+
+	sa, _ := initSA(t)
+
+	// Enabled
+	ov1 := &sapb.RateLimitOverride{
+		LimitEnum: 10, BucketKey: "on", Period: durationpb.New(time.Second), Count: 1, Burst: 1, Comment: "on",
+	}
+	// Disabled
+	ov2 := &sapb.RateLimitOverride{
+		LimitEnum: 11, BucketKey: "off", Period: durationpb.New(time.Second), Count: 1, Burst: 1, Comment: "off",
+	}

-	// Test that it is not subject to duplication
-	h1 = HashNames([]string{"a", "a"})
-	h2 = HashNames([]string{"a"})
-	test.AssertByteEquals(t, h1, h2)
+	_, err := sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov1})
+	test.AssertNotError(t, err, "expected successful insert of ov1, got error")
+	_, err = sa.AddRateLimitOverride(ctx, &sapb.AddRateLimitOverrideRequest{Override: ov2})
+	test.AssertNotError(t, err, "expected successful insert of ov2, got error")
+	_, err = sa.DisableRateLimitOverride(ctx, &sapb.DisableRateLimitOverrideRequest{LimitEnum: 11, BucketKey: "off"})
+	test.AssertNotError(t, err, "expected DisableRateLimitOverride of ov2 to succeed, got error")
+	_, err = sa.EnableRateLimitOverride(ctx, &sapb.EnableRateLimitOverrideRequest{LimitEnum: 10, BucketKey: "on"})
+	test.AssertNotError(t, err, "expected EnableRateLimitOverride of ov1 to succeed, got error")
+
+	stream := newMockRLOStream()
+	err = sa.GetEnabledRateLimitOverrides(&emptypb.Empty{}, stream)
+	test.AssertNotError(t, err, "expected streaming enabled overrides to succeed, got error")
+	test.AssertEquals(t, len(stream.sent), 1)
+	test.AssertEquals(t, stream.sent[0].Override.BucketKey, "on")
 }
diff --git a/sa/saro.go b/sa/saro.go
new file mode 100644
index 00000000000..c0be2a689f2
--- /dev/null
+++ b/sa/saro.go
@@ -0,0 +1,1168 @@
+package sa
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/go-jose/go-jose/v4"
+	"github.com/jmhodges/clock"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"google.golang.org/grpc"
+
"google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/letsencrypt/boulder/core" + corepb "github.com/letsencrypt/boulder/core/proto" + "github.com/letsencrypt/boulder/db" + berrors "github.com/letsencrypt/boulder/errors" + "github.com/letsencrypt/boulder/identifier" + blog "github.com/letsencrypt/boulder/log" + sapb "github.com/letsencrypt/boulder/sa/proto" +) + +var ( + validIncidentTableRegexp = regexp.MustCompile(`^incident_[0-9a-zA-Z_]{1,100}$`) +) + +// SQLStorageAuthorityRO defines a read-only subset of a Storage Authority +type SQLStorageAuthorityRO struct { + sapb.UnsafeStorageAuthorityReadOnlyServer + + dbReadOnlyMap *db.WrappedMap + dbIncidentsMap *db.WrappedMap + + // For RPCs that generate multiple, parallelizable SQL queries, this is the + // max parallelism they will use (to avoid consuming too many MariaDB + // threads). + parallelismPerRPC int + + // lagFactor is the amount of time we're willing to delay before retrying a + // request that may have failed due to replication lag. For example, a user + // might create a new account and then immediately create a new order, but + // validating that new-order request requires reading their account info from + // a read-only database replica... which may not have their brand new data + // yet. This value should be less than, but about the same order of magnitude + // as, the observed database replication lag. + lagFactor time.Duration + + clk clock.Clock + log blog.Logger + + // lagFactorCounter is a Prometheus counter that tracks the number of times + // we've retried a query inside of GetRegistration, GetOrder, and + // GetAuthorization2 due to replication lag. It is labeled by method name + // and whether data from the retry attempt was found, notfound, or some + // other error was encountered. + lagFactorCounter *prometheus.CounterVec +} + +var _ sapb.StorageAuthorityReadOnlyServer = (*SQLStorageAuthorityRO)(nil) + +// NewSQLStorageAuthorityRO provides persistence using a SQL backend for +// Boulder. It will modify the given borp.DbMap by adding relevant tables. +func NewSQLStorageAuthorityRO( + dbReadOnlyMap *db.WrappedMap, + dbIncidentsMap *db.WrappedMap, + stats prometheus.Registerer, + parallelismPerRPC int, + lagFactor time.Duration, + clk clock.Clock, + logger blog.Logger, +) (*SQLStorageAuthorityRO, error) { + lagFactorCounter := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "sa_lag_factor", + Help: "A counter of SA lagFactor checks labelled by method and pass/fail", + }, []string{"method", "result"}) + + ssaro := &SQLStorageAuthorityRO{ + dbReadOnlyMap: dbReadOnlyMap, + dbIncidentsMap: dbIncidentsMap, + parallelismPerRPC: parallelismPerRPC, + lagFactor: lagFactor, + clk: clk, + log: logger, + lagFactorCounter: lagFactorCounter, + } + + return ssaro, nil +} + +// GetRegistration obtains a Registration by ID +func (ssa *SQLStorageAuthorityRO) GetRegistration(ctx context.Context, req *sapb.RegistrationID) (*corepb.Registration, error) { + if req == nil || req.Id == 0 { + return nil, errIncompleteRequest + } + + model, err := selectRegistration(ctx, ssa.dbReadOnlyMap, "id", req.Id) + if db.IsNoRows(err) && ssa.lagFactor != 0 { + // GetRegistration is often called to validate a JWK belonging to a brand + // new account whose registrations table row hasn't propagated to the read + // replica yet. If we get a NoRows, wait a little bit and retry, once. 
+ ssa.clk.Sleep(ssa.lagFactor) + model, err = selectRegistration(ctx, ssa.dbReadOnlyMap, "id", req.Id) + if err != nil { + if db.IsNoRows(err) { + ssa.lagFactorCounter.WithLabelValues("GetRegistration", "notfound").Inc() + } else { + ssa.lagFactorCounter.WithLabelValues("GetRegistration", "other").Inc() + } + } else { + ssa.lagFactorCounter.WithLabelValues("GetRegistration", "found").Inc() + } + } + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("registration with ID '%d' not found", req.Id) + } + return nil, err + } + + return registrationModelToPb(model) +} + +// GetRegistrationByKey obtains a Registration by JWK +func (ssa *SQLStorageAuthorityRO) GetRegistrationByKey(ctx context.Context, req *sapb.JSONWebKey) (*corepb.Registration, error) { + if req == nil || len(req.Jwk) == 0 { + return nil, errIncompleteRequest + } + + var jwk jose.JSONWebKey + err := jwk.UnmarshalJSON(req.Jwk) + if err != nil { + return nil, err + } + + sha, err := core.KeyDigestB64(jwk.Key) + if err != nil { + return nil, err + } + model, err := selectRegistration(ctx, ssa.dbReadOnlyMap, "jwk_sha256", sha) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("no registrations with public key sha256 %q", sha) + } + return nil, err + } + + return registrationModelToPb(model) +} + +// GetSerialMetadata returns metadata stored alongside the serial number, +// such as the RegID whose certificate request created that serial, and when +// the certificate with that serial will expire. +func (ssa *SQLStorageAuthorityRO) GetSerialMetadata(ctx context.Context, req *sapb.Serial) (*sapb.SerialMetadata, error) { + if req == nil || req.Serial == "" { + return nil, errIncompleteRequest + } + + if !core.ValidSerial(req.Serial) { + return nil, fmt.Errorf("invalid serial %q", req.Serial) + } + + recordedSerial := recordedSerialModel{} + err := ssa.dbReadOnlyMap.SelectOne( + ctx, + &recordedSerial, + "SELECT * FROM serials WHERE serial = ?", + req.Serial, + ) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("serial %q not found", req.Serial) + } + return nil, err + } + + return &sapb.SerialMetadata{ + Serial: recordedSerial.Serial, + RegistrationID: recordedSerial.RegistrationID, + Created: timestamppb.New(recordedSerial.Created), + Expires: timestamppb.New(recordedSerial.Expires), + }, nil +} + +// GetCertificate takes a serial number and returns the corresponding +// certificate, or error if it does not exist. +func (ssa *SQLStorageAuthorityRO) GetCertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) { + if req == nil || req.Serial == "" { + return nil, errIncompleteRequest + } + if !core.ValidSerial(req.Serial) { + return nil, fmt.Errorf("invalid certificate serial %s", req.Serial) + } + + cert, err := SelectCertificate(ctx, ssa.dbReadOnlyMap, req.Serial) + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("certificate with serial %q not found", req.Serial) + } + if err != nil { + return nil, err + } + return cert, nil +} + +// GetLintPrecertificate takes a serial number and returns the corresponding +// linting precertificate, or error if it does not exist. The returned precert +// is identical to the actual submitted-to-CT-logs precertificate, except for +// its signature. 
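+// (Assumption for context, not stated in this file: the linting copy is signed
+// by a separate non-production key, which is why only the signature differs
+// from the precertificate actually submitted to CT logs.)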
+func (ssa *SQLStorageAuthorityRO) GetLintPrecertificate(ctx context.Context, req *sapb.Serial) (*corepb.Certificate, error) { + if req == nil || req.Serial == "" { + return nil, errIncompleteRequest + } + if !core.ValidSerial(req.Serial) { + return nil, fmt.Errorf("invalid precertificate serial %s", req.Serial) + } + + cert, err := SelectPrecertificate(ctx, ssa.dbReadOnlyMap, req.Serial) + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("precertificate with serial %q not found", req.Serial) + } + if err != nil { + return nil, err + } + return cert, nil +} + +// GetCertificateStatus takes a hexadecimal string representing the full 128-bit serial +// number of a certificate and returns data about that certificate's current +// validity. +func (ssa *SQLStorageAuthorityRO) GetCertificateStatus(ctx context.Context, req *sapb.Serial) (*corepb.CertificateStatus, error) { + if req.Serial == "" { + return nil, errIncompleteRequest + } + if !core.ValidSerial(req.Serial) { + err := fmt.Errorf("invalid certificate serial %s", req.Serial) + return nil, err + } + + certStatus, err := SelectCertificateStatus(ctx, ssa.dbReadOnlyMap, req.Serial) + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("certificate status with serial %q not found", req.Serial) + } + if err != nil { + return nil, err + } + + return certStatus, nil +} + +// GetRevocationStatus takes a hexadecimal string representing the full serial +// number of a certificate and returns a minimal set of data about that cert's +// current validity. +func (ssa *SQLStorageAuthorityRO) GetRevocationStatus(ctx context.Context, req *sapb.Serial) (*sapb.RevocationStatus, error) { + if req.Serial == "" { + return nil, errIncompleteRequest + } + if !core.ValidSerial(req.Serial) { + return nil, fmt.Errorf("invalid certificate serial %s", req.Serial) + } + + status, err := SelectRevocationStatus(ctx, ssa.dbReadOnlyMap, req.Serial) + if err != nil { + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("certificate status with serial %q not found", req.Serial) + } + return nil, err + } + + return status, nil +} + +// FQDNSetTimestampsForWindow returns the issuance timestamps for each +// certificate, issued for a set of identifiers, during a given window of time, +// starting from the most recent issuance. +// +// If req.Limit is nonzero, it returns only the most recent `Limit` results +func (ssa *SQLStorageAuthorityRO) FQDNSetTimestampsForWindow(ctx context.Context, req *sapb.CountFQDNSetsRequest) (*sapb.Timestamps, error) { + idents := identifier.FromProtoSlice(req.Identifiers) + + if core.IsAnyNilOrZero(req.Window) || len(idents) == 0 { + return nil, errIncompleteRequest + } + limit := req.Limit + if limit == 0 { + limit = math.MaxInt64 + } + type row struct { + Issued time.Time + } + var rows []row + _, err := ssa.dbReadOnlyMap.Select( + ctx, + &rows, + `SELECT issued FROM fqdnSets + WHERE setHash = ? + AND issued > ? 
+ ORDER BY issued DESC + LIMIT ?`, + core.HashIdentifiers(idents), + ssa.clk.Now().Add(-req.Window.AsDuration()), + limit, + ) + if err != nil { + return nil, err + } + + var results []*timestamppb.Timestamp + for _, i := range rows { + results = append(results, timestamppb.New(i.Issued)) + } + return &sapb.Timestamps{Timestamps: results}, nil +} + +// FQDNSetExists returns a bool indicating if one or more FQDN sets |names| +// exists in the database +func (ssa *SQLStorageAuthorityRO) FQDNSetExists(ctx context.Context, req *sapb.FQDNSetExistsRequest) (*sapb.Exists, error) { + idents := identifier.FromProtoSlice(req.Identifiers) + if len(idents) == 0 { + return nil, errIncompleteRequest + } + exists, err := ssa.checkFQDNSetExists(ctx, ssa.dbReadOnlyMap.SelectOne, idents) + if err != nil { + return nil, err + } + return &sapb.Exists{Exists: exists}, nil +} + +// oneSelectorFunc is a func type that matches both borp.Transaction.SelectOne +// and borp.DbMap.SelectOne. +type oneSelectorFunc func(ctx context.Context, holder any, query string, args ...any) error + +// checkFQDNSetExists uses the given oneSelectorFunc to check whether an fqdnSet +// for the given names exists. +func (ssa *SQLStorageAuthorityRO) checkFQDNSetExists(ctx context.Context, selector oneSelectorFunc, idents identifier.ACMEIdentifiers) (bool, error) { + namehash := core.HashIdentifiers(idents) + var exists bool + err := selector( + ctx, + &exists, + `SELECT EXISTS (SELECT id FROM fqdnSets WHERE setHash = ? LIMIT 1)`, + namehash, + ) + return exists, err +} + +// GetOrder is used to retrieve an already existing order object +func (ssa *SQLStorageAuthorityRO) GetOrder(ctx context.Context, req *sapb.OrderRequest) (*corepb.Order, error) { + if req == nil || req.Id == 0 { + return nil, errIncompleteRequest + } + + txn := func(tx db.Executor) (any, error) { + omObj, err := tx.Get(ctx, orderModel{}, req.Id) + if err != nil { + return nil, err + } + if omObj == nil { + return nil, berrors.NotFoundError("no order found for ID %d", req.Id) + } + + order, err := modelToOrder(omObj.(*orderModel)) + if err != nil { + return nil, err + } + + orderExp := order.Expires.AsTime() + if orderExp.Before(ssa.clk.Now()) { + return nil, berrors.NotFoundError("no order found for ID %d", req.Id) + } + + // Get the partial Authorization objects for the order + authzValidityInfo, err := getAuthorizationStatuses(ctx, tx, order.V2Authorizations) + // If there was an error getting the authorizations, return it immediately + if err != nil { + return nil, err + } + + var idents identifier.ACMEIdentifiers + for _, a := range authzValidityInfo { + idents = append(idents, identifier.ACMEIdentifier{Type: uintToIdentifierType[a.IdentifierType], Value: a.IdentifierValue}) + } + order.Identifiers = idents.ToProtoSlice() + + // Calculate the status for the order + status, err := statusForOrder(order, authzValidityInfo, ssa.clk.Now()) + if err != nil { + return nil, err + } + order.Status = status + + return order, nil + } + + output, err := db.WithTransaction(ctx, ssa.dbReadOnlyMap, txn) + if (db.IsNoRows(err) || errors.Is(err, berrors.NotFound)) && ssa.lagFactor != 0 { + // GetOrder is often called shortly after a new order is created, sometimes + // before the order or its associated rows have propagated to the read + // replica yet. If we get a NoRows, wait a little bit and retry, once. 
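+		// Note that the retry re-runs the entire read transaction above (order
+		// row, authz statuses, and status computation), not just the failed
+		// SELECT, so a replica that has caught up part-way still yields a
+		// consistent view of the order.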
+ ssa.clk.Sleep(ssa.lagFactor) + output, err = db.WithTransaction(ctx, ssa.dbReadOnlyMap, txn) + if err != nil { + if db.IsNoRows(err) || errors.Is(err, berrors.NotFound) { + ssa.lagFactorCounter.WithLabelValues("GetOrder", "notfound").Inc() + } else { + ssa.lagFactorCounter.WithLabelValues("GetOrder", "other").Inc() + } + } else { + ssa.lagFactorCounter.WithLabelValues("GetOrder", "found").Inc() + } + } + if err != nil { + return nil, err + } + + order, ok := output.(*corepb.Order) + if !ok { + return nil, fmt.Errorf("casting error in GetOrder") + } + + return order, nil +} + +// GetOrderForNames tries to find a **pending** or **ready** order with the +// exact set of names requested, associated with the given accountID. Only +// unexpired orders are considered. If no order meeting these requirements is +// found a nil corepb.Order pointer is returned. +func (ssa *SQLStorageAuthorityRO) GetOrderForNames(ctx context.Context, req *sapb.GetOrderForNamesRequest) (*corepb.Order, error) { + idents := identifier.FromProtoSlice(req.Identifiers) + + if req.AcctID == 0 || len(idents) == 0 { + return nil, errIncompleteRequest + } + + // Hash the names requested for lookup in the orderFqdnSets table + fqdnHash := core.HashIdentifiers(idents) + + // Find a possibly-suitable order. We don't include the account ID or order + // status in this query because there's no index that includes those, so + // including them could require the DB to scan extra rows. + // Instead, we select one unexpired order that matches the fqdnSet. If + // that order doesn't match the account ID or status we need, just return + // nothing. We use `ORDER BY expires ASC` because the index on + // (setHash, expires) is in ASC order. DESC would be slightly nicer from a + // user experience perspective but would be slow when there are many entries + // to sort. + // This approach works fine because in most cases there's only one account + // issuing for a given name. If there are other accounts issuing for the same + // name, it just means order reuse happens less often. + var result struct { + OrderID int64 + RegistrationID int64 + } + var err error + err = ssa.dbReadOnlyMap.SelectOne(ctx, &result, ` + SELECT orderID, registrationID + FROM orderFqdnSets + WHERE setHash = ? + AND expires > ? + ORDER BY expires ASC + LIMIT 1`, + fqdnHash, ssa.clk.Now()) + + if db.IsNoRows(err) { + return nil, berrors.NotFoundError("no order matching request found") + } else if err != nil { + return nil, err + } + + if result.RegistrationID != req.AcctID { + return nil, berrors.NotFoundError("no order matching request found") + } + + // Get the order + order, err := ssa.GetOrder(ctx, &sapb.OrderRequest{Id: result.OrderID}) + if err != nil { + return nil, err + } + // Only return a pending or ready order + if order.Status != string(core.StatusPending) && + order.Status != string(core.StatusReady) { + return nil, berrors.NotFoundError("no order matching request found") + } + return order, nil +} + +func (ssa *SQLStorageAuthorityRO) getAuthorizationsByID(ctx context.Context, ids []int64) (*sapb.Authorizations, error) { + selector, err := db.NewMappedSelector[authzModel](ssa.dbReadOnlyMap) + if err != nil { + return nil, fmt.Errorf("initializing db map: %w", err) + } + + clauses := fmt.Sprintf(`WHERE id IN (%s)`, db.QuestionMarks(len(ids))) + + var sliceOfAny []any + for _, id := range ids { + sliceOfAny = append(sliceOfAny, id) + } + rows, err := selector.QueryContext(ctx, clauses, sliceOfAny...) 
+	if err != nil {
+		return nil, fmt.Errorf("reading db: %w", err)
+	}
+
+	var ret []*corepb.Authorization
+	err = rows.ForEach(func(row *authzModel) error {
+		authz, err := modelToAuthzPB(*row)
+		if err != nil {
+			return err
+		}
+		ret = append(ret, authz)
+		return nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("reading db: %w", err)
+	}
+	return &sapb.Authorizations{Authzs: ret}, nil
+}
+
+// GetAuthorization2 returns the authz2 style authorization identified by the provided ID or an error.
+// If no authorization is found matching the ID a berrors.NotFound type error is returned.
+func (ssa *SQLStorageAuthorityRO) GetAuthorization2(ctx context.Context, req *sapb.AuthorizationID2) (*corepb.Authorization, error) {
+	if req.Id == 0 {
+		return nil, errIncompleteRequest
+	}
+	obj, err := ssa.dbReadOnlyMap.Get(ctx, authzModel{}, req.Id)
+	if db.IsNoRows(err) && ssa.lagFactor != 0 {
+		// GetAuthorization2 is often called shortly after a new order is created,
+		// sometimes before the order's associated authz rows have propagated to the
+		// read replica yet. If we get a NoRows, wait a little bit and retry, once.
+		ssa.clk.Sleep(ssa.lagFactor)
+		obj, err = ssa.dbReadOnlyMap.Get(ctx, authzModel{}, req.Id)
+		if err != nil {
+			if db.IsNoRows(err) {
+				ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "notfound").Inc()
+			} else {
+				ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "other").Inc()
+			}
+		} else {
+			ssa.lagFactorCounter.WithLabelValues("GetAuthorization2", "found").Inc()
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	if obj == nil {
+		return nil, berrors.NotFoundError("authorization %d not found", req.Id)
+	}
+	return modelToAuthzPB(*(obj.(*authzModel)))
+}
+
+// authzModelMapToPB converts a mapping of identifiers to authzModels into a
+// protobuf authorizations map
+func authzModelMapToPB(m map[identifier.ACMEIdentifier]authzModel) (*sapb.Authorizations, error) {
+	resp := &sapb.Authorizations{}
+	for _, v := range m {
+		authzPB, err := modelToAuthzPB(v)
+		if err != nil {
+			return nil, err
+		}
+		resp.Authzs = append(resp.Authzs, authzPB)
+	}
+	return resp, nil
+}
+
+// CountPendingAuthorizations2 returns the number of pending, unexpired authorizations
+// for the given registration.
+func (ssa *SQLStorageAuthorityRO) CountPendingAuthorizations2(ctx context.Context, req *sapb.RegistrationID) (*sapb.Count, error) {
+	if req.Id == 0 {
+		return nil, errIncompleteRequest
+	}
+
+	var count int64
+	err := ssa.dbReadOnlyMap.SelectOne(ctx, &count,
+		`SELECT COUNT(*) FROM authz2 WHERE
+		registrationID = :regID AND
+		expires > :expires AND
+		status = :status`,
+		map[string]any{
+			"regID":   req.Id,
+			"expires": ssa.clk.Now(),
+			"status":  statusUint(core.StatusPending),
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	return &sapb.Count{Count: count}, nil
+}
+
+// GetValidOrderAuthorizations2 is used to get all authorizations
+// associated with the given Order ID.
+// NOTE: The name is outdated. It does *not* filter out invalid or expired
+// authorizations; that is left to the caller. It also ignores the RegID field
+// of the input: ensuring that the returned authorizations match the same RegID
+// as the Order is also left to the caller. This is because the caller is
+// generally in a better position to provide insightful error messages, whereas
+// simply omitting an authz from this method's response would leave the caller
+// wondering why that authz was omitted.
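+// Callers that need only valid, unexpired authorizations must therefore filter
+// the result themselves; a minimal sketch: keep each authz where
+// authz.Status == string(core.StatusValid) and authz.Expires.AsTime() is in
+// the future.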
+func (ssa *SQLStorageAuthorityRO) GetValidOrderAuthorizations2(ctx context.Context, req *sapb.GetValidOrderAuthorizationsRequest) (*sapb.Authorizations, error) {
+	if core.IsAnyNilOrZero(req.Id) {
+		return nil, errIncompleteRequest
+	}
+
+	om, err := ssa.dbReadOnlyMap.Get(ctx, &orderModel{}, req.Id)
+	if err != nil {
+		return nil, err
+	}
+	// Nonexistent orders should return no error, with an empty list of authorizations
+	if om == nil {
+		return &sapb.Authorizations{}, nil
+	}
+
+	order, err := modelToOrder(om.(*orderModel))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(order.V2Authorizations) == 0 {
+		return nil, fmt.Errorf("invalid order: no authorization IDs")
+	}
+
+	// Fetch the fully-hydrated Authorization objects and return them.
+	authzs, err := ssa.getAuthorizationsByID(ctx, order.V2Authorizations)
+	if err != nil {
+		return nil, err
+	}
+	return authzs, nil
+}
+
+// CountInvalidAuthorizations2 counts invalid authorizations for a user expiring
+// in a given time range.
+func (ssa *SQLStorageAuthorityRO) CountInvalidAuthorizations2(ctx context.Context, req *sapb.CountInvalidAuthorizationsRequest) (*sapb.Count, error) {
+	ident := identifier.FromProto(req.Identifier)
+
+	if core.IsAnyNilOrZero(req.RegistrationID, ident, req.Range.Earliest, req.Range.Latest) {
+		return nil, errIncompleteRequest
+	}
+
+	idType, ok := identifierTypeToUint[ident.ToProto().Type]
+	if !ok {
+		return nil, fmt.Errorf("unsupported identifier type %q", ident.ToProto().Type)
+	}
+
+	var count int64
+	err := ssa.dbReadOnlyMap.SelectOne(
+		ctx,
+		&count,
+		`SELECT COUNT(*) FROM authz2 WHERE
+		registrationID = :regID AND
+		status = :status AND
+		expires > :expiresEarliest AND
+		expires <= :expiresLatest AND
+		identifierType = :identType AND
+		identifierValue = :identValue`,
+		map[string]any{
+			"regID":           req.RegistrationID,
+			"identType":       idType,
+			"identValue":      ident.Value,
+			"expiresEarliest": req.Range.Earliest.AsTime(),
+			"expiresLatest":   req.Range.Latest.AsTime(),
+			"status":          statusUint(core.StatusInvalid),
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	return &sapb.Count{Count: count}, nil
+}
+
+// GetValidAuthorizations2 returns, for each of the given identifiers, a single
+// valid authorization owned by the given account. If more than one valid
+// authorization exists for an identifier, only the one with the latest expiry
+// is returned.
+func (ssa *SQLStorageAuthorityRO) GetValidAuthorizations2(ctx context.Context, req *sapb.GetValidAuthorizationsRequest) (*sapb.Authorizations, error) {
+	idents := identifier.FromProtoSlice(req.Identifiers)
+
+	if core.IsAnyNilOrZero(req, req.RegistrationID, idents, req.ValidUntil) {
+		return nil, errIncompleteRequest
+	}
+
+	// The WHERE clause returned by buildIdentifierQueryConditions does not
+	// contain any user-controlled strings; all user-controlled input ends up
+	// in the returned placeholder args.
+	identConditions, identArgs := buildIdentifierQueryConditions(idents)
+	query := fmt.Sprintf(
+		`SELECT %s FROM authz2
+			USE INDEX (regID_identifier_status_expires_idx)
+			WHERE registrationID = ? AND
+			status = ? AND
+			expires > ? AND
+			(%s)`,
+		authzFields,
+		identConditions,
+	)
+
+	params := []any{
+		req.RegistrationID,
+		statusUint(core.StatusValid),
+		req.ValidUntil.AsTime(),
+	}
+	params = append(params, identArgs...)
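+	// Illustrative sketch, assuming two DNS identifiers and assuming that
+	// buildIdentifierQueryConditions renders them as a single IN clause: the
+	// final query reads roughly
+	//
+	//	SELECT <authzFields> FROM authz2
+	//	USE INDEX (regID_identifier_status_expires_idx)
+	//	WHERE registrationID = ? AND status = ? AND expires > ?
+	//	AND ((identifierType = ? AND identifierValue IN (?, ?)))
+	//
+	// with params [regID, statusValid, validUntil, dnsType, "a.example", "b.example"].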
+ + var authzModels []authzModel + _, err := ssa.dbReadOnlyMap.Select( + ctx, + &authzModels, + query, + params..., + ) + if err != nil { + return nil, err + } + + if len(authzModels) == 0 { + return &sapb.Authorizations{}, nil + } + + // TODO(#8111): Consider reducing the volume of data in this map. + authzMap := make(map[identifier.ACMEIdentifier]authzModel, len(authzModels)) + for _, am := range authzModels { + if req.Profile != "" { + // Don't return authzs whose profile doesn't match that requested. + if am.CertificateProfileName == nil || *am.CertificateProfileName != req.Profile { + continue + } + } + // If there is an existing authorization in the map only replace it with one + // which has a later expiry. + identType, ok := uintToIdentifierType[am.IdentifierType] + if !ok { + return nil, fmt.Errorf("unrecognized identifier type encoding %d on authz id %d", am.IdentifierType, am.ID) + } + ident := identifier.ACMEIdentifier{Type: identType, Value: am.IdentifierValue} + existing, present := authzMap[ident] + if present && am.Expires.Before(existing.Expires) { + continue + } + authzMap[ident] = am + } + + return authzModelMapToPB(authzMap) +} + +// KeyBlocked checks if a key, indicated by a hash, is present in the blockedKeys table +func (ssa *SQLStorageAuthorityRO) KeyBlocked(ctx context.Context, req *sapb.SPKIHash) (*sapb.Exists, error) { + if req == nil || req.KeyHash == nil { + return nil, errIncompleteRequest + } + + var id int64 + err := ssa.dbReadOnlyMap.SelectOne(ctx, &id, `SELECT ID FROM blockedKeys WHERE keyHash = ?`, req.KeyHash) + if err != nil { + if db.IsNoRows(err) { + return &sapb.Exists{Exists: false}, nil + } + return nil, err + } + + return &sapb.Exists{Exists: true}, nil +} + +// IncidentsForSerial queries each active incident table and returns every +// incident that currently impacts `req.Serial`. +func (ssa *SQLStorageAuthorityRO) IncidentsForSerial(ctx context.Context, req *sapb.Serial) (*sapb.Incidents, error) { + if req == nil { + return nil, errIncompleteRequest + } + + var activeIncidents []incidentModel + _, err := ssa.dbReadOnlyMap.Select(ctx, &activeIncidents, `SELECT * FROM incidents WHERE enabled = 1`) + if err != nil { + if db.IsNoRows(err) { + return &sapb.Incidents{}, nil + } + return nil, err + } + + var incidentsForSerial []*sapb.Incident + for _, i := range activeIncidents { + var count int + err := ssa.dbIncidentsMap.SelectOne(ctx, &count, fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE serial = ?", + i.SerialTable), req.Serial) + if err != nil { + if db.IsNoRows(err) { + continue + } + return nil, err + } + if count > 0 { + incident := incidentModelToPB(i) + incidentsForSerial = append(incidentsForSerial, &incident) + } + + } + if len(incidentsForSerial) == 0 { + return &sapb.Incidents{}, nil + } + return &sapb.Incidents{Incidents: incidentsForSerial}, nil +} + +// SerialsForIncident queries the provided incident table and returns the +// resulting rows as a stream of `*sapb.IncidentSerial`s. An `io.EOF` error +// signals that there are no more serials to send. If the incident table in +// question contains zero rows, only an `io.EOF` error is returned. The +// IncidentSerial messages returned may have the zero-value for their OrderID, +// RegistrationID, and LastNoticeSent fields, if those are NULL in the database. 
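+//
+// A minimal client-side consumption sketch (assuming a generated
+// StorageAuthorityReadOnlyClient named c; "incident_foo" is a hypothetical
+// incident table name):
+//
+//	stream, err := c.SerialsForIncident(ctx, &sapb.SerialsForIncidentRequest{IncidentTable: "incident_foo"})
+//	if err != nil { /* handle RPC setup error */ }
+//	for {
+//		ispb, err := stream.Recv()
+//		if errors.Is(err, io.EOF) {
+//			break
+//		}
+//		if err != nil { /* handle stream error */ }
+//		// use ispb.Serial, ispb.RegistrationID, etc.
+//	}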
+func (ssa *SQLStorageAuthorityRO) SerialsForIncident(req *sapb.SerialsForIncidentRequest, stream grpc.ServerStreamingServer[sapb.IncidentSerial]) error {
+	if req.IncidentTable == "" {
+		return errIncompleteRequest
+	}
+
+	// Check that `req.IncidentTable` is a valid incident table name.
+	if !validIncidentTableRegexp.MatchString(req.IncidentTable) {
+		return fmt.Errorf("malformed table name %q", req.IncidentTable)
+	}
+
+	selector, err := db.NewMappedSelector[incidentSerialModel](ssa.dbIncidentsMap)
+	if err != nil {
+		return fmt.Errorf("initializing db map: %w", err)
+	}
+
+	rows, err := selector.QueryFrom(stream.Context(), req.IncidentTable, "")
+	if err != nil {
+		return fmt.Errorf("starting db query: %w", err)
+	}
+
+	return rows.ForEach(func(ism *incidentSerialModel) error {
+		ispb := &sapb.IncidentSerial{
+			Serial: ism.Serial,
+		}
+		if ism.RegistrationID != nil {
+			ispb.RegistrationID = *ism.RegistrationID
+		}
+		if ism.OrderID != nil {
+			ispb.OrderID = *ism.OrderID
+		}
+		if ism.LastNoticeSent != nil {
+			ispb.LastNoticeSent = timestamppb.New(*ism.LastNoticeSent)
+		}
+
+		return stream.Send(ispb)
+	})
+}
+
+// GetRevokedCertsByShard returns revoked certificates by explicit sharding.
+//
+// It returns all unexpired certificates from the revokedCertificates table with the given
+// shardIdx. It limits the results to those revoked before req.RevokedBefore.
+func (ssa *SQLStorageAuthorityRO) GetRevokedCertsByShard(req *sapb.GetRevokedCertsByShardRequest, stream grpc.ServerStreamingServer[corepb.CRLEntry]) error {
+	if core.IsAnyNilOrZero(req.ShardIdx, req.IssuerNameID, req.RevokedBefore, req.ExpiresAfter) {
+		return errIncompleteRequest
+	}
+
+	atTime := req.RevokedBefore.AsTime()
+
+	clauses := `
+		WHERE issuerID = ?
+		AND shardIdx = ?
+		AND notAfterHour >= ?`
+	params := []any{
+		req.IssuerNameID,
+		req.ShardIdx,
+		// Round the expiry down to the nearest hour, to take advantage of our
+		// smaller index while still capturing at least as many certs as intended.
+		req.ExpiresAfter.AsTime().Truncate(time.Hour),
+	}
+
+	selector, err := db.NewMappedSelector[revokedCertModel](ssa.dbReadOnlyMap)
+	if err != nil {
+		return fmt.Errorf("initializing db map: %w", err)
+	}
+
+	rows, err := selector.QueryContext(stream.Context(), clauses, params...)
+	if err != nil {
+		return fmt.Errorf("reading db: %w", err)
+	}
+
+	return rows.ForEach(func(row *revokedCertModel) error {
+		// Double-check that the cert wasn't revoked between the time at which we're
+		// constructing this snapshot CRL and right now. If the cert was revoked
+		// at-or-after the "atTime", we'll just include it in the next generation
+		// of CRLs.
+		if row.RevokedDate.After(atTime) || row.RevokedDate.Equal(atTime) {
+			return nil
+		}
+
+		return stream.Send(&corepb.CRLEntry{
+			Serial:    row.Serial,
+			Reason:    int32(row.RevokedReason), //nolint: gosec // Revocation reasons are guaranteed to be small, no risk of overflow.
+			RevokedAt: timestamppb.New(row.RevokedDate),
+		})
+	})
+}
+
+// Health implements the grpc.checker interface.
+func (ssa *SQLStorageAuthorityRO) Health(ctx context.Context) error {
+	err := ssa.dbReadOnlyMap.SelectOne(ctx, new(int), "SELECT 1")
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReplacementOrderExists returns whether a valid replacement order exists for
+// the given certificate serial number. An existing but expired or otherwise
+// invalid replacement order is not considered to exist.
+func (ssa *SQLStorageAuthorityRO) ReplacementOrderExists(ctx context.Context, req *sapb.Serial) (*sapb.Exists, error) {
+	if req == nil || req.Serial == "" {
+		return nil, errIncompleteRequest
+	}
+
+	var replacement replacementOrderModel
+	err := ssa.dbReadOnlyMap.SelectOne(
+		ctx,
+		&replacement,
+		"SELECT * FROM replacementOrders WHERE serial = ? LIMIT 1",
+		req.Serial,
+	)
+	if err != nil {
+		if db.IsNoRows(err) {
+			// No replacement order exists.
+			return &sapb.Exists{Exists: false}, nil
+		}
+		return nil, err
+	}
+	if replacement.Replaced {
+		// Certificate has already been replaced.
+		return &sapb.Exists{Exists: true}, nil
+	}
+	if replacement.OrderExpires.Before(ssa.clk.Now()) {
+		// The existing replacement order has expired.
+		return &sapb.Exists{Exists: false}, nil
+	}
+
+	// Pull the replacement order so we can inspect its status.
+	replacementOrder, err := ssa.GetOrder(ctx, &sapb.OrderRequest{Id: replacement.OrderID})
+	if err != nil {
+		if errors.Is(err, berrors.NotFound) {
+			// The existing replacement order has been deleted. This should
+			// never happen.
+			ssa.log.Errf("replacement order %d for serial %q not found", replacement.OrderID, req.Serial)
+			return &sapb.Exists{Exists: false}, nil
+		}
+		return nil, err
+	}
+
+	switch replacementOrder.Status {
+	case string(core.StatusPending), string(core.StatusReady), string(core.StatusProcessing), string(core.StatusValid):
+		// An existing replacement order is either still being worked on or has
+		// already been finalized.
+		return &sapb.Exists{Exists: true}, nil
+
+	case string(core.StatusInvalid):
+		// The existing replacement order cannot be finalized. The requester
+		// should create a new replacement order.
+		return &sapb.Exists{Exists: false}, nil
+
+	default:
+		// Replacement order is in an unknown state. This should never happen.
+		return nil, fmt.Errorf("unknown replacement order status: %q", replacementOrder.Status)
+	}
+}
+
+// GetSerialsByKey returns a stream of serials for all unexpired certificates
+// whose public key matches the given SPKIHash. This is useful for revoking all
+// certificates affected by a key compromise.
+func (ssa *SQLStorageAuthorityRO) GetSerialsByKey(req *sapb.SPKIHash, stream grpc.ServerStreamingServer[sapb.Serial]) error {
+	clauses := `
+		WHERE keyHash = ?
+		AND certNotAfter > ?`
+	params := []any{
+		req.KeyHash,
+		ssa.clk.Now(),
+	}
+
+	selector, err := db.NewMappedSelector[keyHashModel](ssa.dbReadOnlyMap)
+	if err != nil {
+		return fmt.Errorf("initializing db map: %w", err)
+	}
+
+	rows, err := selector.QueryContext(stream.Context(), clauses, params...)
+	if err != nil {
+		return fmt.Errorf("reading db: %w", err)
+	}
+
+	return rows.ForEach(func(row *keyHashModel) error {
+		return stream.Send(&sapb.Serial{Serial: row.CertSerial})
+	})
+}
+
+// GetSerialsByAccount returns a stream of all serials for all unexpired
+// certificates issued to the given RegID. This is useful for revoking all of
+// an account's certs upon their request.
+func (ssa *SQLStorageAuthorityRO) GetSerialsByAccount(req *sapb.RegistrationID, stream grpc.ServerStreamingServer[sapb.Serial]) error {
+	clauses := `
+		WHERE registrationID = ?
+		AND expires > ?`
+	params := []any{
+		req.Id,
+		ssa.clk.Now(),
+	}
+
+	selector, err := db.NewMappedSelector[recordedSerialModel](ssa.dbReadOnlyMap)
+	if err != nil {
+		return fmt.Errorf("initializing db map: %w", err)
+	}
+
+	rows, err := selector.QueryContext(stream.Context(), clauses, params...)
+	if err != nil {
+		return fmt.Errorf("reading db: %w", err)
+	}
+
+	return rows.ForEach(func(row *recordedSerialModel) error {
+		return stream.Send(&sapb.Serial{Serial: row.Serial})
+	})
+}
+
+// CheckIdentifiersPaused takes a slice of identifiers and returns a slice of
+// the first 15 identifier values which are currently paused for the provided
+// account. If no matches are found, an empty slice is returned.
+func (ssa *SQLStorageAuthorityRO) CheckIdentifiersPaused(ctx context.Context, req *sapb.PauseRequest) (*sapb.Identifiers, error) {
+	if core.IsAnyNilOrZero(req.RegistrationID, req.Identifiers) {
+		return nil, errIncompleteRequest
+	}
+
+	idents, err := newIdentifierModelsFromPB(req.Identifiers)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(idents) == 0 {
+		// No identifier values to check.
+		return nil, nil
+	}
+
+	identsByType := map[uint8][]string{}
+	for _, id := range idents {
+		identsByType[id.Type] = append(identsByType[id.Type], id.Value)
+	}
+
+	// Build a query to retrieve up to 15 paused identifiers using OR clauses
+	// for conditions specific to each type. This approach handles mixed
+	// identifier types in a single query. Assuming 3 DNS identifiers and 1 IP
+	// identifier, the resulting query would look like:
+	//
+	// SELECT identifierType, identifierValue
+	// FROM paused WHERE registrationID = ? AND
+	//   unpausedAt IS NULL AND
+	//   ((identifierType = ? AND identifierValue IN (?, ?, ?)) OR
+	//   (identifierType = ? AND identifierValue IN (?)))
+	// LIMIT 15
+	//
+	// Corresponding args array for placeholders: [<regID>, 0, "example.com",
+	// "example.net", "example.org", 1, "1.2.3.4"]
+
+	var conditions []string
+	args := []any{req.RegistrationID}
+	for idType, values := range identsByType {
+		conditions = append(conditions,
+			fmt.Sprintf("identifierType = ? AND identifierValue IN (%s)",
+				db.QuestionMarks(len(values)),
+			),
+		)
+		args = append(args, idType)
+		for _, value := range values {
+			args = append(args, value)
+		}
+	}
+
+	query := fmt.Sprintf(`
+		SELECT identifierType, identifierValue
+		FROM paused
+		WHERE registrationID = ? AND unpausedAt IS NULL AND (%s) LIMIT 15`,
+		strings.Join(conditions, " OR "))
+
+	var matches []identifierModel
+	_, err = ssa.dbReadOnlyMap.Select(ctx, &matches, query, args...)
+	if err != nil && !db.IsNoRows(err) {
+		// Error querying the database.
+		return nil, err
+	}
+
+	return newPBFromIdentifierModels(matches)
+}
+
+// GetPausedIdentifiers returns a slice of paused identifiers for the provided
+// account. If no paused identifiers are found, an empty slice is returned. The
+// results are limited to the first 15 paused identifiers.
+func (ssa *SQLStorageAuthorityRO) GetPausedIdentifiers(ctx context.Context, req *sapb.RegistrationID) (*sapb.Identifiers, error) {
+	if core.IsAnyNilOrZero(req.Id) {
+		return nil, errIncompleteRequest
+	}
+
+	var matches []identifierModel
+	_, err := ssa.dbReadOnlyMap.Select(ctx, &matches, `
+		SELECT identifierType, identifierValue
+		FROM paused
+		WHERE
+			registrationID = ? AND
+			unpausedAt IS NULL
+		LIMIT 15`,
+		req.Id,
+	)
+	if err != nil && !db.IsNoRows(err) {
+		return nil, err
+	}
+
+	return newPBFromIdentifierModels(matches)
+}
+
+// GetRateLimitOverride retrieves a rate limit override for the given bucket key
+// and limit. If no override is found, a NotFound error is returned.
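+// The lookup uses the overrides table's composite primary key, so both
+// LimitEnum and BucketKey are required; e.g. (hypothetical values) LimitEnum=2
+// with BucketKey "regID:123" addresses exactly one row.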
+func (ssa *SQLStorageAuthorityRO) GetRateLimitOverride(ctx context.Context, req *sapb.GetRateLimitOverrideRequest) (*sapb.RateLimitOverrideResponse, error) {
+	if core.IsAnyNilOrZero(req, req.LimitEnum, req.BucketKey) {
+		return nil, errIncompleteRequest
+	}
+
+	obj, err := ssa.dbReadOnlyMap.Get(ctx, overrideModel{}, req.LimitEnum, req.BucketKey)
+	if db.IsNoRows(err) {
+		return nil, berrors.NotFoundError(
+			"no rate limit override found for limit %d and bucket key %s",
+			req.LimitEnum,
+			req.BucketKey,
+		)
+	}
+	if err != nil {
+		return nil, err
+	}
+	row := obj.(*overrideModel)
+
+	return &sapb.RateLimitOverrideResponse{
+		Override:  newPBFromOverrideModel(row),
+		Enabled:   row.Enabled,
+		UpdatedAt: timestamppb.New(row.UpdatedAt),
+	}, nil
+}
+
+// GetEnabledRateLimitOverrides retrieves all enabled rate limit overrides from
+// the database. The results are returned as a stream. If no enabled overrides
+// are found, an empty stream is returned.
+func (ssa *SQLStorageAuthorityRO) GetEnabledRateLimitOverrides(_ *emptypb.Empty, stream sapb.StorageAuthorityReadOnly_GetEnabledRateLimitOverridesServer) error {
+	selector, err := db.NewMappedSelector[overrideModel](ssa.dbReadOnlyMap)
+	if err != nil {
+		return fmt.Errorf("initializing selector: %w", err)
+	}
+
+	rows, err := selector.QueryContext(stream.Context(), "WHERE enabled = true")
+	if err != nil {
+		return fmt.Errorf("querying enabled overrides: %w", err)
+	}
+
+	return rows.ForEach(func(m *overrideModel) error {
+		return stream.Send(&sapb.RateLimitOverrideResponse{
+			Override:  newPBFromOverrideModel(m),
+			Enabled:   m.Enabled,
+			UpdatedAt: timestamppb.New(m.UpdatedAt),
+		})
+	})
+}
diff --git a/sa/satest/satest.go b/sa/satest/satest.go
index 4fe811a637f..bb92125a4c2 100644
--- a/sa/satest/satest.go
+++ b/sa/satest/satest.go
@@ -2,10 +2,11 @@ package satest
 
 import (
 	"context"
-	"net"
 	"testing"
 	"time"
 
+	"google.golang.org/protobuf/types/known/timestamppb"
+
 	"github.com/letsencrypt/boulder/core"
 	corepb "github.com/letsencrypt/boulder/core/proto"
 	sapb "github.com/letsencrypt/boulder/sa/proto"
@@ -15,16 +16,13 @@ import (
 // SA using GoodKey under the hood. This is used by various non-SA tests
 // to initialize a registration for the test to reference.
 func CreateWorkingRegistration(t *testing.T, sa sapb.StorageAuthorityClient) *corepb.Registration {
-	initialIP, _ := net.ParseIP("88.77.66.11").MarshalText()
 	reg, err := sa.NewRegistration(context.Background(), &corepb.Registration{
 		Key: []byte(`{
 			"kty": "RSA",
 			"n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT-O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqVwGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj-oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuCLqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5gHdrNP5zw",
 			"e": "AQAB"
 		}`),
-		Contact:   []string{"mailto:foo@example.com"},
-		InitialIP: initialIP,
-		CreatedAt: time.Date(2003, 5, 10, 0, 0, 0, 0, time.UTC).UnixNano(),
+		CreatedAt: timestamppb.New(time.Date(2003, 5, 10, 0, 0, 0, 0, time.UTC)),
 		Status:    string(core.StatusValid),
 	})
 	if err != nil {
diff --git a/sa/sysvars.go b/sa/sysvars.go
new file mode 100644
index 00000000000..6039c82e7f3
--- /dev/null
+++ b/sa/sysvars.go
@@ -0,0 +1,235 @@
+package sa
+
+import (
+	"fmt"
+	"regexp"
+)
+
+var (
+	checkStringQuoteRE = regexp.MustCompile(`^'[0-9A-Za-z_\-=:]+'$`)
+	checkIntRE         = regexp.MustCompile(`^\d+$`)
+	checkImproperIntRE = regexp.MustCompile(`^'\d+'$`)
+	checkNumericRE     = regexp.MustCompile(`^\d+(\.\d+)?$`)
+	checkBooleanRE     = regexp.MustCompile(`^([0-1])|(?i)(true|false)|(?i)(on|off)`)
+)
+
+// checkMariaDBSystemVariables validates a MariaDB config passed in via SA
+// setDefault or DSN. This manually curated list of system variables was
+// partially generated by a tool in issue #6687. An overview of the
+// validations performed:
+//
+//   - Correct quoting for strings and string enums prevents future
+//     problems such as the one fixed in PR #6683.
+//
+//   - Regex validation is performed for the various booleans, floats,
+//     integers, and strings.
+//
+// Only session scoped variables should be included. A session variable is one
+// that affects the current session only. Passing a session variable that only
+// works in the global scope causes database connection error 1045.
+// https://mariadb.com/kb/en/set/#global-session
+func checkMariaDBSystemVariables(name string, value string) error {
+	// System variable names will be indexed into the appropriate hash sets
+	// below and can possibly exist in several sets.
+
+	// Check the list of currently known MariaDB string type system variables
+	// and determine if the value is a properly formatted string e.g.
+	// sql_mode='STRICT_TABLES'
+	mariaDBStringTypes := map[string]struct{}{
+		"character_set_client":           {},
+		"character_set_connection":       {},
+		"character_set_database":         {},
+		"character_set_filesystem":       {},
+		"character_set_results":          {},
+		"character_set_server":           {},
+		"collation_connection":           {},
+		"collation_database":             {},
+		"collation_server":               {},
+		"debug/debug_dbug":               {},
+		"debug_sync":                     {},
+		"enforce_storage_engine":         {},
+		"external_user":                  {},
+		"lc_messages":                    {},
+		"lc_time_names":                  {},
+		"old_alter_table":                {},
+		"old_mode":                       {},
+		"optimizer_switch":               {},
+		"proxy_user":                     {},
+		"session_track_system_variables": {},
+		"sql_mode":                       {},
+		"time_zone":                      {},
+	}
+
+	if _, found := mariaDBStringTypes[name]; found {
+		if checkStringQuoteRE.FindString(value) != value {
+			return fmt.Errorf("%s=%s string is not properly quoted", name, value)
+		}
+		return nil
+	}
+
+	// MariaDB numerics, which may be either integers or floats.
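+	// For example (illustrative values): long_query_time=0.25 and
+	// max_allowed_packet=16777216 both satisfy checkNumericRE, while
+	// long_query_time='0.25' is rejected because the quotes make it
+	// non-numeric.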
+ // https://mariadb.com/kb/en/numeric-data-type-overview/ + mariaDBNumericTypes := map[string]struct{}{ + "bulk_insert_buffer_size": {}, + "default_week_format": {}, + "eq_range_index_dive_limit": {}, + "error_count": {}, + "expensive_subquery_limit": {}, + "group_concat_max_len": {}, + "histogram_size": {}, + "idle_readonly_transaction_timeout": {}, + "idle_transaction_timeout": {}, + "idle_write_transaction_timeout": {}, + "in_predicate_conversion_threshold": {}, + "insert_id": {}, + "interactive_timeout": {}, + "join_buffer_size": {}, + "join_buffer_space_limit": {}, + "join_cache_level": {}, + "last_insert_id": {}, + "lock_wait_timeout": {}, + "log_slow_min_examined_row_limit": {}, + "log_slow_query_time": {}, + "log_slow_rate_limit": {}, + "long_query_time": {}, + "max_allowed_packet": {}, + "max_delayed_threads": {}, + "max_digest_length": {}, + "max_error_count": {}, + "max_heap_table_size": {}, + "max_join_size": {}, + "max_length_for_sort_data": {}, + "max_recursive_iterations": {}, + "max_rowid_filter_size": {}, + "max_seeks_for_key": {}, + "max_session_mem_used": {}, + "max_sort_length": {}, + "max_sp_recursion_depth": {}, + "max_statement_time": {}, + "max_user_connections": {}, + "min_examined_row_limit": {}, + "mrr_buffer_size": {}, + "net_buffer_length": {}, + "net_read_timeout": {}, + "net_retry_count": {}, + "net_write_timeout": {}, + "optimizer_extra_pruning_depth": {}, + "optimizer_max_sel_arg_weight": {}, + "optimizer_prune_level": {}, + "optimizer_search_depth": {}, + "optimizer_selectivity_sampling_limit": {}, + "optimizer_trace_max_mem_size": {}, + "optimizer_use_condition_selectivity": {}, + "preload_buffer_size": {}, + "profiling_history_size": {}, + "progress_report_time": {}, + "pseudo_slave_mode": {}, + "pseudo_thread_id": {}, + "query_alloc_block_size": {}, + "query_prealloc_size": {}, + "rand_seed1": {}, + "range_alloc_block_size": {}, + "read_rnd_buffer_size": {}, + "rowid_merge_buff_size": {}, + "sql_select_limit": {}, + "tmp_disk_table_size": {}, + "tmp_table_size": {}, + "transaction_alloc_block_size": {}, + "transaction_prealloc_size": {}, + "wait_timeout": {}, + "warning_count": {}, + } + + if _, found := mariaDBNumericTypes[name]; found { + if checkNumericRE.FindString(value) != value { + return fmt.Errorf("%s=%s requires a numeric value, but is not formatted like a number", name, value) + } + return nil + } + + // Certain MariaDB enums can have both string and integer values. + mariaDBIntEnumTypes := map[string]struct{}{ + "completion_type": {}, + "query_cache_type": {}, + } + + mariaDBStringEnumTypes := map[string]struct{}{ + "completion_type": {}, + "default_regex_flags": {}, + "default_storage_engine": {}, + "default_tmp_storage_engine": {}, + "histogram_type": {}, + "log_slow_filter": {}, + "log_slow_verbosity": {}, + "optimizer_trace": {}, + "query_cache_type": {}, + "session_track_transaction_info": {}, + "transaction_isolation": {}, + "tx_isolation": {}, + "use_stat_tables": {}, + } + + // Check the list of currently known MariaDB enumeration type system + // variables and determine if the value is either: + // 1) A properly formatted integer e.g. completion_type=1 + if _, found := mariaDBIntEnumTypes[name]; found { + if checkIntRE.FindString(value) == value { + return nil + } + if checkImproperIntRE.FindString(value) == value { + return fmt.Errorf("%s=%s integer enum is quoted, but should not be", name, value) + } + } + + // 2) A properly formatted string e.g. 
completion_type='CHAIN' + if _, found := mariaDBStringEnumTypes[name]; found { + if checkStringQuoteRE.FindString(value) != value { + return fmt.Errorf("%s=%s string enum is not properly quoted", name, value) + } + return nil + } + + // MariaDB booleans can be (0, false) or (1, true). + // https://mariadb.com/kb/en/boolean/ + mariaDBBooleanTypes := map[string]struct{}{ + "autocommit": {}, + "big_tables": {}, + "check_constraint_checks": {}, + "foreign_key_checks": {}, + "in_transaction": {}, + "keep_files_on_create": {}, + "log_slow_query": {}, + "low_priority_updates": {}, + "old": {}, + "old_passwords": {}, + "profiling": {}, + "query_cache_strip_comments": {}, + "query_cache_wlock_invalidate": {}, + "session_track_schema": {}, + "session_track_state_change": {}, + "slow_query_log": {}, + "sql_auto_is_null": {}, + "sql_big_selects": {}, + "sql_buffer_result": {}, + "sql_if_exists": {}, + "sql_log_off": {}, + "sql_notes": {}, + "sql_quote_show_create": {}, + "sql_safe_updates": {}, + "sql_warnings": {}, + "standard_compliant_cte": {}, + "tcp_nodelay": {}, + "transaction_read_only": {}, + "tx_read_only": {}, + "unique_checks": {}, + "updatable_views_with_limit": {}, + } + + if _, found := mariaDBBooleanTypes[name]; found { + if checkBooleanRE.FindString(value) != value { + return fmt.Errorf("%s=%s expected boolean value", name, value) + } + return nil + } + + return fmt.Errorf("%s=%s was unexpected", name, value) +} diff --git a/sa/sysvars_test.go b/sa/sysvars_test.go new file mode 100644 index 00000000000..8c39b62350c --- /dev/null +++ b/sa/sysvars_test.go @@ -0,0 +1,46 @@ +package sa + +import ( + "testing" + + "github.com/letsencrypt/boulder/test" +) + +func TestCheckMariaDBSystemVariables(t *testing.T) { + type testCase struct { + key string + value string + expectErr string + } + + for _, tc := range []testCase{ + {"sql_select_limit", "'0.1", "requires a numeric value"}, + {"max_statement_time", "0", ""}, + {"myBabies", "kids_I_tell_ya", "was unexpected"}, + {"sql_mode", "'STRICT_ALL_TABLES", "string is not properly quoted"}, + {"sql_mode", "%27STRICT_ALL_TABLES%27", "string is not properly quoted"}, + {"completion_type", "1", ""}, + {"completion_type", "'2'", "integer enum is quoted, but should not be"}, + {"completion_type", "RELEASE", "string enum is not properly quoted"}, + {"completion_type", "'CHAIN'", ""}, + {"autocommit", "0", ""}, + {"check_constraint_checks", "1", ""}, + {"log_slow_query", "true", ""}, + {"foreign_key_checks", "false", ""}, + {"sql_warnings", "TrUe", ""}, + {"tx_read_only", "FalSe", ""}, + {"sql_notes", "on", ""}, + {"tcp_nodelay", "off", ""}, + {"autocommit", "2", "expected boolean value"}, + } { + t.Run(tc.key, func(t *testing.T) { + err := checkMariaDBSystemVariables(tc.key, tc.value) + if tc.expectErr == "" { + test.AssertNotError(t, err, "Unexpected error received") + } else { + test.AssertError(t, err, "Error expected, but not found") + test.AssertContains(t, err.Error(), tc.expectErr) + } + }) + } +} diff --git a/sa/test-cert.der b/sa/test-cert.der deleted file mode 100644 index 37eb6b82a84..00000000000 Binary files a/sa/test-cert.der and /dev/null differ diff --git a/sa/test-cert2.der b/sa/test-cert2.der deleted file mode 100644 index 7600377d7a3..00000000000 Binary files a/sa/test-cert2.der and /dev/null differ diff --git a/sa/type-converter.go b/sa/type-converter.go index f4ec978e239..2ec4ad27a05 100644 --- a/sa/type-converter.go +++ b/sa/type-converter.go @@ -4,19 +4,21 @@ import ( "encoding/json" "errors" "fmt" + "time" - 
"github.com/go-gorp/gorp/v3" - jose "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" + + "github.com/letsencrypt/borp" "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/identifier" ) -// BoulderTypeConverter is used by Gorp for storing objects in DB. +// BoulderTypeConverter is used by borp for storing objects in DB. type BoulderTypeConverter struct{} // ToDb converts a Boulder object to one suitable for the DB representation. -func (tc BoulderTypeConverter) ToDb(val interface{}) (interface{}, error) { +func (tc BoulderTypeConverter) ToDb(val any) (any, error) { switch t := val.(type) { case identifier.ACMEIdentifier, []core.Challenge, []string, [][]int: jsonBytes, err := json.Marshal(t) @@ -34,16 +36,28 @@ func (tc BoulderTypeConverter) ToDb(val interface{}) (interface{}, error) { return string(t), nil case core.OCSPStatus: return string(t), nil + // Time types get truncated to the nearest second. Given our DB schema, + // only seconds are stored anyhow. Avoiding sending queries with sub-second + // precision may help the query planner avoid pathological cases when + // querying against indexes on time fields (#5437). + case time.Time: + return t.Truncate(time.Second), nil + case *time.Time: + if t == nil { + return nil, nil + } + newT := t.Truncate(time.Second) + return &newT, nil default: return val, nil } } // FromDb converts a DB representation back into a Boulder object. -func (tc BoulderTypeConverter) FromDb(target interface{}) (gorp.CustomScanner, bool) { +func (tc BoulderTypeConverter) FromDb(target any) (borp.CustomScanner, bool) { switch target.(type) { case *identifier.ACMEIdentifier, *[]core.Challenge, *[]string, *[][]int: - binder := func(holder, target interface{}) error { + binder := func(holder, target any) error { s, ok := holder.(*string) if !ok { return errors.New("FromDb: Unable to convert *string") @@ -58,9 +72,9 @@ func (tc BoulderTypeConverter) FromDb(target interface{}) (gorp.CustomScanner, b } return nil } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true case *jose.JSONWebKey: - binder := func(holder, target interface{}) error { + binder := func(holder, target any) error { s, ok := holder.(*string) if !ok { return fmt.Errorf("FromDb: Unable to convert %T to *string", holder) @@ -82,9 +96,9 @@ func (tc BoulderTypeConverter) FromDb(target interface{}) (gorp.CustomScanner, b } return nil } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true case *core.AcmeStatus: - binder := func(holder, target interface{}) error { + binder := func(holder, target any) error { s, ok := holder.(*string) if !ok { return fmt.Errorf("FromDb: Unable to convert %T to *string", holder) @@ -97,9 +111,9 @@ func (tc BoulderTypeConverter) FromDb(target interface{}) (gorp.CustomScanner, b *st = core.AcmeStatus(*s) return nil } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true case *core.OCSPStatus: - binder := func(holder, target interface{}) error { + binder := func(holder, target any) error { s, ok := holder.(*string) if !ok { return fmt.Errorf("FromDb: Unable to convert %T to *string", holder) @@ -112,8 +126,8 @@ func (tc BoulderTypeConverter) FromDb(target interface{}) (gorp.CustomScanner, b *st 
= core.OCSPStatus(*s) return nil } - return gorp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true + return borp.CustomScanner{Holder: new(string), Target: target, Binder: binder}, true default: - return gorp.CustomScanner{}, false + return borp.CustomScanner{}, false } } diff --git a/sa/type-converter_test.go b/sa/type-converter_test.go index 01457e67e4c..8ca7d35d199 100644 --- a/sa/type-converter_test.go +++ b/sa/type-converter_test.go @@ -3,12 +3,13 @@ package sa import ( "encoding/json" "testing" + "time" "github.com/letsencrypt/boulder/core" "github.com/letsencrypt/boulder/identifier" "github.com/letsencrypt/boulder/test" - jose "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v4" ) const JWK1JSON = `{ @@ -48,7 +49,7 @@ func TestAcmeIdentifierBadJSON(t *testing.T) { test.AssertError(t, err, "expected error from scanner.Binder") var badJSONErr errBadJSON test.AssertErrorWraps(t, err, &badJSONErr) - test.AssertEquals(t, string(badJSONErr.json), string(badJSON)) + test.AssertEquals(t, string(badJSONErr.json), badJSON) } func TestJSONWebKey(t *testing.T) { @@ -85,7 +86,7 @@ func TestJSONWebKeyBadJSON(t *testing.T) { test.AssertError(t, err, "expected error from scanner.Binder") var badJSONErr errBadJSON test.AssertErrorWraps(t, err, &badJSONErr) - test.AssertEquals(t, string(badJSONErr.json), string(badJSON)) + test.AssertEquals(t, string(badJSONErr.json), badJSON) } func TestAcmeStatus(t *testing.T) { @@ -151,3 +152,26 @@ func TestStringSlice(t *testing.T) { test.AssertNotError(t, err, "failed to scanner.Binder") test.AssertMarshaledEquals(t, au, out) } + +func TestTimeTruncate(t *testing.T) { + tc := BoulderTypeConverter{} + preciseTime := time.Date(2024, 06, 20, 00, 00, 00, 999999999, time.UTC) + dbTime, err := tc.ToDb(preciseTime) + test.AssertNotError(t, err, "Could not ToDb") + dbTimeT, ok := dbTime.(time.Time) + test.Assert(t, ok, "Could not convert dbTime to time.Time") + test.Assert(t, dbTimeT.Nanosecond() == 0, "Nanosecond not truncated") + + dbTimePtr, err := tc.ToDb(&preciseTime) + test.AssertNotError(t, err, "Could not ToDb") + dbTimePtrT, ok := dbTimePtr.(*time.Time) + test.Assert(t, ok, "Could not convert dbTimePtr to *time.Time") + test.Assert(t, dbTimePtrT.Nanosecond() == 0, "Nanosecond not truncated") + + var dbTimePtrNil *time.Time + shouldBeNil, err := tc.ToDb(dbTimePtrNil) + test.AssertNotError(t, err, "Could not ToDb") + if shouldBeNil != nil { + t.Errorf("Expected nil, got %v", shouldBeNil) + } +} diff --git a/sa/www.eff.org.der b/sa/www.eff.org.der deleted file mode 100644 index 3b5f75b8c75..00000000000 Binary files a/sa/www.eff.org.der and /dev/null differ diff --git a/salesforce/cache.go b/salesforce/cache.go new file mode 100644 index 00000000000..2ec666d125a --- /dev/null +++ b/salesforce/cache.go @@ -0,0 +1,92 @@ +package salesforce + +import ( + "crypto/sha256" + "encoding/hex" + "sync" + + "github.com/golang/groupcache/lru" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type EmailCache struct { + sync.Mutex + cache *lru.Cache + requests *prometheus.CounterVec +} + +func NewHashedEmailCache(maxEntries int, stats prometheus.Registerer) *EmailCache { + requests := promauto.With(stats).NewCounterVec(prometheus.CounterOpts{ + Name: "email_cache_requests", + }, []string{"status"}) + + return &EmailCache{ + cache: lru.New(maxEntries), + requests: requests, + } +} + +func hashEmail(email string) string { + sum := sha256.Sum256([]byte(email)) + return 
hex.EncodeToString(sum[:]) +} + +func (c *EmailCache) Seen(email string) bool { + if c == nil { + // If the cache is nil we assume it was not configured. + return false + } + + hash := hashEmail(email) + + c.Lock() + defer c.Unlock() + + _, ok := c.cache.Get(hash) + if !ok { + c.requests.WithLabelValues("miss").Inc() + return false + } + + c.requests.WithLabelValues("hit").Inc() + return true +} + +func (c *EmailCache) Remove(email string) { + if c == nil { + // If the cache is nil we assume it was not configured. + return + } + + hash := hashEmail(email) + + c.Lock() + defer c.Unlock() + + c.cache.Remove(hash) +} + +// StoreIfAbsent stores the email in the cache if it is not already present, as +// a single atomic operation. It returns true if the email was stored and false +// if it was already in the cache. If the cache is nil, true is always returned. +func (c *EmailCache) StoreIfAbsent(email string) bool { + if c == nil { + // If the cache is nil we assume it was not configured. + return true + } + + hash := hashEmail(email) + + c.Lock() + defer c.Unlock() + + _, ok := c.cache.Get(hash) + if ok { + c.requests.WithLabelValues("hit").Inc() + return false + } + c.cache.Add(hash, nil) + c.requests.WithLabelValues("miss").Inc() + return true +} diff --git a/salesforce/email/proto/emailexporter.pb.go b/salesforce/email/proto/emailexporter.pb.go new file mode 100644 index 00000000000..75d14972499 --- /dev/null +++ b/salesforce/email/proto/emailexporter.pb.go @@ -0,0 +1,279 @@ +// NOTE: This service is deprecated in favor of salesforce.Exporter. It must be +// kept in sync with salesforce.Exporter until we have fully migrated. +// +// TODO(#8410): Remove this service once we've fully migrated to +// salesforce.Exporter + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v3.20.1 +// source: emailexporter.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SendContactsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Emails []string `protobuf:"bytes,1,rep,name=emails,proto3" json:"emails,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendContactsRequest) Reset() { + *x = SendContactsRequest{} + mi := &file_emailexporter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendContactsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendContactsRequest) ProtoMessage() {} + +func (x *SendContactsRequest) ProtoReflect() protoreflect.Message { + mi := &file_emailexporter_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendContactsRequest.ProtoReflect.Descriptor instead. 
+func (*SendContactsRequest) Descriptor() ([]byte, []int) { + return file_emailexporter_proto_rawDescGZIP(), []int{0} +} + +func (x *SendContactsRequest) GetEmails() []string { + if x != nil { + return x.Emails + } + return nil +} + +type SendCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ContactEmail string `protobuf:"bytes,4,opt,name=contactEmail,proto3" json:"contactEmail,omitempty"` + Organization string `protobuf:"bytes,5,opt,name=organization,proto3" json:"organization,omitempty"` + AccountId string `protobuf:"bytes,6,opt,name=accountId,proto3" json:"accountId,omitempty"` + RateLimitName string `protobuf:"bytes,7,opt,name=rateLimitName,proto3" json:"rateLimitName,omitempty"` + RateLimitTier string `protobuf:"bytes,8,opt,name=rateLimitTier,proto3" json:"rateLimitTier,omitempty"` + UseCase string `protobuf:"bytes,9,opt,name=useCase,proto3" json:"useCase,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendCaseRequest) Reset() { + *x = SendCaseRequest{} + mi := &file_emailexporter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendCaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendCaseRequest) ProtoMessage() {} + +func (x *SendCaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_emailexporter_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendCaseRequest.ProtoReflect.Descriptor instead. 
+func (*SendCaseRequest) Descriptor() ([]byte, []int) { + return file_emailexporter_proto_rawDescGZIP(), []int{1} +} + +func (x *SendCaseRequest) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *SendCaseRequest) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *SendCaseRequest) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SendCaseRequest) GetContactEmail() string { + if x != nil { + return x.ContactEmail + } + return "" +} + +func (x *SendCaseRequest) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *SendCaseRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *SendCaseRequest) GetRateLimitName() string { + if x != nil { + return x.RateLimitName + } + return "" +} + +func (x *SendCaseRequest) GetRateLimitTier() string { + if x != nil { + return x.RateLimitTier + } + return "" +} + +func (x *SendCaseRequest) GetUseCase() string { + if x != nil { + return x.UseCase + } + return "" +} + +var File_emailexporter_proto protoreflect.FileDescriptor + +var file_emailexporter_proto_rawDesc = string([]byte{ + 0x0a, 0x13, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x13, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, + 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x65, 0x72, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 
0x6d, 0x69, 0x74, 0x54, 0x69, + 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x43, 0x61, 0x73, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x43, 0x61, 0x73, 0x65, 0x32, 0x8a, 0x01, 0x0a, + 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0c, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x0a, + 0x08, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x73, 0x65, 0x12, 0x16, 0x2e, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61, 0x6c, 0x65, + 0x73, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_emailexporter_proto_rawDescOnce sync.Once + file_emailexporter_proto_rawDescData []byte +) + +func file_emailexporter_proto_rawDescGZIP() []byte { + file_emailexporter_proto_rawDescOnce.Do(func() { + file_emailexporter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_emailexporter_proto_rawDesc), len(file_emailexporter_proto_rawDesc))) + }) + return file_emailexporter_proto_rawDescData +} + +var file_emailexporter_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_emailexporter_proto_goTypes = []any{ + (*SendContactsRequest)(nil), // 0: email.SendContactsRequest + (*SendCaseRequest)(nil), // 1: email.SendCaseRequest + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty +} +var file_emailexporter_proto_depIdxs = []int32{ + 0, // 0: email.Exporter.SendContacts:input_type -> email.SendContactsRequest + 1, // 1: email.Exporter.SendCase:input_type -> email.SendCaseRequest + 2, // 2: email.Exporter.SendContacts:output_type -> google.protobuf.Empty + 2, // 3: email.Exporter.SendCase:output_type -> google.protobuf.Empty + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_emailexporter_proto_init() } +func file_emailexporter_proto_init() { + if File_emailexporter_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_emailexporter_proto_rawDesc), len(file_emailexporter_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_emailexporter_proto_goTypes, + DependencyIndexes: file_emailexporter_proto_depIdxs, + MessageInfos: file_emailexporter_proto_msgTypes, + }.Build() + File_emailexporter_proto = out.File + file_emailexporter_proto_goTypes = nil + file_emailexporter_proto_depIdxs = nil +} diff --git 
a/salesforce/email/proto/emailexporter.proto b/salesforce/email/proto/emailexporter.proto new file mode 100644 index 00000000000..8c3733444e8 --- /dev/null +++ b/salesforce/email/proto/emailexporter.proto @@ -0,0 +1,34 @@ +// NOTE: This service is deprecated in favor of salesforce.Exporter. It must be +// kept in sync with salesforce.Exporter until we have fully migrated. +// +// TODO(#8410): Remove this service once we've fully migrated to +// salesforce.Exporter + +syntax = "proto3"; + +package email; +option go_package = "github.com/letsencrypt/boulder/salesforce/proto"; + +import "google/protobuf/empty.proto"; + +service Exporter { + rpc SendContacts (SendContactsRequest) returns (google.protobuf.Empty); + rpc SendCase (SendCaseRequest) returns (google.protobuf.Empty); +} + +message SendContactsRequest { + repeated string emails = 1; +} + +message SendCaseRequest { + string origin = 1; + string subject = 2; + string description = 3; + string contactEmail = 4; + string organization = 5; + string accountId = 6; + string rateLimitName = 7; + string rateLimitTier = 8; + string useCase = 9; +} + diff --git a/salesforce/email/proto/emailexporter_grpc.pb.go b/salesforce/email/proto/emailexporter_grpc.pb.go new file mode 100644 index 00000000000..72e7c1b3a93 --- /dev/null +++ b/salesforce/email/proto/emailexporter_grpc.pb.go @@ -0,0 +1,166 @@ +// NOTE: This service is deprecated in favor of salesforce.Exporter. It must be +// kept in sync with salesforce.Exporter until we have fully migrated. +// +// TODO(#8410): Remove this service once we've fully migrated to +// salesforce.Exporter + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: emailexporter.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Exporter_SendContacts_FullMethodName = "/email.Exporter/SendContacts" + Exporter_SendCase_FullMethodName = "/email.Exporter/SendCase" +) + +// ExporterClient is the client API for Exporter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ExporterClient interface { + SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendCase(ctx context.Context, in *SendCaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type exporterClient struct { + cc grpc.ClientConnInterface +} + +func NewExporterClient(cc grpc.ClientConnInterface) ExporterClient { + return &exporterClient{cc} +} + +func (c *exporterClient) SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, Exporter_SendContacts_FullMethodName, in, out, cOpts...) 
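The emailexporter.proto definition above produces the ExporterClient in this generated file. A sketch of a caller, assuming an insecure connection to a placeholder address; in Boulder proper, gRPC clients are constructed with mTLS credentials and service discovery rather than a hardcoded endpoint:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	emailpb "github.com/letsencrypt/boulder/salesforce/email/proto"
)

func main() {
	// "localhost:9603" is a made-up address for this sketch.
	conn, err := grpc.NewClient("localhost:9603",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// SendContacts enqueues addresses on the server and returns Empty.
	client := emailpb.NewExporterClient(conn)
	_, err = client.SendContacts(context.Background(), &emailpb.SendContactsRequest{
		Emails: []string{"subscriber@example.com"},
	})
	if err != nil {
		log.Fatal(err)
	}
}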
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *exporterClient) SendCase(ctx context.Context, in *SendCaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, Exporter_SendCase_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ExporterServer is the server API for Exporter service. +// All implementations must embed UnimplementedExporterServer +// for forward compatibility. +type ExporterServer interface { + SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error) + SendCase(context.Context, *SendCaseRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedExporterServer() +} + +// UnimplementedExporterServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedExporterServer struct{} + +func (UnimplementedExporterServer) SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendContacts not implemented") +} +func (UnimplementedExporterServer) SendCase(context.Context, *SendCaseRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCase not implemented") +} +func (UnimplementedExporterServer) mustEmbedUnimplementedExporterServer() {} +func (UnimplementedExporterServer) testEmbeddedByValue() {} + +// UnsafeExporterServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ExporterServer will +// result in compilation errors. +type UnsafeExporterServer interface { + mustEmbedUnimplementedExporterServer() +} + +func RegisterExporterServer(s grpc.ServiceRegistrar, srv ExporterServer) { + // If the following call panics, it indicates UnimplementedExporterServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Exporter_ServiceDesc, srv) +} + +func _Exporter_SendContacts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendContactsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServer).SendContacts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Exporter_SendContacts_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServer).SendContacts(ctx, req.(*SendContactsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Exporter_SendCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServer).SendCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Exporter_SendCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServer).SendCase(ctx, req.(*SendCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Exporter_ServiceDesc is the grpc.ServiceDesc for Exporter service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Exporter_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "email.Exporter", + HandlerType: (*ExporterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendContacts", + Handler: _Exporter_SendContacts_Handler, + }, + { + MethodName: "SendCase", + Handler: _Exporter_SendCase_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "emailexporter.proto", +} diff --git a/salesforce/exporter.go b/salesforce/exporter.go new file mode 100644 index 00000000000..5c933acb0c8 --- /dev/null +++ b/salesforce/exporter.go @@ -0,0 +1,215 @@ +package salesforce + +import ( + "context" + "errors" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "golang.org/x/time/rate" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/letsencrypt/boulder/core" + berrors "github.com/letsencrypt/boulder/errors" + blog "github.com/letsencrypt/boulder/log" + salesforcepb "github.com/letsencrypt/boulder/salesforce/proto" +) + +// contactsQueueCap limits the queue size to prevent unbounded growth. This +// value is adjustable as needed. Each RFC 5321 email address, encoded in UTF-8, +// is at most 320 bytes. Storing 100,000 emails requires ~34.4 MB of memory. +const contactsQueueCap = 100000 + +var ErrQueueFull = errors.New("email-exporter queue is full") + +// ExporterImpl implements the gRPC server and processes email exports. +type ExporterImpl struct { + salesforcepb.UnsafeExporterServer + + sync.Mutex + drainWG sync.WaitGroup + // wake is used to signal workers when new emails are enqueued in toSend. + // The sync.Cond docs note that "For many simple use cases, users will be + // better off using channels." However, channels enforce FIFO ordering, + // while this implementation uses a LIFO queue. Making channels behave as + // LIFO would require extra complexity. 
Using a slice and broadcasting is + // simpler and achieves exactly what we need. + wake *sync.Cond + toSend []string + + maxConcurrentRequests int + limiter *rate.Limiter + client SalesforceClient + emailCache *EmailCache + emailsHandledCounter prometheus.Counter + pardotErrorCounter prometheus.Counter + caseErrorCounter prometheus.Counter + log blog.Logger +} + +var _ salesforcepb.ExporterServer = (*ExporterImpl)(nil) + +// NewExporterImpl initializes an ExporterImpl with the given client and +// configuration. Both perDayLimit and maxConcurrentRequests should be +// distributed proportionally among instances based on their share of the daily +// request cap. For example, if the total daily limit is 50,000 and one instance +// is assigned 40% (20,000 requests), it should also receive 40% of the max +// concurrent requests (e.g., 2 out of 5). For more details, see: +// https://developer.salesforce.com/docs/marketing/pardot/guide/overview.html?q=rate%20limits +func NewExporterImpl(client SalesforceClient, cache *EmailCache, perDayLimit float64, maxConcurrentRequests int, stats prometheus.Registerer, logger blog.Logger) *ExporterImpl { + limiter := rate.NewLimiter(rate.Limit(perDayLimit/86400.0), maxConcurrentRequests) + + emailsHandledCounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "email_exporter_emails_handled", + Help: "Total number of emails handled by the email exporter", + }) + + pardotErrorCounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "email_exporter_errors", + Help: "Total number of Pardot API errors encountered by the email exporter", + }) + + caseErrorCounter := promauto.With(stats).NewCounter(prometheus.CounterOpts{ + Name: "email_exporter_case_errors", + Help: "Total number of errors encountered when sending Cases to the Salesforce REST API", + }) + + impl := &ExporterImpl{ + maxConcurrentRequests: maxConcurrentRequests, + limiter: limiter, + toSend: make([]string, 0, contactsQueueCap), + client: client, + emailCache: cache, + emailsHandledCounter: emailsHandledCounter, + pardotErrorCounter: pardotErrorCounter, + caseErrorCounter: caseErrorCounter, + log: logger, + } + impl.wake = sync.NewCond(&impl.Mutex) + + // This metric doesn't need to be part of impl, since it computes itself + // each time it is scraped. + promauto.With(stats).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "email_exporter_queue_length", + Help: "Current length of the email export queue", + }, func() float64 { + impl.Lock() + defer impl.Unlock() + return float64(len(impl.toSend)) + }) + + return impl +} + +// SendContacts enqueues the provided email addresses. If the queue cannot +// accommodate the new emails, an ErrQueueFull is returned. +func (impl *ExporterImpl) SendContacts(ctx context.Context, req *salesforcepb.SendContactsRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req, req.Emails) { + return nil, berrors.InternalServerError("Incomplete gRPC request message") + } + + impl.Lock() + defer impl.Unlock() + + spotsLeft := contactsQueueCap - len(impl.toSend) + if spotsLeft < len(req.Emails) { + return nil, ErrQueueFull + } + impl.toSend = append(impl.toSend, req.Emails...) + // Wake waiting workers to process the new emails. + impl.wake.Broadcast() + + return &emptypb.Empty{}, nil +} + +// SendCase immediately submits a new Case to the Salesforce REST API using the +// provided details. Any retries are handled internally by the SalesforceClient. +// The following fields are required: Origin, Subject, ContactEmail. 
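Two numbers in NewExporterImpl deserve unpacking. First, the queue-cap arithmetic: a maximal RFC 5321 address is 320 bytes, so 100,000 queued addresses hold 32 MB of raw data; at roughly 344 bytes per entry once per-string bookkeeping is counted, that is the ~34.4 MB cited above. Second, the limiter construction: the daily budget is converted to a per-second refill rate, and the burst is set to the worker count so that every worker can obtain a token after an idle stretch. A runnable sketch using the illustrative 40%-share numbers from the comment above (20,000 of a 50,000/day cap, 2 of 5 concurrent requests):

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Hypothetical instance share, per the proportionality example above.
	perDayLimit := 20000.0
	maxConcurrentRequests := 2

	// Mirrors NewExporterImpl: 20000/86400 ≈ 0.23 tokens/second, with a
	// burst equal to the number of concurrent workers.
	limiter := rate.NewLimiter(rate.Limit(perDayLimit/86400.0), maxConcurrentRequests)
	fmt.Printf("refill: %.3f tokens/sec, burst: %d\n", limiter.Limit(), limiter.Burst())
}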
+func (impl *ExporterImpl) SendCase(ctx context.Context, req *salesforcepb.SendCaseRequest) (*emptypb.Empty, error) { + if core.IsAnyNilOrZero(req, req.Origin, req.Subject, req.ContactEmail) { + return nil, berrors.InternalServerError("incomplete gRPC request message") + } + + err := impl.client.SendCase(Case{ + Origin: req.Origin, + Subject: req.Subject, + Description: req.Description, + ContactEmail: req.ContactEmail, + Organization: req.Organization, + AccountId: req.AccountId, + RateLimitName: req.RateLimitName, + RateLimitTier: req.RateLimitTier, + UseCase: req.UseCase, + }) + if err != nil { + impl.caseErrorCounter.Inc() + return nil, berrors.InternalServerError("sending Case to the Salesforce REST API: %s", err) + } + + return &emptypb.Empty{}, nil +} + +// Start begins asynchronous processing of the email queue. When the parent +// daemonCtx is cancelled the queue will be drained and the workers will exit. +func (impl *ExporterImpl) Start(daemonCtx context.Context) { + go func() { + <-daemonCtx.Done() + // Wake waiting workers to exit. + impl.wake.Broadcast() + }() + + worker := func() { + defer impl.drainWG.Done() + for { + impl.Lock() + + for len(impl.toSend) == 0 && daemonCtx.Err() == nil { + // Wait for the queue to be updated or the daemon to exit. + impl.wake.Wait() + } + + if len(impl.toSend) == 0 && daemonCtx.Err() != nil { + // No more emails to process, exit. + impl.Unlock() + return + } + + // Dequeue and dispatch an email. + last := len(impl.toSend) - 1 + email := impl.toSend[last] + impl.toSend = impl.toSend[:last] + impl.Unlock() + + if !impl.emailCache.StoreIfAbsent(email) { + // Another worker has already processed this email. + continue + } + + err := impl.limiter.Wait(daemonCtx) + if err != nil && !errors.Is(err, context.Canceled) { + impl.log.Errf("Unexpected limiter.Wait() error: %s", err) + continue + } + + err = impl.client.SendContact(email) + if err != nil { + impl.emailCache.Remove(email) + impl.pardotErrorCounter.Inc() + impl.log.Errf("Sending Contact to Pardot: %s", err) + } else { + impl.emailsHandledCounter.Inc() + } + } + } + + for range impl.maxConcurrentRequests { + impl.drainWG.Add(1) + go worker() + } +} + +// Drain blocks until all workers have finished processing the email queue. +func (impl *ExporterImpl) Drain() { + impl.drainWG.Wait() +} diff --git a/salesforce/exporter_test.go b/salesforce/exporter_test.go new file mode 100644 index 00000000000..d7d818a8191 --- /dev/null +++ b/salesforce/exporter_test.go @@ -0,0 +1,309 @@ +package salesforce + +import ( + "context" + "fmt" + "slices" + "sync" + "testing" + "time" + + blog "github.com/letsencrypt/boulder/log" + "github.com/letsencrypt/boulder/metrics" + salesforcepb "github.com/letsencrypt/boulder/salesforce/proto" + "github.com/letsencrypt/boulder/test" + + "github.com/prometheus/client_golang/prometheus" +) + +var ctx = context.Background() + +var _ SalesforceClient = (*mockSalesforceClientImpl)(nil) + +// mockSalesforceClientImpl is a mock implementation of SalesforceClient. +type mockSalesforceClientImpl struct { + SalesforceClient + + sync.Mutex + CreatedContacts []string + CreatedCases []Case +} + +// newMockSalesforceClientImpl returns a mockSalesforceClientImpl, which implements +// the SalesforceClient interface. It returns the underlying concrete type, so callers +// have access to its struct members and helper methods. +func newMockSalesforceClientImpl() *mockSalesforceClientImpl { + return &mockSalesforceClientImpl{} +}
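The worker pool in Start (above) is the sync.Cond pattern promised by the struct comment: workers sleep in wake.Wait() while the slice is empty, SendContacts and the shutdown goroutine call Broadcast, and each pop takes the most recently queued address. The same discipline in standalone, runnable form; the lifo type below is an illustration written for this note, not code from the diff:

package main

import (
	"fmt"
	"sync"
)

// lifo is a mutex-guarded stack whose consumers sleep on a sync.Cond.
type lifo struct {
	mu     sync.Mutex
	wake   *sync.Cond
	items  []string
	closed bool
}

func newLIFO() *lifo {
	q := &lifo{}
	q.wake = sync.NewCond(&q.mu)
	return q
}

func (q *lifo) push(s string) {
	q.mu.Lock()
	q.items = append(q.items, s)
	q.mu.Unlock()
	q.wake.Broadcast() // wake any waiting consumer
}

// pop blocks until an item is available or the queue is closed and drained.
func (q *lifo) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 && !q.closed {
		q.wake.Wait()
	}
	if len(q.items) == 0 {
		return "", false // closed and fully drained
	}
	last := len(q.items) - 1
	s := q.items[last]
	q.items = q.items[:last]
	return s, true
}

func (q *lifo) close() {
	q.mu.Lock()
	q.closed = true
	q.mu.Unlock()
	q.wake.Broadcast() // wake consumers so they can observe closed
}

func main() {
	q := newLIFO()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for s, ok := q.pop(); ok; s, ok = q.pop() {
			fmt.Println("sent:", s)
		}
	}()
	q.push("a@example.com")
	q.push("b@example.com")
	q.close()
	wg.Wait()
}

As in ExporterImpl, the queue drains fully before consumers exit: close only signals that no more items are coming.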
+ +// SendContact adds an email to CreatedContacts. +func (m *mockSalesforceClientImpl) SendContact(email string) error { + m.Lock() + defer m.Unlock() + m.CreatedContacts = append(m.CreatedContacts, email) + return nil +} + +func (m *mockSalesforceClientImpl) getCreatedContacts() []string { + m.Lock() + defer m.Unlock() + + // Return a copy to avoid race conditions. + return slices.Clone(m.CreatedContacts) +} + +func (m *mockSalesforceClientImpl) SendCase(payload Case) error { + m.Lock() + defer m.Unlock() + m.CreatedCases = append(m.CreatedCases, payload) + return nil +} + +func (m *mockSalesforceClientImpl) getCreatedCases() []Case { + m.Lock() + defer m.Unlock() + + // Return a copy to avoid race conditions. + return slices.Clone(m.CreatedCases) +} + +// setup creates a new ExporterImpl, a mockSalesforceClientImpl, and the start and +// cleanup functions for the ExporterImpl. Call start() to begin processing the +// ExporterImpl queue and cleanup() to drain and shut down. If start() is called, +// cleanup() must be called. +func setup() (*ExporterImpl, *mockSalesforceClientImpl, func(), func()) { + clientImpl := newMockSalesforceClientImpl() + exporter := NewExporterImpl(clientImpl, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + daemonCtx, cancel := context.WithCancel(context.Background()) + return exporter, clientImpl, + func() { exporter.Start(daemonCtx) }, + func() { + cancel() + exporter.Drain() + } +} + +func TestSendContacts(t *testing.T) { + t.Parallel() + + exporter, clientImpl, start, cleanup := setup() + start() + defer cleanup() + + wantContacts := []string{"test@example.com", "user@example.com"} + _, err := exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: wantContacts, + }) + test.AssertNotError(t, err, "Error creating contacts") + + var gotContacts []string + for range 100 { + gotContacts = clientImpl.getCreatedContacts() + if len(gotContacts) == 2 { + break + } + time.Sleep(5 * time.Millisecond) + } + test.AssertSliceContains(t, gotContacts, wantContacts[0]) + test.AssertSliceContains(t, gotContacts, wantContacts[1]) + + // Check that the error counter was not incremented. + test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 0) +} + +func TestSendContactsQueueFull(t *testing.T) { + t.Parallel() + + exporter, _, start, cleanup := setup() + start() + defer cleanup() + + var err error + for range contactsQueueCap * 2 { + _, err = exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: []string{"test@example.com"}, + }) + if err != nil { + break + } + } + test.AssertErrorIs(t, err, ErrQueueFull) +} + +func TestSendContactsQueueDrains(t *testing.T) { + t.Parallel() + + exporter, clientImpl, start, cleanup := setup() + start() + + var emails []string + for i := range 100 { + emails = append(emails, fmt.Sprintf("test@%d.example.com", i)) + } + + _, err := exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: emails, + }) + test.AssertNotError(t, err, "Error creating contacts") + + // Drain the queue.
+ cleanup() + + test.AssertEquals(t, 100, len(clientImpl.getCreatedContacts())) +} + +type mockAlwaysFailClient struct { + mockSalesforceClientImpl +} + +func (m *mockAlwaysFailClient) SendContact(email string) error { + return fmt.Errorf("simulated failure") +} + +func TestSendContactsErrorMetrics(t *testing.T) { + t.Parallel() + + mockClient := &mockAlwaysFailClient{} + exporter := NewExporterImpl(mockClient, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + + daemonCtx, cancel := context.WithCancel(context.Background()) + exporter.Start(daemonCtx) + + _, err := exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: []string{"test@example.com"}, + }) + test.AssertNotError(t, err, "Error creating contacts") + + // Drain the queue. + cancel() + exporter.Drain() + + // Check that the error counter was incremented. + test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 1) +} + +func TestSendContactDeduplication(t *testing.T) { + t.Parallel() + + cache := NewHashedEmailCache(1000, metrics.NoopRegisterer) + clientImpl := newMockSalesforceClientImpl() + exporter := NewExporterImpl(clientImpl, cache, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + + daemonCtx, cancel := context.WithCancel(context.Background()) + exporter.Start(daemonCtx) + + _, err := exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: []string{"duplicate@example.com", "duplicate@example.com"}, + }) + test.AssertNotError(t, err, "Error enqueuing contacts") + + // Drain the queue. + cancel() + exporter.Drain() + + contacts := clientImpl.getCreatedContacts() + test.AssertEquals(t, 1, len(contacts)) + test.AssertEquals(t, "duplicate@example.com", contacts[0]) + + // Only one successful send should be recorded. + test.AssertMetricWithLabelsEquals(t, exporter.emailsHandledCounter, prometheus.Labels{}, 1) + + if !cache.Seen("duplicate@example.com") { + t.Errorf("duplicate@example.com should have been cached after send") + } +} + +func TestSendContactErrorRemovesFromCache(t *testing.T) { + t.Parallel() + + cache := NewHashedEmailCache(1000, metrics.NoopRegisterer) + fc := &mockAlwaysFailClient{} + + exporter := NewExporterImpl(fc, cache, 1000000, 1, metrics.NoopRegisterer, blog.NewMock()) + + daemonCtx, cancel := context.WithCancel(context.Background()) + exporter.Start(daemonCtx) + + _, err := exporter.SendContacts(ctx, &salesforcepb.SendContactsRequest{ + Emails: []string{"error@example.com"}, + }) + test.AssertNotError(t, err, "enqueue failed") + + // Drain the queue. + cancel() + exporter.Drain() + + // The email should have been evicted from the cache after send encountered + // an error. + if cache.Seen("error@example.com") { + t.Errorf("error@example.com should have been evicted from cache after send errors") + } + + // Check that the error counter was incremented. 
+ test.AssertMetricWithLabelsEquals(t, exporter.pardotErrorCounter, prometheus.Labels{}, 1) +} + +func TestSendCase(t *testing.T) { + t.Parallel() + + clientImpl := newMockSalesforceClientImpl() + exporter := NewExporterImpl(clientImpl, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + + _, err := exporter.SendCase(ctx, &salesforcepb.SendCaseRequest{ + Origin: "Web", + Subject: "Some Override", + Description: "Please review", + ContactEmail: "foo@example.com", + }) + test.AssertNotError(t, err, "SendCase should succeed") + + got := clientImpl.getCreatedCases() + if len(got) != 1 { + t.Fatalf("expected 1 case, got %d", len(got)) + } + test.AssertEquals(t, got[0].Origin, "Web") + test.AssertEquals(t, got[0].Subject, "Some Override") + test.AssertEquals(t, got[0].Description, "Please review") + test.AssertEquals(t, got[0].ContactEmail, "foo@example.com") + test.AssertMetricWithLabelsEquals(t, exporter.caseErrorCounter, prometheus.Labels{}, 0) +} + +type mockAlwaysFailCaseClient struct { + mockSalesforceClientImpl +} + +func (m *mockAlwaysFailCaseClient) SendCase(payload Case) error { + return fmt.Errorf("oops, lol") +} + +func TestSendCaseClientErrorIncrementsMetric(t *testing.T) { + t.Parallel() + + mockClient := &mockAlwaysFailCaseClient{} + exporter := NewExporterImpl(mockClient, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + + _, err := exporter.SendCase(ctx, &salesforcepb.SendCaseRequest{ + Origin: "Web", + Subject: "Some Override", + Description: "Please review", + ContactEmail: "foo@bar.baz", + }) + test.AssertError(t, err, "SendCase should return error on client failure") + test.AssertMetricWithLabelsEquals(t, exporter.caseErrorCounter, prometheus.Labels{}, 1) +} + +func TestSendCaseMissingOriginValidation(t *testing.T) { + t.Parallel() + + clientImpl := newMockSalesforceClientImpl() + exporter := NewExporterImpl(clientImpl, nil, 1000000, 5, metrics.NoopRegisterer, blog.NewMock()) + + _, err := exporter.SendCase(ctx, &salesforcepb.SendCaseRequest{Subject: "No origin in this one, d00d"}) + test.AssertError(t, err, "SendCase should fail validation when Origin is missing") + + got := clientImpl.getCreatedCases() + if len(got) != 0 { + t.Errorf("expected 0 cases due to validation error, got %d", len(got)) + } + test.AssertMetricWithLabelsEquals(t, exporter.caseErrorCounter, prometheus.Labels{}, 0) +} diff --git a/salesforce/pardot.go b/salesforce/pardot.go new file mode 100644 index 00000000000..06eaf4f215e --- /dev/null +++ b/salesforce/pardot.go @@ -0,0 +1,334 @@ +package salesforce + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "sync" + "time" + + "github.com/jmhodges/clock" + "github.com/letsencrypt/boulder/core" +) + +const ( + // tokenPath is the path to the Salesforce OAuth2 token endpoint. + tokenPath = "/services/oauth2/token" + + // contactsPath is the path to the Pardot v5 Prospect upsert-by-email + // endpoint. This endpoint will create a new Prospect if one does not + // already exist with the same email address. + // + // https://developer.salesforce.com/docs/marketing/pardot/guide/prospect-v5.html#prospect-upsert-by-email + contactsPath = "/api/v5/objects/prospects/do/upsertLatestByEmail" + + // casesPath is the path to create a new Case object in Salesforce. This + // path includes the API version (v64.0). Normally, Salesforce maintains + // backward compatibility across versions. Update only if Salesforce retires + // this API version (rare) or we want to make use of new Case fields + // (unlikely). 
+ // + // To check the current version for our org, see "Identify your current API + // version": https://help.salesforce.com/s/articleView?id=000386929&type=1 + casesPath = "/services/data/v64.0/sobjects/Case" + + // maxAttempts is the maximum number of attempts to retry a request. + maxAttempts = 3 + + // retryBackoffBase is the base for exponential backoff. + retryBackoffBase = 2.0 + + // retryBackoffMax is the maximum backoff time. + retryBackoffMax = 10 * time.Second + + // retryBackoffMin is the minimum backoff time. + retryBackoffMin = 200 * time.Millisecond + + // tokenExpirationBuffer is the time before the token expires that we will + // attempt to refresh it. + tokenExpirationBuffer = 5 * time.Minute +) + +// SalesforceClient is an interface for interacting with a limited set of +// Salesforce APIs. It exists to facilitate testing mocks. +type SalesforceClient interface { + SendContact(email string) error + SendCase(payload Case) error +} + +// oAuthToken holds the OAuth2 access token and its expiration. +type oAuthToken struct { + sync.Mutex + + accessToken string + expiresAt time.Time +} + +// SalesforceClientImpl handles authentication and sending contacts to Pardot +// and creating Cases in Salesforce. +type SalesforceClientImpl struct { + businessUnit string + clientId string + clientSecret string + pardotURL string + casesURL string + tokenURL string + token *oAuthToken + clk clock.Clock +} + +var _ SalesforceClient = (*SalesforceClientImpl)(nil) + +// NewSalesforceClientImpl creates a new SalesforceClientImpl. +func NewSalesforceClientImpl(clk clock.Clock, businessUnit, clientId, clientSecret, salesforceBaseURL, pardotBaseURL string) (*SalesforceClientImpl, error) { + pardotURL, err := url.JoinPath(pardotBaseURL, contactsPath) + if err != nil { + return nil, fmt.Errorf("failed to join contacts path: %w", err) + } + tokenURL, err := url.JoinPath(salesforceBaseURL, tokenPath) + if err != nil { + return nil, fmt.Errorf("failed to join token path: %w", err) + } + casesURL, err := url.JoinPath(salesforceBaseURL, casesPath) + if err != nil { + return nil, fmt.Errorf("failed to join cases path: %w", err) + } + + return &SalesforceClientImpl{ + businessUnit: businessUnit, + clientId: clientId, + clientSecret: clientSecret, + pardotURL: pardotURL, + casesURL: casesURL, + tokenURL: tokenURL, + token: &oAuthToken{}, + clk: clk, + }, nil +} + +type oauthTokenResp struct { + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` +} + +// updateToken updates the OAuth token if necessary. 
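maxAttempts and the three retryBackoff* constants above feed core.RetryBackoff, Boulder's jittered exponential backoff helper: it returns a zero duration when the attempt counter is zero, so the first pass through each retry loop below runs immediately, and later passes sleep a jittered, roughly doubling interval starting near retryBackoffMin. A runnable illustration of the schedule (exact values vary run to run because of the jitter):

package main

import (
	"fmt"
	"time"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	// Same arguments the retry loops in this file pass: min 200ms, max 10s,
	// growth factor 2.0, over maxAttempts = 3 attempts.
	for attempt := 0; attempt < 3; attempt++ {
		d := core.RetryBackoff(attempt, 200*time.Millisecond, 10*time.Second, 2.0)
		fmt.Printf("attempt %d: sleep %v\n", attempt, d)
	}
}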
+func (pc *SalesforceClientImpl) updateToken() error { + pc.token.Lock() + defer pc.token.Unlock() + + now := pc.clk.Now() + if now.Before(pc.token.expiresAt.Add(-tokenExpirationBuffer)) && pc.token.accessToken != "" { + return nil + } + + resp, err := http.PostForm(pc.tokenURL, url.Values{ + "grant_type": {"client_credentials"}, + "client_id": {pc.clientId}, + "client_secret": {pc.clientSecret}, + }) + if err != nil { + return fmt.Errorf("failed to retrieve token: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, readErr := io.ReadAll(resp.Body) + if readErr != nil { + return fmt.Errorf("token request failed with status %d; while reading body: %w", resp.StatusCode, readErr) + } + return fmt.Errorf("token request failed with status %d: %s", resp.StatusCode, body) + } + + var respJSON oauthTokenResp + err = json.NewDecoder(resp.Body).Decode(&respJSON) + if err != nil { + return fmt.Errorf("failed to decode token response: %w", err) + } + pc.token.accessToken = respJSON.AccessToken + pc.token.expiresAt = pc.clk.Now().Add(time.Duration(respJSON.ExpiresIn) * time.Second) + + return nil +} + +// redactEmail replaces all occurrences of an email address in a response body +// with "[REDACTED]". +func redactEmail(body []byte, email string) string { + return string(bytes.ReplaceAll(body, []byte(email), []byte("[REDACTED]"))) +} + +type prospect struct { + // Email is the email address of the prospect. + Email string `json:"email"` +} + +type upsertPayload struct { + // MatchEmail is the email address to match against existing prospects to + // avoid adding duplicates. + MatchEmail string `json:"matchEmail"` + // Prospect is the prospect data to be upserted. + Prospect prospect `json:"prospect"` +} + +// SendContact submits an email to the Pardot Contacts endpoint, retrying up +// to 3 times with exponential backoff. 
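Note the shape of the freshness test in updateToken: the token is refreshed not when it has expired, but as soon as the clock comes within tokenExpirationBuffer (five minutes) of expiresAt, leaving a safety margin for in-flight requests. Restated as a standalone predicate; needsRefresh is a hypothetical helper written for this note, not part of the diff:

// needsRefresh mirrors updateToken's check: refresh when no token is held,
// or when "now" has reached expiresAt minus the buffer. (Illustrative only.)
func needsRefresh(accessToken string, expiresAt, now time.Time) bool {
	return accessToken == "" || !now.Before(expiresAt.Add(-tokenExpirationBuffer))
}

TestSendContactTokenExpiry later in this diff exercises this behavior by advancing a fake clock past the token lifetime.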
+func (pc *SalesforceClientImpl) SendContact(email string) error { + var err error + for attempt := range maxAttempts { + time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase)) + err = pc.updateToken() + if err != nil { + continue + } + break + } + if err != nil { + return fmt.Errorf("failed to update token: %w", err) + } + + payload, err := json.Marshal(upsertPayload{ + MatchEmail: email, + Prospect: prospect{Email: email}, + }) + if err != nil { + return fmt.Errorf("failed to marshal payload: %w", err) + } + + var finalErr error + for attempt := range maxAttempts { + time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase)) + + req, err := http.NewRequest("POST", pc.pardotURL, bytes.NewReader(payload)) + if err != nil { + finalErr = fmt.Errorf("failed to create new contact request: %w", err) + continue + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+pc.token.accessToken) + req.Header.Set("Pardot-Business-Unit-Id", pc.businessUnit) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + finalErr = fmt.Errorf("create contact request failed: %w", err) + continue + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + resp.Body.Close() + return nil + } + + body, err := io.ReadAll(resp.Body) + resp.Body.Close() + + if err != nil { + finalErr = fmt.Errorf("create contact request returned status %d; while reading body: %w", resp.StatusCode, err) + continue + } + finalErr = fmt.Errorf("create contact request returned status %d: %s", resp.StatusCode, redactEmail(body, email)) + continue + } + + return finalErr +} + +// Case represents the payload for populating a new Case object in Salesforce. +// For more information, see: +// https://developer.salesforce.com/docs/atlas.en-us.object_reference.meta/object_reference/sforce_api_objects_case.htm +// https://help.salesforce.com/s/articleView?id=platform.custom_field_types.htm&type=5 +type Case struct { + // Origin is required in all requests; a safe default is "Web". + Origin string `json:"Origin"` + + // Subject is an optional standard field. Max length: 255 characters. + Subject string `json:"Subject,omitempty"` + + // Description is an optional standard field. Max length: 32,768 characters. + Description string `json:"Description,omitempty"` + + // ContactEmail is an optional standard field indicating the email address + // of the requester. Max length: 80 characters. + ContactEmail string `json:"ContactEmail,omitempty"` + + // Note: Fields below this point are optional custom fields. + + // Organization indicates the name of the requesting organization. Max + // length: 255 characters. + Organization string `json:"Organization__c,omitempty"` + + // AccountId indicates the requester's ACME Account ID. Max length: 255 + // characters. + AccountId string `json:"Account_ID__c,omitempty"` + + // RateLimitName indicates which rate limit the override request is for. Max + // length: 255 characters. + RateLimitName string `json:"Rate_Limit_Name__c,omitempty"` + + // RateLimitTier indicates the requested tier of the rate limit override. Max + // length: 255 characters. + RateLimitTier string `json:"Rate_Limit_Tier__c,omitempty"` + + // UseCase indicates the intended use case supplied by the requester. Max + // length: 131,072 characters. + UseCase string `json:"Use_Case__c,omitempty"` +} + +// SendCase submits a new Case object to Salesforce.
For more information, see: +// https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_sobject_create.htm +func (pc *SalesforceClientImpl) SendCase(payload Case) error { + var err error + for attempt := range maxAttempts { + time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase)) + err = pc.updateToken() + if err == nil { + break + } + } + if err != nil { + return fmt.Errorf("failed to update token: %w", err) + } + + body, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("failed to marshal case payload: %w", err) + } + + var finalErr error + for attempt := range maxAttempts { + time.Sleep(core.RetryBackoff(attempt, retryBackoffMin, retryBackoffMax, retryBackoffBase)) + + req, err := http.NewRequest("POST", pc.casesURL, bytes.NewReader(body)) + if err != nil { + finalErr = fmt.Errorf("failed to create new case request: %w", err) + continue + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+pc.token.accessToken) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + finalErr = fmt.Errorf("create case request failed: %w", err) + continue + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + resp.Body.Close() + return nil + } + + respBody, err := io.ReadAll(resp.Body) + resp.Body.Close() + + if err != nil { + finalErr = fmt.Errorf("create case request returned status %d; while reading body: %w", resp.StatusCode, err) + continue + } + + finalErr = fmt.Errorf("create case request returned status %d: %s", resp.StatusCode, redactEmail(respBody, payload.ContactEmail)) + continue + } + + return finalErr +} diff --git a/salesforce/pardot_test.go b/salesforce/pardot_test.go new file mode 100644 index 00000000000..11a4648a461 --- /dev/null +++ b/salesforce/pardot_test.go @@ -0,0 +1,328 @@ +package salesforce + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/jmhodges/clock" + "github.com/letsencrypt/boulder/test" +) + +func defaultTokenHandler(w http.ResponseWriter, r *http.Request) { + err := json.NewEncoder(w).Encode(oauthTokenResp{ + AccessToken: "dummy", + ExpiresIn: 3600, + }) + if err != nil { + // This should never happen. 
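The Case struct defined above is what both SendCase paths ultimately serialize: standard Salesforce fields keep their plain names, the custom fields carry Salesforce's __c suffix, and omitempty drops whatever the caller leaves unset. A small illustration of the resulting wire format, with made-up values:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/salesforce"
)

func main() {
	b, err := json.MarshalIndent(salesforce.Case{
		Origin:        "Web",
		Subject:       "Rate Limit Override Request",
		ContactEmail:  "subscriber@example.com",
		RateLimitName: "NewOrdersPerAccount", // example name, for illustration
	}, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
	// Output:
	// {
	//   "Origin": "Web",
	//   "Subject": "Rate Limit Override Request",
	//   "ContactEmail": "subscriber@example.com",
	//   "Rate_Limit_Name__c": "NewOrdersPerAccount"
	// }
}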
+ w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("failed to encode token")) + return + } +} + +func TestSendContactSuccess(t *testing.T) { + t.Parallel() + + contactHandler := func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Authorization") != "Bearer dummy" { + w.WriteHeader(http.StatusUnauthorized) + return + } + w.WriteHeader(http.StatusOK) + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + clk := clock.NewFake() + client, err := NewSalesforceClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "failed to create client") + + err = client.SendContact("test@example.com") + test.AssertNotError(t, err, "SendContact should succeed") +} + +func TestSendContactUpdateTokenFails(t *testing.T) { + t.Parallel() + + tokenHandlerThatAlwaysErrors := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintln(w, "token error") + } + + contactHandler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(tokenHandlerThatAlwaysErrors)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + clk := clock.NewFake() + client, err := NewSalesforceClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "Failed to create client") + + err = client.SendContact("test@example.com") + test.AssertError(t, err, "Expected token update to fail") + test.AssertContains(t, err.Error(), "failed to update token") +} + +func TestSendContact4xx(t *testing.T) { + t.Parallel() + + contactHandler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, err := io.WriteString(w, "bad request") + test.AssertNotError(t, err, "failed to write response") + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + clk := clock.NewFake() + client, err := NewSalesforceClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "Failed to create client") + + err = client.SendContact("test@example.com") + test.AssertError(t, err, "Should fail on 400") + test.AssertContains(t, err.Error(), "create contact request returned status 400") +} + +func TestSendContactTokenExpiry(t *testing.T) { + t.Parallel() + + // tokenHandler returns "old_token" on the first call and "new_token" on subsequent calls. + tokenRetrieved := false + tokenHandler := func(w http.ResponseWriter, r *http.Request) { + token := "new_token" + if !tokenRetrieved { + token = "old_token" + tokenRetrieved = true + } + err := json.NewEncoder(w).Encode(oauthTokenResp{ + AccessToken: token, + ExpiresIn: 3600, + }) + test.AssertNotError(t, err, "failed to encode token") + } + + // contactHandler expects "old_token" for the first request and "new_token" for the next. 
+ firstRequest := true + contactHandler := func(w http.ResponseWriter, r *http.Request) { + expectedToken := "new_token" + if firstRequest { + expectedToken = "old_token" + firstRequest = false + } + if r.Header.Get("Authorization") != "Bearer "+expectedToken { + w.WriteHeader(http.StatusUnauthorized) + return + } + w.WriteHeader(http.StatusOK) + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(tokenHandler)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + clk := clock.NewFake() + client, err := NewSalesforceClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "Failed to create client") + + // First call uses the initial token ("old_token"). + err = client.SendContact("test@example.com") + test.AssertNotError(t, err, "SendContact should succeed with the initial token") + + // Advance time to force token expiry. + clk.Add(3601 * time.Second) + + // Second call should refresh the token to "new_token". + err = client.SendContact("test@example.com") + test.AssertNotError(t, err, "SendContact should succeed after refreshing the token") +} + +func TestSendContactServerErrorsAfterMaxAttempts(t *testing.T) { + t.Parallel() + + gotAttempts := 0 + contactHandler := func(w http.ResponseWriter, r *http.Request) { + gotAttempts++ + w.WriteHeader(http.StatusServiceUnavailable) + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + client, err := NewSalesforceClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "Failed to create Salesforce API client") + + err = client.SendContact("test@example.com") + test.AssertError(t, err, "Should fail after retrying all attempts") + test.AssertEquals(t, maxAttempts, gotAttempts) + test.AssertContains(t, err.Error(), "create contact request returned status 503") +} + +func TestSendContactRedactsEmail(t *testing.T) { + t.Parallel() + + emailToTest := "test@example.com" + + contactHandler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + // Intentionally include the request email in the response body. 
+ resp := fmt.Sprintf("error: %s is invalid", emailToTest) + _, err := io.WriteString(w, resp) + test.AssertNotError(t, err, "failed to write response") + } + + tokenSrv := httptest.NewServer(http.HandlerFunc(defaultTokenHandler)) + defer tokenSrv.Close() + + contactSrv := httptest.NewServer(http.HandlerFunc(contactHandler)) + defer contactSrv.Close() + + clk := clock.NewFake() + client, err := NewSalesforceClientImpl(clk, "biz-unit", "cid", "csec", tokenSrv.URL, contactSrv.URL) + test.AssertNotError(t, err, "failed to create client") + + err = client.SendContact(emailToTest) + test.AssertError(t, err, "SendContact should fail") + test.AssertNotContains(t, err.Error(), emailToTest) + test.AssertContains(t, err.Error(), "[REDACTED]") +} + +func TestSendCaseSuccess(t *testing.T) { + t.Parallel() + + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/services/oauth2/token": + defaultTokenHandler(w, r) + case "/services/data/v64.0/sobjects/Case": + if r.Header.Get("Authorization") != "Bearer dummy" { + w.WriteHeader(http.StatusUnauthorized) + return + } + w.WriteHeader(http.StatusCreated) + w.Write([]byte(`{"id":"500xx000001ABCdAAH","success":true,"errors":[]}`)) + default: + t.Errorf("unexpected path: %s", r.URL.Path) + w.WriteHeader(http.StatusNotFound) + } + } + + salesforceSrv := httptest.NewServer(http.HandlerFunc(handler)) + defer salesforceSrv.Close() + + client, err := NewSalesforceClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", salesforceSrv.URL, "") + test.AssertNotError(t, err, "failed to create client") + + err = client.SendCase(Case{ + Subject: "Unit Test Case", + Origin: "Web", + }) + test.AssertNotError(t, err, "SendCase should succeed") +} + +func TestSendCaseUpdateTokenFails(t *testing.T) { + t.Parallel() + + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/services/oauth2/token": + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintln(w, "token error") + case "/services/data/v64.0/sobjects/Case": + w.WriteHeader(http.StatusOK) // should never reach here + default: + t.Errorf("unexpected path: %s", r.URL.Path) + w.WriteHeader(http.StatusNotFound) + } + } + + salesforceSrv := httptest.NewServer(http.HandlerFunc(handler)) + defer salesforceSrv.Close() + + client, err := NewSalesforceClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", salesforceSrv.URL, "") + test.AssertNotError(t, err, "Failed to create client") + + err = client.SendCase(Case{Subject: "fail", Origin: "Web"}) + test.AssertError(t, err, "Expected token update to fail") + test.AssertContains(t, err.Error(), "failed to update token") +} + +func TestSendCase4xx(t *testing.T) { + t.Parallel() + + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/services/oauth2/token": + defaultTokenHandler(w, r) + case "/services/data/v64.0/sobjects/Case": + w.WriteHeader(http.StatusBadRequest) + _, err := io.WriteString(w, "bad request") + test.AssertNotError(t, err, "failed to write response") + default: + t.Errorf("unexpected path: %s", r.URL.Path) + w.WriteHeader(http.StatusNotFound) + } + } + + salesforceSrv := httptest.NewServer(http.HandlerFunc(handler)) + defer salesforceSrv.Close() + + client, err := NewSalesforceClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", salesforceSrv.URL, "") + test.AssertNotError(t, err, "Failed to create client") + + err = client.SendCase(Case{Subject: "bad", Origin: "Web"}) + test.AssertError(t, err, "Should fail on 400") + test.AssertContains(t, err.Error(), 
"create case request returned status 400") +} + +func TestSendCaseServerErrorsAfterMaxAttempts(t *testing.T) { + t.Parallel() + + gotAttempts := 0 + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/services/oauth2/token": + defaultTokenHandler(w, r) + case "/services/data/v64.0/sobjects/Case": + gotAttempts++ + w.WriteHeader(http.StatusServiceUnavailable) + default: + t.Errorf("unexpected path: %s", r.URL.Path) + w.WriteHeader(http.StatusNotFound) + } + } + + salesforceSrv := httptest.NewServer(http.HandlerFunc(handler)) + defer salesforceSrv.Close() + + client, err := NewSalesforceClientImpl(clock.NewFake(), "biz-unit", "cid", "csec", salesforceSrv.URL, "") + test.AssertNotError(t, err, "Failed to create client") + + err = client.SendCase(Case{Subject: "retry", Origin: "Web"}) + test.AssertError(t, err, "Should fail after retrying all attempts") + test.AssertEquals(t, maxAttempts, gotAttempts) + test.AssertContains(t, err.Error(), "create case request returned status 503") +} diff --git a/salesforce/proto/exporter.pb.go b/salesforce/proto/exporter.pb.go new file mode 100644 index 00000000000..777fd754ff0 --- /dev/null +++ b/salesforce/proto/exporter.pb.go @@ -0,0 +1,279 @@ +// NOTE: Any changes to this service MUST also be made to email.Exporter and +// kept in sync with the adapter in cmd/email-exporter/main.go. +// +// TODO(#8410): Remove this comment once we've fully migrated to +// salesforce.Exporter. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc v3.20.1 +// source: exporter.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SendContactsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Emails []string `protobuf:"bytes,1,rep,name=emails,proto3" json:"emails,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendContactsRequest) Reset() { + *x = SendContactsRequest{} + mi := &file_exporter_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendContactsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendContactsRequest) ProtoMessage() {} + +func (x *SendContactsRequest) ProtoReflect() protoreflect.Message { + mi := &file_exporter_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendContactsRequest.ProtoReflect.Descriptor instead. 
+func (*SendContactsRequest) Descriptor() ([]byte, []int) { + return file_exporter_proto_rawDescGZIP(), []int{0} +} + +func (x *SendContactsRequest) GetEmails() []string { + if x != nil { + return x.Emails + } + return nil +} + +type SendCaseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ContactEmail string `protobuf:"bytes,4,opt,name=contactEmail,proto3" json:"contactEmail,omitempty"` + Organization string `protobuf:"bytes,5,opt,name=organization,proto3" json:"organization,omitempty"` + AccountId string `protobuf:"bytes,6,opt,name=accountId,proto3" json:"accountId,omitempty"` + RateLimitName string `protobuf:"bytes,7,opt,name=rateLimitName,proto3" json:"rateLimitName,omitempty"` + RateLimitTier string `protobuf:"bytes,8,opt,name=rateLimitTier,proto3" json:"rateLimitTier,omitempty"` + UseCase string `protobuf:"bytes,9,opt,name=useCase,proto3" json:"useCase,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendCaseRequest) Reset() { + *x = SendCaseRequest{} + mi := &file_exporter_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendCaseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendCaseRequest) ProtoMessage() {} + +func (x *SendCaseRequest) ProtoReflect() protoreflect.Message { + mi := &file_exporter_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendCaseRequest.ProtoReflect.Descriptor instead. 
+func (*SendCaseRequest) Descriptor() ([]byte, []int) { + return file_exporter_proto_rawDescGZIP(), []int{1} +} + +func (x *SendCaseRequest) GetOrigin() string { + if x != nil { + return x.Origin + } + return "" +} + +func (x *SendCaseRequest) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *SendCaseRequest) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *SendCaseRequest) GetContactEmail() string { + if x != nil { + return x.ContactEmail + } + return "" +} + +func (x *SendCaseRequest) GetOrganization() string { + if x != nil { + return x.Organization + } + return "" +} + +func (x *SendCaseRequest) GetAccountId() string { + if x != nil { + return x.AccountId + } + return "" +} + +func (x *SendCaseRequest) GetRateLimitName() string { + if x != nil { + return x.RateLimitName + } + return "" +} + +func (x *SendCaseRequest) GetRateLimitTier() string { + if x != nil { + return x.RateLimitTier + } + return "" +} + +func (x *SendCaseRequest) GetUseCase() string { + if x != nil { + return x.UseCase + } + return "" +} + +var File_exporter_proto protoreflect.FileDescriptor + +var file_exporter_proto_rawDesc = string([]byte{ + 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x73, 0x61, 0x6c, 0x65, 0x73, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2d, 0x0a, 0x13, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x49, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, + 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x65, 0x72, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 
0x74, 0x54, 0x69, + 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x43, 0x61, 0x73, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x43, 0x61, 0x73, 0x65, 0x32, 0x94, 0x01, 0x0a, + 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x73, 0x61, 0x6c, 0x65, + 0x73, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x73, 0x65, 0x12, 0x1b, + 0x2e, 0x73, 0x61, 0x6c, 0x65, 0x73, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x6e, 0x64, + 0x43, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, + 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x73, 0x61, 0x6c, 0x65, 0x73, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_exporter_proto_rawDescOnce sync.Once + file_exporter_proto_rawDescData []byte +) + +func file_exporter_proto_rawDescGZIP() []byte { + file_exporter_proto_rawDescOnce.Do(func() { + file_exporter_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_exporter_proto_rawDesc), len(file_exporter_proto_rawDesc))) + }) + return file_exporter_proto_rawDescData +} + +var file_exporter_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_exporter_proto_goTypes = []any{ + (*SendContactsRequest)(nil), // 0: salesforce.SendContactsRequest + (*SendCaseRequest)(nil), // 1: salesforce.SendCaseRequest + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty +} +var file_exporter_proto_depIdxs = []int32{ + 0, // 0: salesforce.Exporter.SendContacts:input_type -> salesforce.SendContactsRequest + 1, // 1: salesforce.Exporter.SendCase:input_type -> salesforce.SendCaseRequest + 2, // 2: salesforce.Exporter.SendContacts:output_type -> google.protobuf.Empty + 2, // 3: salesforce.Exporter.SendCase:output_type -> google.protobuf.Empty + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_exporter_proto_init() } +func file_exporter_proto_init() { + if File_exporter_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_exporter_proto_rawDesc), len(file_exporter_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_exporter_proto_goTypes, + DependencyIndexes: file_exporter_proto_depIdxs, + MessageInfos: file_exporter_proto_msgTypes, + }.Build() + File_exporter_proto = out.File + file_exporter_proto_goTypes = nil + file_exporter_proto_depIdxs = nil +} diff --git 
a/salesforce/proto/exporter.proto b/salesforce/proto/exporter.proto new file mode 100644 index 00000000000..55e04dfdda2 --- /dev/null +++ b/salesforce/proto/exporter.proto @@ -0,0 +1,34 @@ +// NOTE: Any changes to this service MUST also be made to email.Exporter and +// kept in sync with the adapter in cmd/email-exporter/main.go. +// +// TODO(#8410): Remove this comment once we've fully migrated to +// salesforce.Exporter. + +syntax = "proto3"; + +package salesforce; +option go_package = "github.com/letsencrypt/boulder/salesforce/proto"; + +import "google/protobuf/empty.proto"; + +service Exporter { + rpc SendContacts (SendContactsRequest) returns (google.protobuf.Empty); + rpc SendCase (SendCaseRequest) returns (google.protobuf.Empty); +} + +message SendContactsRequest { + repeated string emails = 1; +} + +message SendCaseRequest { + string origin = 1; + string subject = 2; + string description = 3; + string contactEmail = 4; + string organization = 5; + string accountId = 6; + string rateLimitName = 7; + string rateLimitTier = 8; + string useCase = 9; +} + diff --git a/salesforce/proto/exporter_grpc.pb.go b/salesforce/proto/exporter_grpc.pb.go new file mode 100644 index 00000000000..5758e32ebf6 --- /dev/null +++ b/salesforce/proto/exporter_grpc.pb.go @@ -0,0 +1,166 @@ +// NOTE: Any changes to this service MUST also be made to email.Exporter and +// kept in sync with the adapter in cmd/email-exporter/main.go. +// +// TODO(#8410): Remove this comment once we've fully migrated to +// salesforce.Exporter. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.20.1 +// source: exporter.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Exporter_SendContacts_FullMethodName = "/salesforce.Exporter/SendContacts" + Exporter_SendCase_FullMethodName = "/salesforce.Exporter/SendCase" +) + +// ExporterClient is the client API for Exporter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ExporterClient interface { + SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendCase(ctx context.Context, in *SendCaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type exporterClient struct { + cc grpc.ClientConnInterface +} + +func NewExporterClient(cc grpc.ClientConnInterface) ExporterClient { + return &exporterClient{cc} +} + +func (c *exporterClient) SendContacts(ctx context.Context, in *SendContactsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, Exporter_SendContacts_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *exporterClient) SendCase(ctx context.Context, in *SendCaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, Exporter_SendCase_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ExporterServer is the server API for Exporter service. +// All implementations must embed UnimplementedExporterServer +// for forward compatibility. +type ExporterServer interface { + SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error) + SendCase(context.Context, *SendCaseRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedExporterServer() +} + +// UnimplementedExporterServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedExporterServer struct{} + +func (UnimplementedExporterServer) SendContacts(context.Context, *SendContactsRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendContacts not implemented") +} +func (UnimplementedExporterServer) SendCase(context.Context, *SendCaseRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCase not implemented") +} +func (UnimplementedExporterServer) mustEmbedUnimplementedExporterServer() {} +func (UnimplementedExporterServer) testEmbeddedByValue() {} + +// UnsafeExporterServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ExporterServer will +// result in compilation errors. +type UnsafeExporterServer interface { + mustEmbedUnimplementedExporterServer() +} + +func RegisterExporterServer(s grpc.ServiceRegistrar, srv ExporterServer) { + // If the following call pancis, it indicates UnimplementedExporterServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Exporter_ServiceDesc, srv) +} + +func _Exporter_SendContacts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendContactsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServer).SendContacts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Exporter_SendContacts_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServer).SendContacts(ctx, req.(*SendContactsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Exporter_SendCase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendCaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExporterServer).SendCase(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Exporter_SendCase_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExporterServer).SendCase(ctx, req.(*SendCaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Exporter_ServiceDesc is the grpc.ServiceDesc for Exporter service. 
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Exporter_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "salesforce.Exporter",
+	HandlerType: (*ExporterServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "SendContacts",
+			Handler:    _Exporter_SendContacts_Handler,
+		},
+		{
+			MethodName: "SendCase",
+			Handler:    _Exporter_SendCase_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "exporter.proto",
+}
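+
+// Example (an illustrative sketch only; conn and ctx are assumed to come from
+// the caller's gRPC setup, and the field values are placeholders): sending a
+// case through the generated client might look like:
+//
+//	client := NewExporterClient(conn)
+//	_, err := client.SendCase(ctx, &SendCaseRequest{
+//		Origin:  "Web",
+//		Subject: "Rate limit increase request",
+//	})
+//	if err != nil {
+//		// handle the RPC error
+//	}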
diff --git a/sfe/forms/fields.go b/sfe/forms/fields.go
new file mode 100644
index 00000000000..72925d6c457
--- /dev/null
+++ b/sfe/forms/fields.go
@@ -0,0 +1,200 @@
+package forms
+
+import (
+	"fmt"
+	"html/template"
+	"strings"
+)
+
+type Field interface {
+	// RenderField returns the HTML representation of the field.
+	RenderField() template.HTML
+}
+
+type InputField struct {
+	// displayName is the name displayed in the form UI.
+	displayName string
+
+	// name is the name of the field when submitted in the form. It is required
+	// and must be unique within the form.
+	name string
+
+	// description is a short description displayed below the field. It is
+	// required.
+	description string
+
+	// required indicates whether the field is required.
+	required bool
+}
+
+var _ Field = (*InputField)(nil)
+
+func NewInputField(displayName, name, description string, required bool) *InputField {
+	return &InputField{
+		displayName: displayName,
+		name:        name,
+		description: description,
+		required:    required,
+	}
+}
+
+func (field InputField) RenderField() template.HTML {
+	var reqAttr string
+	if field.required {
+		reqAttr = `required="required"`
+	}
+	fieldHTML := fmt.Sprintf(`
+	<div class="field">
+		<label for="%[1]s">%[2]s</label>
+		<input type="text" id="%[1]s" name="%[1]s" %[4]s>
+		<small>%[3]s</small>
+	</div>
+	`, field.name, field.displayName, field.description, reqAttr)
+	return template.HTML(fieldHTML) //nolint:gosec // G203: html produced by html/template; no raw user HTML is injected.
+}
+
+type DropdownField struct {
+	// displayName is the name displayed in the form UI.
+	displayName string
+
+	// name is the name of the field when submitted in the form. It is required
+	// and must be unique within the form.
+	name string
+
+	// description is a short description displayed below the field. It is
+	// required.
+	description string
+
+	// options is the list of options available in the dropdown.
+	options []string
+
+	// required indicates whether the field is required.
+	required bool
+}
+
+var _ Field = (*DropdownField)(nil)
+
+func NewDropdownField(displayName, name, description string, options []string, required bool) Field {
+	return &DropdownField{
+		displayName: displayName,
+		name:        name,
+		description: description,
+		options:     options,
+		required:    required,
+	}
+}
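+
+// Example (an illustrative sketch; the field values are placeholders): a page
+// can compose any mix of Field implementations and render them in order:
+//
+//	fields := []Field{
+//		NewInputField("Email Address", "email", "Where we can reach you", true),
+//		NewDropdownField("Favorite Color", "color", "Pick one", []string{"red", "green"}, true),
+//	}
+//	var page strings.Builder
+//	for _, f := range fields {
+//		page.WriteString(string(f.RenderField()))
+//	}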
+func (field DropdownField) RenderField() template.HTML {
+	var reqAttr string
+	if field.required {
+		reqAttr = `required="required"`
+	}
+	var b strings.Builder
+	b.WriteString(fmt.Sprintf(`
+	<div class="field">
+		<label for="%[1]s">%[2]s</label>
+		<select id="%[1]s" name="%[1]s" %[3]s>
+	`, field.name, field.displayName, reqAttr))
+	for _, option := range field.options {
+		b.WriteString(fmt.Sprintf(`<option value="%[1]s">%[1]s</option>`, option))
+	}
+	b.WriteString(fmt.Sprintf(`
+		</select>
+		<small>%[1]s</small>
+	</div>
+	`, field.description))
+	return template.HTML(b.String()) //nolint:gosec // G203: html produced by html/template; no raw user HTML is injected.
+}
+
+type TextareaField struct {
+	// displayName is the name displayed in the form UI.
+	displayName string
+
+	// name is the name of the field when submitted in the form. It is required
+	// and must be unique within the form.
+	name string
+
+	// description is a short description displayed below the field. It is
+	// required.
+	description string
+
+	// rows is the number of lines to show in the textarea. Optional and
+	// defaults to 4.
+	rows int
+
+	// required indicates whether the field is required.
+	required bool
+}
+
+var _ Field = (*TextareaField)(nil)
+
+func NewTextareaField(displayName, name, description string, rows int, required bool) *TextareaField {
+	return &TextareaField{
+		displayName: displayName,
+		name:        name,
+		description: description,
+		rows:        rows,
+		required:    required,
+	}
+}
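+
+// Example (illustrative; the names are placeholders): a non-positive rows
+// value falls back to the default of 4:
+//
+//	ta := NewTextareaField("Use Case", "useCase", "Tell us more", 0, false)
+//	html := ta.RenderField() // renders rows="4"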
+func (field TextareaField) RenderField() template.HTML {
+	numRows := field.rows
+	if numRows <= 0 {
+		numRows = 4
+	}
+	var reqAttr string
+	if field.required {
+		reqAttr = `required="required"`
+	}
+	fieldHTML := fmt.Sprintf(`
+	<div class="field">
+		<label for="%[1]s">%[2]s</label>
+		<textarea id="%[1]s" name="%[1]s" rows="%[4]d" %[5]s></textarea>
+		<small>%[3]s</small>
+	</div>
+	`, field.name, field.displayName, field.description, numRows, reqAttr)
+	return template.HTML(fieldHTML) //nolint:gosec // G203: html produced by html/template; no raw user HTML is injected.
+}
+
+type CheckboxField struct {
+	// displayName is the name displayed in the form UI.
+	displayName string
+
+	// name is the name of the field when submitted in the form. It is required
+	// and must be unique within the form.
+	name string
+
+	// text is the text displayed to the right of the checkbox. It is required.
+	text string
+
+	// required indicates whether the checkbox must be checked.
+	required bool
+}
+
+var _ Field = (*CheckboxField)(nil)
+
+func NewCheckboxField(displayName, name, text string, required bool) *CheckboxField {
+	return &CheckboxField{
+		displayName: displayName,
+		name:        name,
+		text:        text,
+		required:    required,
+	}
+}
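+
+// Example (illustrative; the names are placeholders): a required checkbox
+// must be checked before the browser will submit the form:
+//
+//	cb := NewCheckboxField("Agreement", "agree", "I agree to the terms", true)
+//	html := cb.RenderField() // includes required="required"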
+func (field CheckboxField) RenderField() template.HTML {
+	var reqAttr string
+	if field.required {
+		reqAttr = `required="required"`
+	}
+	fieldHTML := fmt.Sprintf(`
+	<div class="field">
+		<label for="%[1]s">%[2]s</label>
+		<label>
+			<input type="checkbox" id="%[1]s" name="%[1]s" %[4]s>
+			%[3]s
+		</label>
+	</div>
+	`, field.name, field.displayName, field.text, reqAttr)
+	return template.HTML(fieldHTML) //nolint:gosec // G203: html produced by html/template; no raw user HTML is injected.
+}
diff --git a/sfe/forms/fields_test.go b/sfe/forms/fields_test.go
new file mode 100644
index 00000000000..300a3979c8e
--- /dev/null
+++ b/sfe/forms/fields_test.go
@@ -0,0 +1,126 @@
+package forms
+
+import (
+	"testing"
+
+	"github.com/letsencrypt/boulder/test"
+)
+
+func TestInputFieldRenderField(t *testing.T) {
+	cases := []struct {
+		name     string
+		required bool
+	}{
+		{"required", true},
+		{"optional", false},
+	}
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			f := NewInputField("Email Address", "email", "Where we can reach you", tc.required)
+			html := string(f.RenderField())
+
+			test.AssertContains(t, html, `<label for="email">Email Address</label>`)
+			test.AssertContains(t, html, `<input type="text" id="email" name="email"`)
+			test.AssertContains(t, html, `Where we can reach you`)
+			if tc.required {
+				test.AssertContains(t, html, `required="required"`)
+			} else {
+				test.AssertNotContains(t, html, `required="required"`)
+			}
+		})
+	}
+}
+
+func TestDropdownFieldRenderField(t *testing.T) {
+	f := NewDropdownField("Favorite Color", "color", "Pick one", []string{"red", "green"}, true)
+	html := string(f.RenderField())
+
+	test.AssertContains(t, html, `<label for="color">Favorite Color</label>`)
+	test.AssertContains(t, html, `Pick one`)
+	test.AssertContains(t, html, `