diff --git a/.action_templates/jobs/setup.yaml b/.action_templates/jobs/setup.yaml index 3e7a70fd3..ad46dc26d 100644 --- a/.action_templates/jobs/setup.yaml +++ b/.action_templates/jobs/setup.yaml @@ -4,8 +4,8 @@ setup: fail-fast: false matrix: include: - - pipeline-argument: operator-ubi - - pipeline-argument: version-post-start-hook-init - - pipeline-argument: readiness-probe-init - - pipeline-argument: agent-ubi + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent - pipeline-argument: e2e diff --git a/.action_templates/jobs/tests.yaml b/.action_templates/jobs/tests.yaml index 6701b8c2e..f360ee3d6 100644 --- a/.action_templates/jobs/tests.yaml +++ b/.action_templates/jobs/tests.yaml @@ -11,6 +11,10 @@ tests: distro: ubi - test-name: replica_set_enterprise_upgrade_5_6 distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi - test-name: replica_set_recovery distro: ubi - test-name: replica_set_mongod_readiness @@ -60,3 +64,5 @@ tests: distro: ubi - test-name: replica_set_x509 distro: ubi + - test-name: replica_set_remove_user + distro: ubi diff --git a/.action_templates/steps/build-and-push-development-images.yaml b/.action_templates/steps/build-and-push-development-images.yaml index 9e740af06..4fe3df401 100644 --- a/.action_templates/steps/build-and-push-development-images.yaml +++ b/.action_templates/steps/build-and-push-development-images.yaml @@ -1,6 +1,6 @@ - name: Build and Push Images run: | - python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release false --tag ${{ github.run_id }} + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} env: MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" version_id: "${{ github.run_id }}" diff --git a/.action_templates/steps/cancel-previous.yaml 
b/.action_templates/steps/cancel-previous.yaml index 59ced338b..301d5af50 100644 --- a/.action_templates/steps/cancel-previous.yaml +++ b/.action_templates/steps/cancel-previous.yaml @@ -1,4 +1,4 @@ - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@89f242ee29e10c53a841bfe71cc0ce7b2f065abc # 0.9.0 + uses: styfle/cancel-workflow-action@0.12.1 with: access_token: ${{ github.token }} diff --git a/.action_templates/steps/checkout-fork.yaml b/.action_templates/steps/checkout-fork.yaml index 81b97e54d..abd35041c 100644 --- a/.action_templates/steps/checkout-fork.yaml +++ b/.action_templates/steps/checkout-fork.yaml @@ -2,7 +2,7 @@ # Because we are using pull_request_target the Github Secrets will be passed # So code should be reviewed before labeling as "safe-to-test" - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{github.event.pull_request.head.sha}} repository: ${{github.event.pull_request.head.repo.full_name}} diff --git a/.action_templates/steps/checkout.yaml b/.action_templates/steps/checkout.yaml index 8d3e07859..da02fc2f3 100644 --- a/.action_templates/steps/checkout.yaml +++ b/.action_templates/steps/checkout.yaml @@ -1,4 +1,4 @@ - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true diff --git a/.action_templates/steps/dump-and-upload-diagnostics-always.yaml b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml index 61cfff7db..968ecd9ce 100644 --- a/.action_templates/steps/dump-and-upload-diagnostics-always.yaml +++ b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml @@ -5,7 +5,7 @@ - name: Upload Diagnostics if: always() - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: "${{ github.event.inputs.test-name }}-${{ github.event.inputs.distro }}-diagnostics" diff --git a/.action_templates/steps/dump-and-upload-diagnostics.yaml 
b/.action_templates/steps/dump-and-upload-diagnostics.yaml index d8c23e3dd..17f5d2688 100644 --- a/.action_templates/steps/dump-and-upload-diagnostics.yaml +++ b/.action_templates/steps/dump-and-upload-diagnostics.yaml @@ -6,7 +6,7 @@ - name: Upload Diagnostics if: always() && steps.dump_diagnostics.outcome == 'success' - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: "${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics" diff --git a/.action_templates/steps/quay-login.yaml b/.action_templates/steps/quay-login.yaml index 85073e70b..77a8dd06f 100644 --- a/.action_templates/steps/quay-login.yaml +++ b/.action_templates/steps/quay-login.yaml @@ -1,5 +1,5 @@ - name: Login to Quay.io - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} diff --git a/.action_templates/steps/set-run-status.yaml b/.action_templates/steps/set-run-status.yaml index 40bb34079..9f4a76541 100644 --- a/.action_templates/steps/set-run-status.yaml +++ b/.action_templates/steps/set-run-status.yaml @@ -7,7 +7,7 @@ # see https://github.com/actions/runner/issues/432 - name: Restore last run status id: last_run - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: last_run_status key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} diff --git a/.action_templates/steps/set-up-qemu.yaml b/.action_templates/steps/set-up-qemu.yaml index 7ae0e920a..c84384bfc 100644 --- a/.action_templates/steps/set-up-qemu.yaml +++ b/.action_templates/steps/set-up-qemu.yaml @@ -1,2 +1,2 @@ - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 diff --git a/.action_templates/steps/setup-and-install-python.yaml b/.action_templates/steps/setup-and-install-python.yaml index e97bcf462..b924e01ae 100644 --- a/.action_templates/steps/setup-and-install-python.yaml +++ b/.action_templates/steps/setup-and-install-python.yaml @@ -1,9 +1,9 @@ - 
name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: '3.10.4' - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5218c3702..db61cf612 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @irajdeep @mircea-cosbuc @lsierant @slaskawi @nammn +* @mircea-cosbuc @lsierant @nammn @Julien-Ben @MaciejKaras @lucian-tosa @fealebenpae @m1kola \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f3021cc27..08b2b00ab 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -35,6 +35,30 @@ If applicable, add screenshots to help explain your problem. Add any other context about the problem here. If possible, please include: - - `kubectl describe` output - - yaml definitions for your objects - - log files for the operator and database pods + - The operator logs + - Below we assume that your replicaset database pods are named `mongo-<>`. 
For instance: +``` +❯ k get pods +NAME READY STATUS RESTARTS AGE +mongo-0 2/2 Running 0 19h +mongo-1 2/2 Running 0 19h + +❯ k get mdbc +NAME PHASE VERSION +mongo Running 4.4.0 +``` + - yaml definitions of your MongoDB Deployment(s): + - `kubectl get mdbc -oyaml` + - yaml definitions of your kubernetes objects like the statefulset(s), pods (we need to see the state of the containers): + - `kubectl get sts -oyaml` + - `kubectl get pods -oyaml` + - The Pod logs: + - `kubectl logs mongo-0` + - The agent clusterconfig of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/lib/automation/config/cluster-config.json` + - The agent health status of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/healthstatus/agent-health-status.json` + - The verbose agent logs of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent-verbose.log` + - You might not have the verbose ones, in that case the non-verbose agent logs: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent.log` diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f77733901..650880d32 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,12 @@ +### Summary: + + ### All Submissions: * [ ] Have you opened an Issue before filing this PR? 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 1f09448f2..eb3084c66 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,9 +5,14 @@ updates: schedule: interval: weekly day: monday - reviewers: - - "slaskawi" - - "lsierant" - - "irajdeep" - - "mircea-cosbuc" - - "nammn" + ignore: + - dependency-name: k8s.io/api + - dependency-name: k8s.io/apimachinery + - dependency-name: k8s.io/client-go + - dependency-name: k8s.io/code-generator + - dependency-name: sigs.k8s.io/controller-runtime + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + day: monday diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml index 8a806294a..942020dbd 100644 --- a/.github/workflows/close-stale-issues.yml +++ b/.github/workflows/close-stale-issues.yml @@ -10,7 +10,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v3 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue is being marked stale because it has been open for 60 days with no activity. Please comment if this issue is still affecting you. If there is no change, this issue will be closed in 30 days.' 
diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml index 3522142a5..345941c18 100644 --- a/.github/workflows/code-health.yml +++ b/.github/workflows/code-health.yml @@ -7,7 +7,7 @@ jobs: Black: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Black Check uses: jpetrucciani/black-check@7f5b2ad20fa5484f1884f07c1937e032ed8cd939 @@ -15,9 +15,19 @@ jobs: Mypy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Mypy linting uses: jpetrucciani/mypy-check@179fdad632bf3ccf4cabb7ee4307ef25e51d2f96 with: path: scripts/*/*.py + + Golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: stable + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 diff --git a/.github/workflows/comment-release-pr.yml b/.github/workflows/comment-release-pr.yml index 0720856d2..3944aa660 100644 --- a/.github/workflows/comment-release-pr.yml +++ b/.github/workflows/comment-release-pr.yml @@ -9,7 +9,7 @@ jobs: if: startsWith(github.event.pull_request.title, 'Release MongoDB Kubernetes Operator') runs-on: ubuntu-latest steps: - - uses: actions/github-script@v3 + - uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/e2e-dispatch.yml b/.github/workflows/e2e-dispatch.yml index 508a9207e..b3522124d 100644 --- a/.github/workflows/e2e-dispatch.yml +++ b/.github/workflows/e2e-dispatch.yml @@ -40,24 +40,24 @@ jobs: fail-fast: false matrix: include: - - pipeline-argument: operator-ubi - - pipeline-argument: version-post-start-hook-init - - pipeline-argument: readiness-probe-init - - pipeline-argument: agent-ubi + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent - pipeline-argument: e2e steps: # template: .action_templates/steps/checkout.yaml 
- name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -65,18 +65,18 @@ jobs: run: pip install -r requirements.txt # template: .action_templates/steps/quay-login.yaml - name: Login to Quay.io - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_ROBOT_TOKEN }} # template: .action_templates/steps/set-up-qemu.yaml - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 # template: .action_templates/steps/build-and-push-development-images.yaml - name: Build and Push Images run: | - python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release false --tag ${{ github.run_id }} + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} env: MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json version_id: ${{ github.run_id }} @@ -87,16 +87,16 @@ jobs: steps: # template: .action_templates/steps/checkout.yaml - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -126,7 +126,7 @@ jobs: - name: Upload Diagnostics if: always() - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: ${{ 
github.event.inputs.test-name }}-${{ github.event.inputs.distro diff --git a/.github/workflows/e2e-fork.yml b/.github/workflows/e2e-fork.yml index 4cb8b75e1..a5c3ae53e 100644 --- a/.github/workflows/e2e-fork.yml +++ b/.github/workflows/e2e-fork.yml @@ -33,33 +33,33 @@ jobs: fail-fast: false matrix: include: - - pipeline-argument: operator-ubi - - pipeline-argument: version-post-start-hook-init - - pipeline-argument: readiness-probe-init - - pipeline-argument: agent-ubi + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent - pipeline-argument: e2e if: contains(github.event.pull_request.labels.*.name, 'dependencies') || contains(github.event.pull_request.labels.*.name, 'safe-to-test') steps: # template: .action_templates/steps/cancel-previous.yaml - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@89f242ee29e10c53a841bfe71cc0ce7b2f065abc # 0.9.0 + uses: styfle/cancel-workflow-action@0.12.1 with: access_token: ${{ github.token }} # template: .action_templates/steps/checkout-fork.yaml - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{github.event.pull_request.head.sha}} repository: ${{github.event.pull_request.head.repo.full_name}} submodules: true # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -67,18 +67,18 @@ jobs: run: pip install -r requirements.txt # template: .action_templates/steps/quay-login.yaml - name: Login to Quay.io - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_ROBOT_TOKEN }} # template: .action_templates/steps/set-up-qemu.yaml - 
name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 # template: .action_templates/steps/build-and-push-development-images.yaml - name: Build and Push Images run: | - python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release false --tag ${{ github.run_id }} + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} env: MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json version_id: ${{ github.run_id }} @@ -96,6 +96,10 @@ jobs: distro: ubi - test-name: replica_set_enterprise_upgrade_5_6 distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi - test-name: replica_set_recovery distro: ubi - test-name: replica_set_mongod_readiness @@ -145,15 +149,17 @@ jobs: distro: ubi - test-name: replica_set_x509 distro: ubi + - test-name: replica_set_remove_user + distro: ubi steps: # template: .action_templates/steps/cancel-previous.yaml - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@89f242ee29e10c53a841bfe71cc0ce7b2f065abc # 0.9.0 + uses: styfle/cancel-workflow-action@0.12.1 with: access_token: ${{ github.token }} # template: .action_templates/steps/checkout-fork.yaml - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{github.event.pull_request.head.sha}} repository: ${{github.event.pull_request.head.repo.full_name}} @@ -168,7 +174,7 @@ jobs: # see https://github.com/actions/runner/issues/432 - name: Restore last run status id: last_run - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: last_run_status key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} @@ -178,11 +184,11 @@ jobs: run: cat last_run_status # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache 
Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -227,7 +233,7 @@ jobs: - name: Upload Diagnostics if: always() && steps.dump_diagnostics.outcome == 'success' - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index a3a20e3ec..8501431b6 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -40,10 +40,10 @@ jobs: fail-fast: false matrix: include: - - pipeline-argument: operator-ubi - - pipeline-argument: version-post-start-hook-init - - pipeline-argument: readiness-probe-init - - pipeline-argument: agent-ubi + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent - pipeline-argument: e2e if: github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor @@ -51,21 +51,21 @@ jobs: steps: # template: .action_templates/steps/cancel-previous.yaml - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@89f242ee29e10c53a841bfe71cc0ce7b2f065abc # 0.9.0 + uses: styfle/cancel-workflow-action@0.12.1 with: access_token: ${{ github.token }} # template: .action_templates/steps/checkout.yaml - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -73,18 +73,18 @@ jobs: run: pip install -r requirements.txt # template: 
.action_templates/steps/quay-login.yaml - name: Login to Quay.io - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: quay.io username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_ROBOT_TOKEN }} # template: .action_templates/steps/set-up-qemu.yaml - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 # template: .action_templates/steps/build-and-push-development-images.yaml - name: Build and Push Images run: | - python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release false --tag ${{ github.run_id }} + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} env: MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json version_id: ${{ github.run_id }} @@ -102,6 +102,10 @@ jobs: distro: ubi - test-name: replica_set_enterprise_upgrade_5_6 distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi - test-name: replica_set_recovery distro: ubi - test-name: replica_set_mongod_readiness @@ -151,15 +155,17 @@ jobs: distro: ubi - test-name: replica_set_x509 distro: ubi + - test-name: replica_set_remove_user + distro: ubi steps: # template: .action_templates/steps/cancel-previous.yaml - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@89f242ee29e10c53a841bfe71cc0ce7b2f065abc # 0.9.0 + uses: styfle/cancel-workflow-action@0.12.1 with: access_token: ${{ github.token }} # template: .action_templates/steps/checkout.yaml - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: submodules: true # template: .action_templates/steps/set-run-status.yaml @@ -172,7 +178,7 @@ jobs: # see https://github.com/actions/runner/issues/432 - name: Restore last run status id: last_run - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: last_run_status key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} @@ 
-182,11 +188,11 @@ jobs: run: cat last_run_status # template: .action_templates/steps/setup-and-install-python.yaml - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: 3.10.4 - name: Cache Dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -231,7 +237,7 @@ jobs: - name: Upload Diagnostics if: always() && steps.dump_diagnostics.outcome == 'success' - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 42554d1ca..ecce33378 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -9,12 +9,12 @@ jobs: UnitTests: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: '1.21' + go-version: '1.24' - name: Test api run: go test -v ./api/... @@ -30,3 +30,6 @@ jobs: - name: Test mongotester run: go test -v ./test/e2e/util/mongotester/... 
+ + - name: Check licenses + run: make check-licenses diff --git a/.github/workflows/kubelinter-check.yml b/.github/workflows/kubelinter-check.yml index b4ef431fd..2fcb5b725 100644 --- a/.github/workflows/kubelinter-check.yml +++ b/.github/workflows/kubelinter-check.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Scan directory ./deploy/clusterwide/ with kube-linter uses: stackrox/kube-linter-action@v1.0.3 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index b922a90d8..3442f28df 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -36,7 +36,7 @@ jobs: # Checkout the code base # ########################## - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: # Make sure we also get the helm-charts submodule! submodules: true @@ -51,25 +51,3 @@ jobs: - name: Move the dependencies run: mv .venv /home/runner/work/_temp/_github_workflow - - # This part is not needed until we can add GO linting - # - name : Install Operator SDK - # run: | - # curl -s https://api.github.com/repos/operator-framework/operator-sdk/releases/latest | grep browser_download_url | grep x86_64-linux-gnu | cut -d '"' -f 4 | wget -i - - # sudo mv operator-sdk-*-x86_64-linux-gnu /usr/local/bin/operator-sdk - # sudo chmod 777 /usr/local/bin/operator-sdk - # - name: Generate DeepCopy - # Run: operator-sdk generate k8s - - - name: Lint Code Base - uses: docker://github/super-linter:v4 - env: - VALIDATE_ALL_CODEBASE: true - # Now we set the PYTHONPATH to the path of the dependencies *inside* the container - PYTHONPATH: "/github/workspace/:\ - /github/workflow/.venv/lib/python3.6/site-packages" - VALIDATE_YAML: true - VALIDATE_PYTHON: true - VALIDATE_BASH: true - FILTER_REGEX_EXCLUDE: "/helm-charts/charts/community-operator/templates/*" - # VALIDATE_GO: true This is currently broken: https://github.com/github/super-linter/issues/143 
diff --git a/.github/workflows/release-images.yml b/.github/workflows/release-images.yml index a5a7cbec4..5ced57eae 100644 --- a/.github/workflows/release-images.yml +++ b/.github/workflows/release-images.yml @@ -12,26 +12,23 @@ jobs: strategy: matrix: include: - - pipeline-argument: operator-ubi - release-key: mongodb-kubernetes-operator - - pipeline-argument: version-post-start-hook-init + - pipeline-argument: operator + release-key: operator + - pipeline-argument: version-upgrade-hook release-key: version-upgrade-hook - - pipeline-argument: readiness-probe-init + - pipeline-argument: readiness-probe release-key: readiness-probe - - pipeline-argument: agent-ubi - release-key: mongodb-agent - - pipeline-argument: agent-ubuntu - release-key: mongodb-agent + steps: - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: '3.10.4' architecture: 'x64' - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -57,18 +54,24 @@ jobs: - name: Publish Image To Quay if: steps.release_status.outputs.OUTPUT == 'unreleased' - run: python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release true + run: python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release --sign env: MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" + AWS_DEFAULT_REGION: "${{ vars.AWS_DEFAULT_REGION }}" create-draft-release: runs-on: ubuntu-latest needs: [release-images] steps: - name: Checkout Code - 
uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Determine Release Tag id: release_tag run: | diff --git a/.github/workflows/release-single-image.yml b/.github/workflows/release-single-image.yml index 9c2aebeea..162454391 100644 --- a/.github/workflows/release-single-image.yml +++ b/.github/workflows/release-single-image.yml @@ -13,14 +13,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: '3.10.4' architecture: 'x64' - - uses: actions/cache@v2 + - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ hashFiles('requirements.txt') }} @@ -40,10 +40,19 @@ jobs: username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Publish Image To Quay if: steps.release_status.outputs.OUTPUT == 'unreleased' - run: python pipeline.py --image-name ${{ github.event.inputs.pipeline-argument }} --release true + run: python pipeline.py --image-name ${{ github.event.inputs.pipeline-argument }} --release --sign env: MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" diff --git a/.gitignore b/.gitignore index 7836e6873..0229263df 100644 --- a/.gitignore +++ b/.gitignore @@ -100,3 +100,4 @@ diagnostics Pipfile Pipfile.lock .community-operator-dev +*.iml diff --git a/.golangci.yml b/.golangci.yml index 7e0ad5f76..795e08728 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,6 
+16,12 @@ issues: - goconst - golint text: "underscore" + - path: ^pkg\/util\/envvar + linters: + - forbidigo + - path: ^cmd\/(readiness|versionhook|manager)\/main\.go$ + linters: + - forbidigo linters: enable: - govet @@ -23,17 +29,32 @@ linters: - staticcheck - unused - gosimple - - structcheck - - varcheck - ineffassign - - deadcode - typecheck - rowserrcheck - gosec - unconvert + - forbidigo +linters-settings: + gosec: + excludes: + - G115 + forbidigo: + forbid: + - p: os\.(Getenv|LookupEnv|Environ|ExpandEnv) + pkg: os + msg: "Reading environemnt variables here is prohibited. Please read environment variables in the main package." + - p: os\.(Clearenv|Unsetenv|Setenv) + msg: "Modifying environemnt variables is prohibited." + pkg: os + - p: envvar\.(Read.*?|MergeWithOverride|GetEnvOrDefault) + pkg: github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar + msg: "Using this envvar package here is prohibited. Please work with environment variables in the main package." + # Rules with the `pkg` depend on it + analyze-types: true run: - modules-download-mode: + modules-download-mode: mod # timeout for analysis, e.g. 30s, 5m, default is 1m timeout: 5m # default concurrency is a available CPU number diff --git a/LICENSE.md b/LICENSE.md index 5463e3426..9c600b1bc 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,5 +1,5 @@ The MongoDB Agent binary in the agent/ directory may be used under the "Free for Commercial Use - Oct 2020" license found in [agent/LICENSE](scripts/dev/templates/agent/LICENSE). -The source code of this Operator, and all other content in this repo are available under the Apache v2 license. The text of this license is available in [APACHE2](APACHE2) +The source code of this Operator, and all other content in this repository are available under the Apache v2 license. The text of this license is available in [APACHE2](APACHE2) To use this Operator, you must agree to both licenses. 
diff --git a/Makefile b/Makefile index 6226a8eda..6f1811c8f 100644 --- a/Makefile +++ b/Makefile @@ -9,11 +9,11 @@ NAMESPACE := $(shell jq -r .namespace < $(MONGODB_COMMUNITY_CONFIG)) UPGRADE_HOOK_IMG := $(shell jq -r .version_upgrade_hook_image < $(MONGODB_COMMUNITY_CONFIG)) READINESS_PROBE_IMG := $(shell jq -r .readiness_probe_image < $(MONGODB_COMMUNITY_CONFIG)) REGISTRY := $(shell jq -r .repo_url < $(MONGODB_COMMUNITY_CONFIG)) -AGENT_IMAGE_NAME := $(shell jq -r .agent_image_ubi < $(MONGODB_COMMUNITY_CONFIG)) - +AGENT_IMAGE_NAME := $(shell jq -r .agent_image < $(MONGODB_COMMUNITY_CONFIG)) HELM_CHART ?= ./helm-charts/charts/community-operator STRING_SET_VALUES := --set namespace=$(NAMESPACE),versionUpgradeHook.name=$(UPGRADE_HOOK_IMG),readinessProbe.name=$(READINESS_PROBE_IMG),registry.operator=$(REPO_URL),operator.operatorImageName=$(OPERATOR_IMAGE),operator.version=latest,registry.agent=$(REGISTRY),registry.versionUpgradeHook=$(REGISTRY),registry.readinessProbe=$(REGISTRY),registry.operator=$(REGISTRY),versionUpgradeHook.version=latest,readinessProbe.version=latest,agent.version=latest,agent.name=$(AGENT_IMAGE_NAME) +STRING_SET_VALUES_LOCAL := $(STRING_SET_VALUES) --set operator.replicas=0 DOCKERFILE ?= operator # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) @@ -28,6 +28,10 @@ else GOBIN=$(shell go env GOBIN) endif +BASE_GO_PACKAGE = github.com/mongodb/mongodb-kubernetes-operator +GO_LICENSES = go-licenses +DISALLOWED_LICENSES = restricted # found reciprocal MPL-2.0 + all: manager ##@ Development @@ -41,6 +45,35 @@ vet: ## Run go vet against code generate: controller-gen ## Generate code $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." +$(GO_LICENSES): + @if ! 
which $@ &> /dev/null; then \ + go install github.com/google/go-licenses@latest; \ + fi + +licenses.csv: go.mod $(GO_LICENSES) ## Track licenses in a CSV file + @echo "Tracking licenses into file $@" + @echo "========================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) csv --include_tests $(BASE_GO_PACKAGE)/... > $@ + +# We only check that go.mod is NOT newer than licenses.csv because the CI +# tends to generate slightly different results, so content comparison wouldn't work +licenses-tracked: ## Checks license.csv is up to date + @if [ go.mod -nt licenses.csv ]; then \ + echo "License.csv is stale! Please run 'make licenses.csv' and commit"; exit 1; \ + else echo "License.csv OK (up to date)"; fi + +.PHONY: check-licenses-compliance +check-licenses-compliance: licenses.csv ## Check licenses are compliant with our restrictions + @echo "Checking licenses not to be: $(DISALLOWED_LICENSES)" + @echo "============================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) check --include_tests $(BASE_GO_PACKAGE)/... \ + --disallowed_types $(DISALLOWED_LICENSES) + @echo "--------------------" + @echo "Licenses check: PASS" + +.PHONY: check-licenses +check-licenses: licenses-tracked check-licenses-compliance ## Check license tracking & compliance + TEST ?= ./pkg/... ./api/... ./cmd/... ./controllers/... ./test/e2e/util/mongotester/... 
test: generate fmt vet manifests ## Run unit tests go test $(options) $(TEST) -coverprofile cover.out @@ -48,7 +81,7 @@ test: generate fmt vet manifests ## Run unit tests manager: generate fmt vet ## Build operator binary go build -o bin/manager ./cmd/manager/main.go -run: install install-rbac ## Run the operator against the configured Kubernetes cluster in ~/.kube/config +run: install ## Run the operator against the configured Kubernetes cluster in ~/.kube/config eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ go run ./cmd/manager/main.go @@ -58,7 +91,7 @@ debug: install install-rbac ## Run the operator in debug mode with dlv CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary - $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.11.3) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0) # Try to use already installed helm from PATH ifeq (ok,$(shell test -f "$$(which helm)" && echo ok)) @@ -80,17 +113,27 @@ install: manifests helm install-crd ## Install CRDs into a cluster install-crd: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml -install-chart: +install-chart: uninstall-crd $(HELM) upgrade --install $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace +install-chart-local-operator: uninstall-crd + $(HELM) upgrade --install $(STRING_SET_VALUES_LOCAL) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace + +prepare-local-dev: generate-env-file install-chart-local-operator install-rbac setup-sas + +# patches all sas to use the local-image-registry +setup-sas: + scripts/dev/setup_sa.sh + install-chart-with-tls-enabled: $(HELM) upgrade --install --set createResource=true $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace + install-rbac: $(HELM) template 
$(STRING_SET_VALUES) -s templates/database_roles.yaml $(HELM_CHART) | kubectl apply -f - $(HELM) template $(STRING_SET_VALUES) -s templates/operator_roles.yaml $(HELM_CHART) | kubectl apply -f - uninstall-crd: - kubectl delete crd mongodbcommunity.mongodbcommunity.mongodb.com + kubectl delete crd --ignore-not-found mongodbcommunity.mongodbcommunity.mongodb.com uninstall-chart: $(HELM) uninstall $(RELEASE_NAME_HELM) -n $(NAMESPACE) @@ -115,7 +158,10 @@ manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc. # Run e2e tests locally using go build while also setting up a proxy in the shell to allow # the test to run as if it were inside the cluster. This enables mongodb connectivity while running locally. +# "MDB_LOCAL_OPERATOR=true" ensures the operator pod is not spun up while running the e2e test - since you're +# running it locally. e2e-telepresence: cleanup-e2e install ## Run e2e tests locally using go build while also setting up a proxy e.g. make e2e-telepresence test=replica_set cleanup=true + export MDB_LOCAL_OPERATOR=true; \ telepresence connect; \ eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ go test -v -timeout=30m -failfast $(options) ./test/e2e/$(test) ; \ @@ -139,25 +185,26 @@ cleanup-e2e: ## Cleans up e2e test env kubectl delete pv --all -n $(NAMESPACE) || true generate-env-file: ## generates a local-test.env for local testing - python scripts/dev/get_e2e_env_vars.py | cut -d' ' -f2 > .community-operator-dev/local-test.env + mkdir -p .community-operator-dev + { python scripts/dev/get_e2e_env_vars.py | tee >(cut -d' ' -f2 > .community-operator-dev/local-test.env) ;} > .community-operator-dev/local-test.export.env + . 
.community-operator-dev/local-test.export.env ##@ Image operator-image: ## Build and push the operator image - python pipeline.py --image-name operator-ubi + python pipeline.py --image-name operator $(IMG_BUILD_ARGS) e2e-image: ## Build and push e2e test image - python pipeline.py --image-name e2e + python pipeline.py --image-name e2e $(IMG_BUILD_ARGS) agent-image: ## Build and push agent image - python pipeline.py --image-name agent-ubuntu - python pipeline.py --image-name agent-ubi + python pipeline.py --image-name agent $(IMG_BUILD_ARGS) readiness-probe-image: ## Build and push readiness probe image - python pipeline.py --image-name readiness-probe-init + python pipeline.py --image-name readiness-probe $(IMG_BUILD_ARGS) version-upgrade-post-start-hook-image: ## Build and push version upgrade post start hook image - python pipeline.py --image-name version-post-start-hook-init + python pipeline.py --image-name version-upgrade-hook $(IMG_BUILD_ARGS) all-images: operator-image e2e-image agent-image readiness-probe-image version-upgrade-post-start-hook-image ## create all required images diff --git a/README.md b/README.md index 3ace2dd62..5476e4383 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,10 @@ +> **DEPRECATED:** This repository is deprecated but we will continue a best-effort support until November 2025. Please use the new repository at [mongodb/mongodb-kubernetes](https://github.com/mongodb/mongodb-kubernetes) instead. +> +> For more information on this decision - what it means and entails - see the [announcement](https://github.com/mongodb/mongodb-kubernetes/releases/tag/v1.0.0) and our [public documentation](https://www.mongodb.com/docs/kubernetes/current/). +> +> A detailed migration guide is available to help you transition smoothly - see [guide](https://github.com/mongodb/mongodb-kubernetes/blob/master/docs/migration/community-operator-migration.md). 
There will be no functional changes in the new repository - only a better and unified experience as well as improved visibility into the development process. + + # MongoDB Community Kubernetes Operator # @@ -33,6 +40,7 @@ See the [documentation](docs) to learn how to: 1. [Install or upgrade](docs/install-upgrade.md) the Operator. 1. [Deploy and configure](docs/deploy-configure.md) MongoDB resources. +1. [Configure Logging](docs/logging.md) of the MongoDB resource components. 1. [Create a database user](docs/users.md) with SCRAM authentication. 1. [Secure MongoDB resource connections](docs/secure.md) using TLS. @@ -42,9 +50,6 @@ See the [documentation](docs) to learn how to: The MongoDB Community Kubernetes Operator supports the following features: -> Warning: Currently the operator doesn't support Replicaset deployment with server version >= 7.0. We are working on adding the support for it. - - - Create [replica sets](https://www.mongodb.com/docs/manual/replication/) - Upgrade and downgrade MongoDB server version - Scale replica sets up and down @@ -57,9 +62,6 @@ The MongoDB Community Kubernetes Operator supports the following features: - Create custom roles - Enable a [metrics target that can be used with Prometheus](docs/prometheus/README.md) -### Planned Features -- Server internal authentication via keyfile - ## Contribute Before you contribute to the MongoDB Community Kubernetes Operator, please read: @@ -71,6 +73,17 @@ Please file issues before filing PRs. For PRs to be accepted, contributors must Reviewers, please ensure that the CLA has been signed by referring to [the contributors tool](https://contributors.corp.mongodb.com/) (internal link). 
+## Linting + +This project uses the following linters upon every Pull Request: + +* `gosec` is a tool that find security problems in the code +* `Black` is a tool that verifies if Python code is properly formatted +* `MyPy` is a Static Type Checker for Python +* `Kube-linter` is a tool that verified if all Kubernetes YAML manifests are formatted correctly +* `Go vet` A built-in Go static checker +* `Snyk` The vulnerability scanner + ## License Please see the [LICENSE](LICENSE.md) file. diff --git a/api/v1/mongodbcommunity_types.go b/api/v1/mongodbcommunity_types.go index 0153e22e6..6a5e4bf0c 100644 --- a/api/v1/mongodbcommunity_types.go +++ b/api/v1/mongodbcommunity_types.go @@ -128,6 +128,10 @@ type MongoDBCommunitySpec struct { // +kubebuilder:pruning:PreserveUnknownFields // +nullable AdditionalConnectionStringConfig MapWrapper `json:"additionalConnectionStringConfig,omitempty"` + + // MemberConfig + // +optional + MemberConfig []automationconfig.MemberOptions `json:"memberConfig,omitempty"` } // MapWrapper is a wrapper for a map to be used by other structs. @@ -319,7 +323,17 @@ type AuthenticationRestriction struct { // AutomationConfigOverride contains fields which will be overridden in the operator created config. 
type AutomationConfigOverride struct { - Processes []OverrideProcess `json:"processes"` + Processes []OverrideProcess `json:"processes,omitempty"` + ReplicaSet OverrideReplicaSet `json:"replicaSet,omitempty"` +} + +type OverrideReplicaSet struct { + // Id can be used together with additionalMongodConfig.replication.replSetName + // to manage clusters where replSetName differs from the MongoDBCommunity resource name + Id *string `json:"id,omitempty"` + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + Settings MapWrapper `json:"settings,omitempty"` } // Note: We do not use the automationconfig.Process type directly here as unmarshalling cannot happen directly @@ -345,10 +359,10 @@ type LogLevel string const ( LogLevelDebug LogLevel = "DEBUG" - LogLevelInfo string = "INFO" - LogLevelWarn string = "WARN" - LogLevelError string = "ERROR" - LogLevelFatal string = "FATAL" + LogLevelInfo LogLevel = "INFO" + LogLevelWarn LogLevel = "WARN" + LogLevelError LogLevel = "ERROR" + LogLevelFatal LogLevel = "FATAL" ) type AgentConfiguration struct { @@ -362,6 +376,9 @@ type AgentConfiguration struct { // LogRotate if enabled, will enable LogRotate for all processes. LogRotate *automationconfig.CrdLogRotate `json:"logRotate,omitempty"` // +optional + // AuditLogRotate if enabled, will enable AuditLogRotate for all processes. + AuditLogRotate *automationconfig.CrdLogRotate `json:"auditLogRotate,omitempty"` + // +optional // SystemLog configures system log of mongod SystemLog *automationconfig.SystemLog `json:"systemLog,omitempty"` } @@ -471,6 +488,10 @@ type MongoDBUser struct { // +optional ConnectionStringSecretName string `json:"connectionStringSecretName,omitempty"` + // ConnectionStringSecretNamespace is the namespace of the secret object created by the operator which exposes the connection strings for the user. 
+ // +optional + ConnectionStringSecretNamespace string `json:"connectionStringSecretNamespace,omitempty"` + // Additional options to be appended to the connection string. // These options apply only to this user and will override any existing options in the resource. // +kubebuilder:validation:Type=object @@ -503,6 +524,16 @@ func (m MongoDBUser) GetConnectionStringSecretName(resourceName string) string { return normalizeName(fmt.Sprintf("%s-%s-%s", resourceName, m.DB, m.Name)) } +// GetConnectionStringSecretNamespace gets the connection string secret namespace provided by the user or generated +// from the SCRAM user configuration. +func (m MongoDBUser) GetConnectionStringSecretNamespace(resourceNamespace string) string { + if m.ConnectionStringSecretNamespace != "" { + return m.ConnectionStringSecretNamespace + } + + return resourceNamespace +} + // normalizeName returns a string that conforms to RFC-1123 func normalizeName(name string) string { errors := validation.IsDNS1123Subdomain(name) @@ -761,11 +792,12 @@ func (m *MongoDBCommunity) GetAuthUsers() []authtypes.User { } users[i] = authtypes.User{ - Username: u.Name, - Database: u.DB, - Roles: roles, - ConnectionStringSecretName: u.GetConnectionStringSecretName(m.Name), - ConnectionStringOptions: u.AdditionalConnectionStringConfig.Object, + Username: u.Name, + Database: u.DB, + Roles: roles, + ConnectionStringSecretName: u.GetConnectionStringSecretName(m.Name), + ConnectionStringSecretNamespace: u.GetConnectionStringSecretNamespace(m.Namespace), + ConnectionStringOptions: u.AdditionalConnectionStringConfig.Object, } if u.DB != constants.ExternalDB { diff --git a/api/v1/mongodbcommunity_types_test.go b/api/v1/mongodbcommunity_types_test.go index efdd042c3..19b365527 100644 --- a/api/v1/mongodbcommunity_types_test.go +++ b/api/v1/mongodbcommunity_types_test.go @@ -328,6 +328,16 @@ func TestGetConnectionStringSecretName(t *testing.T) { }, "connection-string-secret", }, + { + MongoDBUser{ + Name: "mdb-2", + DB: 
"admin", + ScramCredentialsSecretName: "scram-credential-secret-name-2", + ConnectionStringSecretName: "connection-string-secret-2", + ConnectionStringSecretNamespace: "other-namespace", + }, + "connection-string-secret-2", + }, } for _, tt := range testusers { @@ -521,11 +531,12 @@ func TestMongoDBCommunity_GetAuthUsers(t *testing.T) { Database: "admin", Name: "readWriteAnyDatabase", }}, - PasswordSecretKey: "password", - PasswordSecretName: "my-user-password", - ScramCredentialsSecretName: "my-scram-scram-credentials", - ConnectionStringSecretName: "mdb-admin-my-user", - ConnectionStringOptions: nil, + PasswordSecretKey: "password", + PasswordSecretName: "my-user-password", + ScramCredentialsSecretName: "my-scram-scram-credentials", + ConnectionStringSecretName: "mdb-admin-my-user", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, }, authUsers[0]) assert.Equal(t, authtypes.User{ Username: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization", @@ -534,11 +545,12 @@ func TestMongoDBCommunity_GetAuthUsers(t *testing.T) { Database: "admin", Name: "readWriteAnyDatabase", }}, - PasswordSecretKey: "", - PasswordSecretName: "", - ScramCredentialsSecretName: "", - ConnectionStringSecretName: "mdb-external-cn-my-x509-authenticated-user-ou-organizationalunit-o-organization", - ConnectionStringOptions: nil, + PasswordSecretKey: "", + PasswordSecretName: "", + ScramCredentialsSecretName: "", + ConnectionStringSecretName: "mdb-external-cn-my-x509-authenticated-user-ou-organizationalunit-o-organization", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, }, authUsers[1]) } diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 964bb2453..df22b4876 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2021. 
@@ -35,6 +34,11 @@ func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) { *out = new(automationconfig.CrdLogRotate) **out = **in } + if in.AuditLogRotate != nil { + in, out := &in.AuditLogRotate, &out.AuditLogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } if in.SystemLog != nil { in, out := &in.SystemLog, &out.SystemLog *out = new(automationconfig.SystemLog) @@ -117,6 +121,7 @@ func (in *AutomationConfigOverride) DeepCopyInto(out *AutomationConfigOverride) (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.ReplicaSet.DeepCopyInto(&out.ReplicaSet) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationConfigOverride. @@ -266,6 +271,13 @@ func (in *MongoDBCommunitySpec) DeepCopyInto(out *MongoDBCommunitySpec) { **out = **in } in.AdditionalConnectionStringConfig.DeepCopyInto(&out.AdditionalConnectionStringConfig) + if in.MemberConfig != nil { + in, out := &in.MemberConfig, &out.MemberConfig + *out = make([]automationconfig.MemberOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunitySpec. @@ -351,6 +363,27 @@ func (in *OverrideProcess) DeepCopy() *OverrideProcess { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideReplicaSet) DeepCopyInto(out *OverrideReplicaSet) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + in.Settings.DeepCopyInto(&out.Settings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideReplicaSet. 
+func (in *OverrideReplicaSet) DeepCopy() *OverrideReplicaSet { + if in == nil { + return nil + } + out := new(OverrideReplicaSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Privilege) DeepCopyInto(out *Privilege) { *out = *in diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 4a1eb6d93..b8dd5d184 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -4,9 +4,12 @@ import ( "fmt" "os" + "sigs.k8s.io/controller-runtime/pkg/cache" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "github.com/mongodb/mongodb-kubernetes-operator/controllers" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -55,7 +58,14 @@ func main() { log.Sugar().Fatalf("Failed to configure logger: %v", err) } - if !hasRequiredVariables(log, construct.AgentImageEnv, construct.VersionUpgradeHookImageEnv, construct.ReadinessProbeImageEnv) { + if !hasRequiredVariables( + log, + construct.MongodbRepoUrlEnv, + construct.MongodbImageEnv, + construct.AgentImageEnv, + construct.VersionUpgradeHookImageEnv, + construct.ReadinessProbeImageEnv, + ) { os.Exit(1) } @@ -82,7 +92,9 @@ func main() { // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ - Namespace: watchNamespace, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{watchNamespace: {}}, + }, }) if err != nil { log.Sugar().Fatalf("Unable to create manager: %v", err) @@ -96,7 +108,15 @@ func main() { } // Setup Controller. 
- if err = controllers.NewReconciler(mgr).SetupWithManager(mgr); err != nil { + if err = controllers.NewReconciler( + mgr, + os.Getenv(construct.MongodbRepoUrlEnv), + os.Getenv(construct.MongodbImageEnv), + envvar.GetEnvOrDefault(construct.MongoDBImageTypeEnv, construct.DefaultImageType), + os.Getenv(construct.AgentImageEnv), + os.Getenv(construct.VersionUpgradeHookImageEnv), + os.Getenv(construct.ReadinessProbeImageEnv), + ).SetupWithManager(mgr); err != nil { log.Sugar().Fatalf("Unable to create controller: %v", err) } // +kubebuilder:scaffold:builder diff --git a/cmd/readiness/main.go b/cmd/readiness/main.go index 0849eb188..6cf9e7804 100644 --- a/cmd/readiness/main.go +++ b/cmd/readiness/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "io" @@ -42,7 +43,7 @@ func init() { // - If MongoDB: then just the 'statuses[0].IsInGoalState` field is used to learn if the Agent has reached the goal // - if AppDB: the 'mmsStatus[0].lastGoalVersionAchieved' field is compared with the one from mounted automation config // Additionally if the previous check hasn't returned 'true' an additional check for wait steps is being performed -func isPodReady(conf config.Config) (bool, error) { +func isPodReady(ctx context.Context, conf config.Config) (bool, error) { healthStatus, err := parseHealthStatus(conf.HealthStatusReader) if err != nil { logger.Errorf("There was problem parsing health status file: %s", err) @@ -56,7 +57,7 @@ func isPodReady(conf config.Config) (bool, error) { } // If the agent has reached the goal state - inGoalState, err := isInGoalState(healthStatus, conf) + inGoalState, err := isInGoalState(ctx, healthStatus, conf) if err != nil { logger.Errorf("There was problem checking the health status: %s", err) return false, err @@ -159,9 +160,9 @@ func isWaitStep(status *health.StepStatus) bool { return false } -func isInGoalState(health health.Status, conf config.Config) (bool, error) { +func isInGoalState(ctx context.Context, health 
health.Status, conf config.Config) (bool, error) { if isHeadlessMode() { - return headless.PerformCheckHeadlessMode(health, conf) + return headless.PerformCheckHeadlessMode(ctx, health, conf) } return performCheckOMMode(health), nil } @@ -207,28 +208,55 @@ func parseHealthStatus(reader io.Reader) (health.Status, error) { } func initLogger(l *lumberjack.Logger) { - log := zap.New(zapcore.NewCore( - zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), - zapcore.AddSync(l), - zap.DebugLevel, - ), zap.Development()) + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + consoleCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(os.Stdout), + zap.DebugLevel) + + cores := []zapcore.Core{consoleCore} + if config.ReadBoolWitDefault(config.WithAgentFileLogging, "true") { + fileCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(l), + zap.DebugLevel) + cores = append(cores, fileCore) + } + + core := zapcore.NewTee(cores...) + log := zap.New(core, zap.Development()) logger = log.Sugar() + + logger.Infof("logging configuration: %+v", l) } func main() { + ctx := context.Background() clientSet, err := kubernetesClientset() if err != nil { panic(err) } - cfg, err := config.BuildFromEnvVariables(clientSet, isHeadlessMode()) + initLogger(config.GetLogger()) + + healthStatusFilePath := config.GetEnvOrDefault(config.AgentHealthStatusFilePathEnv, config.DefaultAgentHealthStatusFilePath) + file, err := os.Open(healthStatusFilePath) + // The agent might be slow in creating the health status file. + // In that case, we don't want to panic to show the message + // in the kubernetes description. That would be a red herring, since that will solve itself with enough time. 
if err != nil { - panic(err) + logger.Errorf("health status file not avaible yet: %s ", err) + os.Exit(1) } - initLogger(cfg.Logger) + cfg, err := config.BuildFromEnvVariables(clientSet, isHeadlessMode(), file) + if err != nil { + panic(err) + } - ready, err := isPodReady(cfg) + ready, err := isPodReady(ctx, cfg) if err != nil { panic(err) } diff --git a/cmd/readiness/readiness_test.go b/cmd/readiness/readiness_test.go index 11c5bbe2a..11222effa 100644 --- a/cmd/readiness/readiness_test.go +++ b/cmd/readiness/readiness_test.go @@ -22,6 +22,7 @@ import ( // TestDeadlockDetection verifies that if the agent is stuck in "WaitAllRsMembersUp" phase (started > 15 seconds ago) // then the function returns "ready" func TestDeadlockDetection(t *testing.T) { + ctx := context.Background() type TestConfig struct { conf config.Config isErrorExpected bool @@ -105,10 +106,10 @@ func TestDeadlockDetection(t *testing.T) { isReadyExpected: false, }, } - for testName, _ := range tests { + for testName := range tests { testConfig := tests[testName] t.Run(testName, func(t *testing.T) { - ready, err := isPodReady(testConfig.conf) + ready, err := isPodReady(ctx, testConfig.conf) if testConfig.isErrorExpected { assert.Error(t, err) } else { @@ -224,7 +225,7 @@ func TestObtainingCurrentStep(t *testing.T) { expectedStep: "test", }, } - for testName, _ := range tests { + for testName := range tests { testConfig := tests[testName] t.Run(testName, func(t *testing.T) { step := findCurrentStep(testConfig.processStatuses) @@ -237,32 +238,46 @@ func TestObtainingCurrentStep(t *testing.T) { } } +// TestReadyWithWaitForCorrectBinaries tests the Static Containers Architecture mode for the Agent. +// In this case, the Readiness Probe needs to return Ready and let the StatefulSet Controller to proceed +// with the Pod rollout. 
+func TestReadyWithWaitForCorrectBinaries(t *testing.T) { + ctx := context.Background() + c := testConfigWithMongoUp("testdata/health-status-ok-with-WaitForCorrectBinaries.json", time.Second*30) + ready, err := isPodReady(ctx, c) + + assert.True(t, ready) + assert.NoError(t, err) +} + // TestHeadlessAgentHasntReachedGoal verifies that the probe reports "false" if the config version is higher than the // last achieved version of the Agent // Note that the edge case is checked here: the health-status-ok.json has the "WaitRsInit" phase stuck in the last plan // (as Agent doesn't marks all the step statuses finished when it reaches the goal) but this doesn't affect the result // as the whole plan is complete already func TestHeadlessAgentHasntReachedGoal(t *testing.T) { + ctx := context.Background() t.Setenv(headlessAgent, "true") c := testConfig("testdata/health-status-ok.json") c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 6)) - ready, err := isPodReady(c) + ready, err := isPodReady(ctx, c) assert.False(t, ready) assert.NoError(t, err) - thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(context.TODO(), c.Hostname, metav1.GetOptions{}) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "5"}, thePod.Annotations) } // TestHeadlessAgentReachedGoal verifies that the probe reports "true" if the config version is equal to the // last achieved version of the Agent func TestHeadlessAgentReachedGoal(t *testing.T) { + ctx := context.Background() t.Setenv(headlessAgent, "true") c := testConfig("testdata/health-status-ok.json") c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 5)) - ready, err := isPodReady(c) + ready, err := isPodReady(ctx, c) assert.True(t, ready) 
assert.NoError(t, err) - thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(context.TODO(), c.Hostname, metav1.GetOptions{}) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "5"}, thePod.Annotations) } diff --git a/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json new file mode 100644 index 000000000..c2c6bb307 --- /dev/null +++ b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json @@ -0,0 +1,144 @@ +{ + "statuses": { + "my-replica-set-downgrade-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1701853492, + "ExpectedToBeUp": true, + "ReplicationStatus": 1 + } + }, + "mmsStatus": { + "my-replica-set-downgrade-0": { + "name": "my-replica-set-downgrade-0", + "lastGoalVersionAchieved": 1, + "plans": [ + { + "automationConfigVersion": 1, + "started": "2023-12-06T09:03:33.709679218Z", + "completed": "2023-12-06T09:03:43.65117796Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-12-06T09:03:33.709703572Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2023-12-06T09:03:35.652236845Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": "2023-12-06T09:03:43.536653463Z", + "completed": 
"2023-12-06T09:03:43.650871495Z", + "result": "success" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": "2023-12-06T09:03:43.650920722Z", + "completed": "2023-12-06T09:03:43.65111749Z", + "result": "success" + } + ] + } + ] + }, + { + "automationConfigVersion": 2, + "started": "2023-12-06T09:04:03.576712545Z", + "completed": null, + "moves": [ + { + "move": "ChangeVersionKube", + "moveDoc": "Change MongoDB Version on operator mode", + "steps": [ + { + "step": "CheckRunningOperatorMode", + "stepDoc": "Check Running in operator mode", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576729706Z", + "completed": "2023-12-06T09:04:03.576893698Z", + "result": "success" + }, + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576894027Z", + "completed": "2023-12-06T09:04:03.577041016Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577041402Z", + "completed": "2023-12-06T09:04:03.577219188Z", + "result": "success" + }, + { + "step": "WaitAllRouterConfigsFlushedForUpgrade", + "stepDoc": "Wait until flushRouterConfig has been run on all mongoses", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.577219563Z", + "completed": "2023-12-06T09:04:03.577356271Z", + "result": "success" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577356599Z", + "completed": "2023-12-06T09:04:03.604579059Z", + "result": "success" + }, + { + "step": "WaitForCorrectBinaries", + "stepDoc": "Wait until 
correct binaries are available", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.60458063Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/versionhook/main.go b/cmd/versionhook/main.go index e2b551630..6e0d02f95 100644 --- a/cmd/versionhook/main.go +++ b/cmd/versionhook/main.go @@ -27,6 +27,7 @@ const ( ) func main() { + ctx := context.Background() logger := setupLogger() logger.Info("Running version change post-start hook") @@ -57,7 +58,7 @@ func main() { if shouldDelete { logger.Infof("Pod should be deleted") - if err := deletePod(); err != nil { + if err := deletePod(ctx); err != nil { // We should not raise an error if the Pod could not be deleted. It can have even // worse consequences: Pod being restarted with the same version, and the agent // killing it immediately after. @@ -182,7 +183,7 @@ func isWaitingToBeDeleted(healthStatus agent.MmsDirectorStatus) bool { } // deletePod attempts to delete the pod this mongod is running in -func deletePod() error { +func deletePod(ctx context.Context) error { thisPod, err := getThisPod() if err != nil { return fmt.Errorf("could not get pod: %s", err) @@ -192,7 +193,7 @@ func deletePod() error { return fmt.Errorf("could not get client: %s", err) } - if err := k8sClient.Delete(context.TODO(), &thisPod); err != nil { + if err := k8sClient.Delete(ctx, &thisPod); err != nil { return fmt.Errorf("could not delete pod: %s", err) } return nil diff --git a/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml index f903a1b53..12207a6bd 100644 --- a/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml +++ b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml @@ -3,14 +3,13 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.15.0 service.binding: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret service.binding/connectionString: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=connectionString.standardSrv service.binding/password: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=password service.binding/provider: community service.binding/type: mongodb service.binding/username: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=username - creationTimestamp: null name: mongodbcommunity.mongodbcommunity.mongodb.com spec: group: mongodbcommunity.mongodb.com @@ -38,14 +37,19 @@ spec: description: MongoDBCommunity is the Schema for the mongodbs API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -59,9 +63,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true additionalMongodConfig: - description: 'AdditionalMongodConfig is additional configuration that - can be passed to each data-bearing mongod at runtime. Uses the same - structure as the mongod configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/' + description: |- + AdditionalMongodConfig is additional configuration that can be passed to + each data-bearing mongod at runtime. Uses the same structure as the mongod + configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/ nullable: true type: object x-kubernetes-preserve-unknown-fields: true @@ -69,6 +74,40 @@ spec: description: AgentConfiguration sets options for the MongoDB automation agent properties: + auditLogRotate: + description: AuditLogRotate if enabled, will enable AuditLogRotate + for all processes. + properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 + type: string + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. 
+ type: string + timeThresholdHrs: + description: maximum hours for an individual log file before + rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object logFile: type: string logLevel: @@ -78,8 +117,9 @@ spec: processes. properties: includeAuditLogsWithMongoDBLogs: - description: set to 'true' to have the Automation Agent rotate - the audit files along with mongodb log files + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files type: boolean numTotal: description: maximum number of log files to have total @@ -88,14 +128,15 @@ spec: description: maximum number of log files to leave uncompressed type: integer percentOfDiskspace: - description: Maximum percentage of the total disk space these - log files should take up. The string needs to be able to - be converted to float64 + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 type: string sizeThresholdMB: - description: Maximum size for an individual log file before - rotation. The string needs to be able to be converted to - float64. Fractional values of MB are supported. + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. type: string timeThresholdHrs: description: maximum hours for an individual log file before @@ -123,14 +164,15 @@ spec: type: object type: object arbiters: - description: 'Arbiters is the number of arbiters to add to the Replica - Set. It is not recommended to have more than one arbiter per Replica - Set. More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/' + description: |- + Arbiters is the number of arbiters to add to the Replica Set. + It is not recommended to have more than one arbiter per Replica Set. 
+ More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/ type: integer automationConfig: - description: AutomationConfigOverride is merged on top of the operator - created automation config. Processes are merged by name. Currently - Only the process.disabled field is supported. + description: |- + AutomationConfigOverride is merged on top of the operator created automation config. Processes are merged + by name. Currently Only the process.disabled field is supported. properties: processes: items: @@ -145,8 +187,9 @@ spec: as float64 properties: includeAuditLogsWithMongoDBLogs: - description: set to 'true' to have the Automation Agent - rotate the audit files along with mongodb log files + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files type: boolean numTotal: description: maximum number of log files to have total @@ -155,15 +198,15 @@ spec: description: maximum number of log files to leave uncompressed type: integer percentOfDiskspace: - description: Maximum percentage of the total disk space - these log files should take up. The string needs to - be able to be converted to float64 + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 type: string sizeThresholdMB: - description: Maximum size for an individual log file - before rotation. The string needs to be able to be - converted to float64. Fractional values of MB are - supported. + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. 
type: string timeThresholdHrs: description: maximum hours for an individual log file @@ -180,13 +223,42 @@ spec: - name type: object type: array - required: - - processes + replicaSet: + properties: + id: + description: |- + Id can be used together with additionalMongodConfig.replication.replSetName + to manage clusters where replSetName differs from the MongoDBCommunity resource name + type: string + settings: + description: |- + MapWrapper is a wrapper for a map to be used by other structs. + The CRD generator does not support map[string]interface{} + on the top level and hence we need to work around this with + a wrapping struct. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object type: object featureCompatibilityVersion: - description: FeatureCompatibilityVersion configures the feature compatibility - version that will be set for the deployment + description: |- + FeatureCompatibilityVersion configures the feature compatibility version that will + be set for the deployment type: string + memberConfig: + description: MemberConfig + items: + properties: + priority: + type: string + tags: + additionalProperties: + type: string + type: object + votes: + type: integer + type: object + type: array members: description: Members is the number of members in the replica set type: integer @@ -216,8 +288,9 @@ spec: to 9216. type: integer tlsSecretKeyRef: - description: Name of a Secret (type kubernetes.io/tls) holding - the certificates to use in the Prometheus endpoint. + description: |- + Name of a Secret (type kubernetes.io/tls) holding the certificates to use in the + Prometheus endpoint. properties: key: description: Key is the key in the secret storing this password. @@ -238,12 +311,13 @@ spec: - username type: object replicaSetHorizons: - description: ReplicaSetHorizons Add this parameter and values if you - need your database to be accessed outside of Kubernetes. 
This setting - allows you to provide different DNS settings within the Kubernetes - cluster and to the Kubernetes cluster. The Kubernetes Operator uses - split horizon DNS for replica set members. This feature allows communication - both within the Kubernetes cluster and from outside Kubernetes. + description: |- + ReplicaSetHorizons Add this parameter and values if you need your database + to be accessed outside of Kubernetes. This setting allows you to + provide different DNS settings within the Kubernetes cluster and + to the Kubernetes cluster. The Kubernetes Operator uses split horizon + DNS for replica set members. This feature allows communication both + within the Kubernetes cluster and from outside Kubernetes. items: additionalProperties: type: string @@ -256,16 +330,23 @@ spec: authentication: properties: agentCertificateSecretRef: - description: 'AgentCertificateSecret is a reference to a Secret - containing the certificate and the key for the automation - agent The secret needs to have available: - certificate - under key: "tls.crt" - private key under key: "tls.key" - If additionally, tls.pem is present, then it needs to be - equal to the concatenation of tls.crt and tls.key' + description: |- + AgentCertificateSecret is a reference to a Secret containing the certificate and the key for the automation agent + The secret needs to have available: + - certificate under key: "tls.crt" + - private key under key: "tls.key" + If additionally, tls.pem is present, then it needs to be equal to the concatenation of tls.crt and tls.key properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -306,9 +387,9 @@ spec: description: The authentication restrictions the server enforces on the role. items: - description: AuthenticationRestriction specifies a list - of IP addresses and CIDR ranges users are allowed to - connect to or from. + description: |- + AuthenticationRestriction specifies a list of IP addresses and CIDR ranges users + are allowed to connect to or from. properties: clientSource: items: @@ -337,9 +418,9 @@ spec: type: string type: array resource: - description: Resource specifies specifies the resources - upon which a privilege permits actions. See https://www.mongodb.com/docs/manual/reference/resource-document - for more. + description: |- + Resource specifies specifies the resources upon which a privilege permits actions. + See https://www.mongodb.com/docs/manual/reference/resource-document for more. properties: anyResource: type: boolean @@ -387,45 +468,60 @@ spec: communication properties: caCertificateSecretRef: - description: CaCertificateSecret is a reference to a Secret - containing the certificate for the CA which signed the server - certificates The certificate is expected to be available - under the key "ca.crt" + description: |- + CaCertificateSecret is a reference to a Secret containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic caConfigMapRef: - description: CaConfigMap is a reference to a ConfigMap containing - the certificate for the CA which signed the server certificates - The certificate is expected to be available under the key - "ca.crt" This field is ignored when CaCertificateSecretRef - is configured + description: |- + CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" + This field is ignored when CaCertificateSecretRef is configured properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic certificateKeySecretRef: - description: CertificateKeySecret is a reference to a Secret - containing a private key and certificate to use for TLS. 
- The key and cert are expected to be PEM encoded and available - at "tls.key" and "tls.crt". This is the same format used - for the standard "kubernetes.io/tls" Secret type, but no - specific type is required. Alternatively, an entry tls.pem, - containing the concatenation of cert and key, can be provided. - If all of tls.pem, tls.crt and tls.key are present, the - tls.pem one needs to be equal to the concatenation of tls.crt - and tls.key + description: |- + CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS. + The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt". + This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required. + Alternatively, an entry tls.pem, containing the concatenation of cert and key, can be provided. + If all of tls.pem, tls.crt and tls.key are present, the tls.pem one needs to be equal to the concatenation of tls.crt and tls.key properties: name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string type: object x-kubernetes-map-type: atomic @@ -440,7 +536,8 @@ spec: type: object type: object statefulSet: - description: StatefulSetConfiguration holds the optional custom StatefulSet + description: |- + StatefulSetConfiguration holds the optional custom StatefulSet that should be merged into the operator created one. properties: metadata: @@ -474,17 +571,21 @@ spec: items: properties: additionalConnectionStringConfig: - description: Additional options to be appended to the connection - string. These options apply only to this user and will override - any existing options in the resource. + description: |- + Additional options to be appended to the connection string. + These options apply only to this user and will override any existing options in the resource. nullable: true type: object x-kubernetes-preserve-unknown-fields: true connectionStringSecretName: - description: ConnectionStringSecretName is the name of the secret - object created by the operator which exposes the connection - strings for the user. If provided, this secret must be different - for each user in a deployment. + description: |- + ConnectionStringSecretName is the name of the secret object created by the operator which exposes the connection strings for the user. + If provided, this secret must be different for each user in a deployment. + type: string + connectionStringSecretNamespace: + description: ConnectionStringSecretNamespace is the namespace + of the secret object created by the operator which exposes + the connection strings for the user. type: string db: default: admin @@ -526,10 +627,9 @@ spec: type: object type: array scramCredentialsSecretName: - description: ScramCredentialsSecretName appended by string "scram-credentials" - is the name of the secret object created by the mongoDB operator - for storing SCRAM credentials These secrets names must be - different for each user in a deployment. 
+ description: |- + ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials + These secrets names must be different for each user in a deployment. pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ type: string required: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index e488e07ad..0705e7eae 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -45,16 +45,16 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent:12.0.25.7724-1 + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 - name: VERSION_UPGRADE_HOOK_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.8 + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 - name: READINESS_PROBE_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.17 + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 - name: MONGODB_IMAGE value: mongodb-community-server - name: MONGODB_REPO_URL value: quay.io/mongodb - image: quay.io/mongodb/mongodb-kubernetes-operator:0.8.3 + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 imagePullPolicy: Always name: mongodb-kubernetes-operator resources: diff --git a/config/samples/external_access/cert-x509.yaml b/config/samples/external_access/cert-x509.yaml new file mode 100644 index 000000000..0f2eb0906 --- /dev/null +++ b/config/samples/external_access/cert-x509.yaml @@ -0,0 +1,20 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: x509-user-cert + spec: + commonName: my-x509-authenticated-user + duration: 240h0m0s + issuerRef: + name: ca-issuer + renewBefore: 120h0m0s + secretName: x509-client-cert + subject: + organizationalUnits: + - organizationalunit + organizations: + - organization + usages: + - digital 
signature + - client auth + \ No newline at end of file diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml new file mode 100644 index 000000000..47e55aaae --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + connectionStringSecretNamespace: other-namespace + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml index ca5a2dca9..8d7a274a4 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml @@ -22,16 +22,18 @@ spec: db: admin scramCredentialsSecretName: my-scram statefulSet: +# NOTE: Overwriting the "app" labelSelectors via the sts wrapper is not supported since this labelselector is not +# getting propagated to the service. 
You can add others like defined below spec: selector: matchLabels: - app: mongodb + app.kubernetes.io/name: mongodb template: metadata: # label the pod which is used by the "labelSelector" in podAntiAffinty # you can label it witch some other labels as well -- make sure it change the podAntiAffinity labelselector accordingly labels: - app: mongodb + app.kubernetes.io/name: mongodb spec: affinity: podAntiAffinity: @@ -40,7 +42,7 @@ spec: podAffinityTerm: labelSelector: matchExpressions: - - key: app + - key: app.kubernetes.io/name operator: In values: - mongodb diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml new file mode 100644 index 000000000..0a8a1566a --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + # to override ReplicaSet Configuration settings: + # https://www.mongodb.com/docs/manual/reference/replica-configuration/#replica-set-configuration-document-example + automationConfig: + replicaSet: + settings: + electionTimeoutMillis: 20 + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git 
a/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml index 78722d3c3..84f8e66af 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml @@ -44,6 +44,15 @@ spec: requests: cpu: "0.2" memory: 200M + initContainers: + - name: mongodb-agent-readinessprobe + resources: + limits: + cpu: "2" + memory: 200M + requests: + cpu: "1" + memory: 100M # the user credentials will be generated from this secret # once the credentials are generated, this secret is no longer required --- diff --git a/controllers/construct/build_statefulset_test.go b/controllers/construct/build_statefulset_test.go index a369464a1..791fa5a8b 100644 --- a/controllers/construct/build_statefulset_test.go +++ b/controllers/construct/build_statefulset_test.go @@ -1,7 +1,6 @@ package construct import ( - "os" "reflect" "testing" @@ -21,10 +20,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func init() { - os.Setenv(VersionUpgradeHookImageEnv, "version-upgrade-hook-image") -} - func newTestReplicaSet() mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ ObjectMeta: metav1.ObjectMeta{ @@ -40,12 +35,8 @@ func newTestReplicaSet() mdbv1.MongoDBCommunity { } func TestMultipleCalls_DoNotCauseSideEffects(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-community-server") - t.Setenv(AgentImageEnv, "agent-image") - mdb := newTestReplicaSet() - stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb) + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) sts := &appsv1.StatefulSet{} t.Run("1st Call", func(t *testing.T) { @@ -63,13 +54,10 @@ func TestMultipleCalls_DoNotCauseSideEffects(t 
*testing.T) { } func TestManagedSecurityContext(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-community-server") - t.Setenv(AgentImageEnv, "agent-image") t.Setenv(podtemplatespec.ManagedSecurityContextEnv, "true") mdb := newTestReplicaSet() - stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb) + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) sts := &appsv1.StatefulSet{} stsFunc(sts) @@ -77,89 +65,9 @@ func TestManagedSecurityContext(t *testing.T) { assertStatefulSetIsBuiltCorrectly(t, mdb, sts) } -func TestGetMongoDBImage(t *testing.T) { - type testConfig struct { - setArgs func(t *testing.T) - version string - expectedImage string - } - tests := map[string]testConfig{ - "Default UBI8 Community image": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-community-server") - }, - version: "6.0.5", - expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", - }, - "Overridden UBI8 Enterprise image": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-enterprise-server") - }, - version: "6.0.5", - expectedImage: "docker.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", - }, - "Overridden UBI8 Enterprise image from Quay": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "quay.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-enterprise-server") - }, - version: "6.0.5", - expectedImage: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", - }, - "Overridden Ubuntu Community image": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-community-server") - t.Setenv(MongoDBImageType, "ubuntu2204") - }, - version: "6.0.5", - 
expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubuntu2204", - }, - "Overridden UBI Community image": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(MongodbImageEnv, "mongodb-community-server") - t.Setenv(MongoDBImageType, "ubi8") - }, - version: "6.0.5", - expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", - }, - "Docker Inc images": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "docker.io") - t.Setenv(MongodbImageEnv, "mongo") - }, - version: "6.0.5", - expectedImage: "docker.io/mongo:6.0.5", - }, - "Deprecated AppDB images defined the old way": { - setArgs: func(t *testing.T) { - t.Setenv(MongodbRepoUrl, "quay.io") - t.Setenv(MongodbImageEnv, "mongodb/mongodb-enterprise-appdb-database-ubi") - // In this example, we intentionally don't use the suffix from the env. variable and let users - // define it in the version instead. There are some known customers who do this. - // This is a backwards compatibility case. 
- t.Setenv(MongoDBImageType, "will-be-ignored") - }, - - version: "5.0.14-ent", - expectedImage: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.14-ent", - }, - } - for testName := range tests { - t.Run(testName, func(t *testing.T) { - testConfig := tests[testName] - testConfig.setArgs(t) - image := getMongoDBImage(testConfig.version) - assert.Equal(t, testConfig.expectedImage, image) - }) - } -} - func TestMongod_Container(t *testing.T) { - c := container.New(mongodbContainer("4.2", []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration())) + const mongodbImageMock = "fake-mongodbImage" + c := container.New(mongodbContainer(mongodbImageMock, []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration())) t.Run("Has correct Env vars", func(t *testing.T) { assert.Len(t, c.Env, 1) @@ -168,7 +76,7 @@ func TestMongod_Container(t *testing.T) { }) t.Run("Image is correct", func(t *testing.T) { - assert.Equal(t, getMongoDBImage("4.2"), c.Image) + assert.Equal(t, mongodbImageMock, c.Image) }) t.Run("Resource requirements are correct", func(t *testing.T) { @@ -176,18 +84,19 @@ func TestMongod_Container(t *testing.T) { }) } -func TestMongoDBAgentLogging_Container(t *testing.T) { - c := container.New(mongodbAgentContainer("test-mongodb-automation-config", []corev1.VolumeMount{}, "INFO", "/var/log/mongodb-mms-automation/automation-agent.log", 24)) - - t.Run("Has correct Env vars", func(t *testing.T) { - assert.Len(t, c.Env, 7) - assert.Equal(t, agentLogFileEnv, c.Env[0].Name) - assert.Equal(t, "/var/log/mongodb-mms-automation/automation-agent.log", c.Env[0].Value) - assert.Equal(t, agentLogLevelEnv, c.Env[1].Name) - assert.Equal(t, "INFO", c.Env[1].Value) - assert.Equal(t, agentMaxLogFileDurationHoursEnv, c.Env[2].Name) - assert.Equal(t, "24", c.Env[2].Value) - }) +func TestMongoDBAgentCommand(t *testing.T) { + cmd := AutomationAgentCommand(false, mdbv1.LogLevelInfo, "testfile", 24) + baseCmd := MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + 
automationAgentOptions + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logFile testfile -logLevel INFO -maxLogFileDurationHrs 24") + + cmd = AutomationAgentCommand(false, mdbv1.LogLevelInfo, "/dev/stdout", 24) + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logLevel INFO") } func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, sts *appsv1.StatefulSet) { @@ -197,10 +106,10 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, assert.Equal(t, mdb.Name, sts.Name) assert.Equal(t, mdb.Namespace, sts.Namespace) assert.Equal(t, mongodbDatabaseServiceAccountName, sts.Spec.Template.Spec.ServiceAccountName) - assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 7) + assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 4) assert.Len(t, sts.Spec.Template.Spec.Containers[1].Env, 1) - managedSecurityContext := envvar.ReadBool(podtemplatespec.ManagedSecurityContextEnv) + managedSecurityContext := envvar.ReadBool(podtemplatespec.ManagedSecurityContextEnv) // nolint:forbidigo if !managedSecurityContext { assert.NotNil(t, sts.Spec.Template.Spec.SecurityContext) assert.Equal(t, podtemplatespec.DefaultPodSecurityContext(), *sts.Spec.Template.Spec.SecurityContext) @@ -209,7 +118,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, } agentContainer := sts.Spec.Template.Spec.Containers[0] - assert.Equal(t, "agent-image", agentContainer.Image) + assert.Equal(t, "fake-agentImage", agentContainer.Image) probe := agentContainer.ReadinessProbe assert.True(t, reflect.DeepEqual(probes.New(DefaultReadiness()), *probe)) assert.Equal(t, probes.New(DefaultReadiness()).FailureThreshold, probe.FailureThreshold) @@ -230,7 +139,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, 
assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "my-rs-keyfile") mongodContainer := sts.Spec.Template.Spec.Containers[1] - assert.Equal(t, "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", mongodContainer.Image) + assert.Equal(t, "fake-mongodbImage", mongodContainer.Image) assert.Len(t, mongodContainer.VolumeMounts, 6) if !managedSecurityContext { assert.NotNil(t, sts.Spec.Template.Spec.Containers[1].SecurityContext) @@ -247,7 +156,7 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, initContainer := sts.Spec.Template.Spec.InitContainers[0] assert.Equal(t, versionUpgradeHookName, initContainer.Name) - assert.Equal(t, "version-upgrade-hook-image", initContainer.Image) + assert.Equal(t, "fake-versionUpgradeHookImage", initContainer.Image) assert.Len(t, initContainer.VolumeMounts, 1) if !managedSecurityContext { assert.NotNil(t, sts.Spec.Template.Spec.InitContainers[0].SecurityContext) diff --git a/controllers/construct/mongodbstatefulset.go b/controllers/construct/mongodbstatefulset.go index 876d61505..ec94a6eac 100644 --- a/controllers/construct/mongodbstatefulset.go +++ b/controllers/construct/mongodbstatefulset.go @@ -4,9 +4,8 @@ import ( "fmt" "os" "strconv" - "strings" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" @@ -27,6 +26,16 @@ var ( OfficialMongodbRepoUrls = []string{"docker.io/mongodb", "quay.io/mongodb"} ) +// Environment variables used to configure the MongoDB StatefulSet. 
+const ( + MongodbRepoUrlEnv = "MONGODB_REPO_URL" + MongodbImageEnv = "MONGODB_IMAGE" + MongoDBImageTypeEnv = "MDB_IMAGE_TYPE" + AgentImageEnv = "AGENT_IMAGE" + VersionUpgradeHookImageEnv = "VERSION_UPGRADE_HOOK_IMAGE" + ReadinessProbeImageEnv = "READINESS_PROBE_IMAGE" +) + const ( AgentName = "mongodb-agent" MongodbName = "mongod" @@ -41,29 +50,30 @@ const ( mongodbDatabaseServiceAccountName = "mongodb-database" agentHealthStatusFilePathValue = "/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json" - MongodbRepoUrl = "MONGODB_REPO_URL" OfficialMongodbEnterpriseServerImageName = "mongodb-enterprise-server" - headlessAgentEnv = "HEADLESS_AGENT" - podNamespaceEnv = "POD_NAMESPACE" - automationConfigEnv = "AUTOMATION_CONFIG_MAP" - AgentImageEnv = "AGENT_IMAGE" - MongodbImageEnv = "MONGODB_IMAGE" - MongoDBImageType = "MDB_IMAGE_TYPE" - MongoDBAssumeEnterpriseEnv = "MDB_ASSUME_ENTERPRISE" - VersionUpgradeHookImageEnv = "VERSION_UPGRADE_HOOK_IMAGE" - ReadinessProbeImageEnv = "READINESS_PROBE_IMAGE" - agentLogLevelEnv = "AGENT_LOG_LEVEL" - agentLogFileEnv = "AGENT_LOG_FILE" - agentMaxLogFileDurationHoursEnv = "AGENT_MAX_LOG_FILE_DURATION_HOURS" + headlessAgentEnv = "HEADLESS_AGENT" + podNamespaceEnv = "POD_NAMESPACE" + automationConfigEnv = "AUTOMATION_CONFIG_MAP" + MongoDBAssumeEnterpriseEnv = "MDB_ASSUME_ENTERPRISE" automationMongodConfFileName = "automation-mongod.conf" keyfileFilePath = "/var/lib/mongodb-mms-automation/authentication/keyfile" - automationAgentOptions = " -skipMongoStart -noDaemonize -useLocalMongoDbTools" - automationAgentLogOptions = " -logFile ${AGENT_LOG_FILE} -maxLogFileDurationHrs ${AGENT_MAX_LOG_FILE_DURATION_HOURS} -logLevel ${AGENT_LOG_LEVEL}" + automationAgentOptions = " -skipMongoStart -noDaemonize -useLocalMongoDbTools" MongodbUserCommand = `current_uid=$(id -u) +declare -r current_uid +if ! 
grep -q "${current_uid}" /etc/passwd ; then +sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd +echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd +export NSS_WRAPPER_PASSWD=/tmp/passwd +export LD_PRELOAD=libnss_wrapper.so +export NSS_WRAPPER_GROUP=/etc/group +fi +` + //nolint:gosec //The credentials path is hardcoded in the container. + MongodbUserCommandWithAPIKeyExport = `current_uid=$(id -u) AGENT_API_KEY="$(cat /mongodb-automation/agent-api-key/agentApiKey)" declare -r current_uid if ! grep -q "${current_uid}" /etc/passwd ; then @@ -115,7 +125,7 @@ type MongoDBStatefulSetOwner interface { // BuildMongoDBReplicaSetStatefulSetModificationFunction builds the parts of the replica set that are common between every resource that implements // MongoDBStatefulSetOwner. // It doesn't configure TLS or additional containers/env vars that the statefulset might need. -func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler) statefulset.Modification { +func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string, withInitContainers bool) statefulset.Modification { labels := map[string]string{ "app": mdb.ServiceName(), } @@ -127,13 +137,10 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe agentHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/var/log/mongodb-mms-automation/healthstatus") mongodHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/healthstatus") - // hooks volume is only required on the mongod pod. - hooksVolume := statefulset.CreateVolumeFromEmptyDir("hooks") - hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) - - // scripts volume is only required on the mongodb-agent pod. 
- scriptsVolume := statefulset.CreateVolumeFromEmptyDir("agent-scripts") - scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) + hooksVolume := corev1.Volume{} + scriptsVolume := corev1.Volume{} + upgradeInitContainer := podtemplatespec.NOOP() + readinessInitContainer := podtemplatespec.NOOP() // tmp volume is required by the mongodb-agent and mongod tmpVolume := statefulset.CreateVolumeFromEmptyDir("tmp") @@ -144,7 +151,7 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe keyFileVolumeVolumeMount := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false)) keyFileVolumeVolumeMountMongod := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false)) - mongodbAgentVolumeMounts := []corev1.VolumeMount{agentHealthStatusVolumeMount, scriptsVolumeMount, keyFileVolumeVolumeMount, tmpVolumeMount} + mongodbAgentVolumeMounts := []corev1.VolumeMount{agentHealthStatusVolumeMount, keyFileVolumeVolumeMount, tmpVolumeMount} automationConfigVolumeFunc := podtemplatespec.NOOP() if mdb.NeedsAutomationConfigVolume() { @@ -153,7 +160,31 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe automationConfigVolumeMount := statefulset.CreateVolumeMount(automationConfigVolume.Name, "/var/lib/automation/config", statefulset.WithReadOnly(true)) mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, automationConfigVolumeMount) } - mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, hooksVolumeMount, keyFileVolumeVolumeMountMongod, tmpVolumeMount} + mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, keyFileVolumeVolumeMountMongod, tmpVolumeMount} + + hooksVolumeMod := podtemplatespec.NOOP() + scriptsVolumeMod := podtemplatespec.NOOP() + + // This is temporary code; + 
// once we make the operator fully deploy static workloads, we will remove those init containers. + if withInitContainers { + // hooks volume is only required on the mongod pod. + hooksVolume = statefulset.CreateVolumeFromEmptyDir("hooks") + hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) + + // scripts volume is only required on the mongodb-agent pod. + scriptsVolume = statefulset.CreateVolumeFromEmptyDir("agent-scripts") + scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) + + upgradeInitContainer = podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount}, versionUpgradeHookImage)) + readinessInitContainer = podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount}, readinessProbeImage)) + scriptsVolumeMod = podtemplatespec.WithVolume(scriptsVolume) + hooksVolumeMod = podtemplatespec.WithVolume(hooksVolume) + + mongodVolumeMounts = append(mongodVolumeMounts, hooksVolumeMount) + mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, scriptsVolumeMount) + } + dataVolumeClaim := statefulset.NOOP() logVolumeClaim := statefulset.NOOP() singleModeVolumeClaim := func(s *appsv1.StatefulSet) {} @@ -178,7 +209,7 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe agentLogLevel := mdbv1.LogLevelInfo if mdb.GetAgentLogLevel() != "" { - agentLogLevel = string(mdb.GetAgentLogLevel()) + agentLogLevel = mdb.GetAgentLogLevel() } agentLogFile := automationconfig.DefaultAgentLogFile @@ -207,16 +238,16 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe podSecurityContext, podtemplatespec.WithPodLabels(labels), podtemplatespec.WithVolume(healthStatusVolume), - podtemplatespec.WithVolume(hooksVolume), automationConfigVolumeFunc, - 
podtemplatespec.WithVolume(scriptsVolume), + hooksVolumeMod, + scriptsVolumeMod, podtemplatespec.WithVolume(tmpVolume), podtemplatespec.WithVolume(keyFileVolume), podtemplatespec.WithServiceAccount(mongodbDatabaseServiceAccountName), - podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts, agentLogLevel, agentLogFile, agentMaxLogFileDurationHours)), - podtemplatespec.WithContainer(MongodbName, mongodbContainer(mdb.GetMongoDBVersion(), mongodVolumeMounts, mdb.GetMongodConfiguration())), - podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount})), - podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount})), + podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts, agentLogLevel, agentLogFile, agentMaxLogFileDurationHours, agentImage)), + podtemplatespec.WithContainer(MongodbName, mongodbContainer(mongodbImage, mongodVolumeMounts, mdb.GetMongodConfiguration())), + upgradeInitContainer, + readinessInitContainer, ), )) } @@ -225,20 +256,37 @@ func BaseAgentCommand() string { return "agent/mongodb-agent -healthCheckFilePath=" + agentHealthStatusFilePathValue + " -serveStatusPort=5000" } -func AutomationAgentCommand() []string { - return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + automationAgentLogOptions} +// AutomationAgentCommand withAgentAPIKeyExport detects whether we want to deploy this agent with the agent api key exported +// it can be used to register the agent with OM. 
+func AutomationAgentCommand(withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int) []string { + // This is somewhat undocumented at https://www.mongodb.com/docs/ops-manager/current/reference/mongodb-agent-settings/ + // Not setting the -logFile option make the mongodb-agent log to stdout. Setting -logFile /dev/stdout will result in + // an error by the agent trying to open /dev/stdout-verbose and still trying to do log rotation. + // To keep consistent with old behavior not setting the logFile in the config does not log to stdout but keeps + // the default logFile as defined by DefaultAgentLogFile. Setting the logFile explictly to "/dev/stdout" will log to stdout. + agentLogOptions := "" + if logFile == "/dev/stdout" { + agentLogOptions += " -logLevel " + string(logLevel) + } else { + agentLogOptions += " -logFile " + logFile + " -logLevel " + string(logLevel) + " -maxLogFileDurationHrs " + strconv.Itoa(maxLogFileDurationHours) + } + + if withAgentAPIKeyExport { + return []string{"/bin/bash", "-c", MongodbUserCommandWithAPIKeyExport + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} + } + return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} } -func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount, logLevel string, logFile string, maxLogFileDurationHours int) container.Modification { +func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int, agentImage string) container.Modification { _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(AgentName), - container.WithImage(os.Getenv(AgentImageEnv)), + container.WithImage(agentImage), 
container.WithImagePullPolicy(corev1.PullAlways), container.WithReadinessProbe(DefaultReadiness()), container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithVolumeMounts(volumeMounts), - container.WithCommand(AutomationAgentCommand()), + container.WithCommand(AutomationAgentCommand(false, logLevel, logFile, maxLogFileDurationHours)), containerSecurityContext, container.WithEnvs( corev1.EnvVar{ @@ -262,28 +310,17 @@ func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []cor Name: agentHealthStatusFilePathEnv, Value: agentHealthStatusFilePathValue, }, - corev1.EnvVar{ - Name: agentLogLevelEnv, - Value: logLevel, - }, - corev1.EnvVar{ - Name: agentLogFileEnv, - Value: logFile, - }, - corev1.EnvVar{ - Name: agentMaxLogFileDurationHoursEnv, - Value: strconv.Itoa(maxLogFileDurationHours), - }, ), ) } -func versionUpgradeHookInit(volumeMount []corev1.VolumeMount) container.Modification { +func versionUpgradeHookInit(volumeMount []corev1.VolumeMount, versionUpgradeHookImage string) container.Modification { _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(versionUpgradeHookName), container.WithCommand([]string{"cp", "version-upgrade-hook", "/hooks/version-upgrade"}), - container.WithImage(os.Getenv(VersionUpgradeHookImageEnv)), + container.WithImage(versionUpgradeHookImage), + container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithImagePullPolicy(corev1.PullAlways), container.WithVolumeMounts(volumeMount), containerSecurityContext, @@ -316,44 +353,29 @@ func logsPvc(logsVolumeName string) persistentvolumeclaim.Modification { // readinessProbeInit returns a modification function which will add the readiness probe container. // this container will copy the readiness probe binary into the /opt/scripts directory. 
-func readinessProbeInit(volumeMount []corev1.VolumeMount) container.Modification { +func readinessProbeInit(volumeMount []corev1.VolumeMount, readinessProbeImage string) container.Modification { _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(ReadinessProbeContainerName), container.WithCommand([]string{"cp", "/probes/readinessprobe", "/opt/scripts/readinessprobe"}), - container.WithImage(os.Getenv(ReadinessProbeImageEnv)), + container.WithImage(readinessProbeImage), container.WithImagePullPolicy(corev1.PullAlways), container.WithVolumeMounts(volumeMount), + container.WithResourceRequirements(resourcerequirements.Defaults()), containerSecurityContext, ) } -func getMongoDBImage(version string) string { - repoUrl := os.Getenv(MongodbRepoUrl) - imageType := envvar.GetEnvOrDefault(MongoDBImageType, DefaultImageType) - - if strings.HasSuffix(repoUrl, "/") { - repoUrl = strings.TrimRight(repoUrl, "/") - } - mongoImageName := os.Getenv(MongodbImageEnv) - for _, officialUrl := range OfficialMongodbRepoUrls { - if repoUrl == officialUrl { - return fmt.Sprintf("%s/%s:%s-%s", repoUrl, mongoImageName, version, imageType) - } - } - - // This is the old images backwards compatibility code path. 
- return fmt.Sprintf("%s/%s:%s", repoUrl, mongoImageName, version) -} - -func mongodbContainer(version string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration) container.Modification { +func mongodbContainer(mongodbImage string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration) container.Modification { filePath := additionalMongoDBConfig.GetDBDataDir() + "/" + automationMongodConfFileName mongoDbCommand := fmt.Sprintf(` -#run post-start hook to handle version changes -/hooks/version-upgrade +if [ -e "/hooks/version-upgrade" ]; then + #run post-start hook to handle version changes (if exists) + /hooks/version-upgrade +fi # wait for config and keyfile to be created by the agent - while ! [ -f %s -a -f %s ]; do sleep 3 ; done ; sleep 2 ; +while ! [ -f %s -a -f %s ]; do sleep 3 ; done ; sleep 2 ; # start mongod with this configuration exec mongod -f %s; @@ -370,7 +392,7 @@ exec mongod -f %s; return container.Apply( container.WithName(MongodbName), - container.WithImage(getMongoDBImage(version)), + container.WithImage(mongodbImage), container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithCommand(containerCommand), // The official image provides both CMD and ENTRYPOINT. We're reusing the former and need to replace @@ -378,11 +400,37 @@ exec mongod -f %s; container.WithArgs([]string{""}), containerSecurityContext, container.WithEnvs( - corev1.EnvVar{ - Name: agentHealthStatusFilePathEnv, - Value: "/healthstatus/agent-health-status.json", - }, + collectEnvVars()..., ), container.WithVolumeMounts(volumeMounts), ) } + +// Function to collect and return the environment variables to be used in the +// MongoDB container. 
+func collectEnvVars() []corev1.EnvVar { + var envVars []corev1.EnvVar + + envVars = append(envVars, corev1.EnvVar{ + Name: agentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }) + + addEnvVarIfSet := func(name string) { + value := os.Getenv(name) // nolint:forbidigo + if value != "" { + envVars = append(envVars, corev1.EnvVar{ + Name: name, + Value: value, + }) + } + } + + addEnvVarIfSet(config.ReadinessProbeLoggerBackups) + addEnvVarIfSet(config.ReadinessProbeLoggerMaxSize) + addEnvVarIfSet(config.ReadinessProbeLoggerMaxAge) + addEnvVarIfSet(config.ReadinessProbeLoggerCompress) + addEnvVarIfSet(config.WithAgentFileLogging) + + return envVars +} diff --git a/controllers/construct/mongodbstatefulset_test.go b/controllers/construct/mongodbstatefulset_test.go new file mode 100644 index 000000000..67d78174b --- /dev/null +++ b/controllers/construct/mongodbstatefulset_test.go @@ -0,0 +1,97 @@ +package construct + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "testing" +) + +func TestCollectEnvVars(t *testing.T) { + tests := []struct { + name string + envSetup map[string]string + expectedEnv []corev1.EnvVar + }{ + { + name: "Basic env vars set", + envSetup: map[string]string{ + config.ReadinessProbeLoggerBackups: "3", + config.ReadinessProbeLoggerMaxSize: "10M", + config.ReadinessProbeLoggerMaxAge: "7", + config.WithAgentFileLogging: "enabled", + }, + expectedEnv: []corev1.EnvVar{ + { + Name: config.AgentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }, + { + Name: config.ReadinessProbeLoggerBackups, + Value: "3", + }, + { + Name: config.ReadinessProbeLoggerMaxSize, + Value: "10M", + }, + { + Name: config.ReadinessProbeLoggerMaxAge, + Value: "7", + }, + { + Name: config.WithAgentFileLogging, + Value: "enabled", + }, + }, + }, + { + name: "Additional env var set", + envSetup: map[string]string{ + 
config.ReadinessProbeLoggerBackups: "3", + config.ReadinessProbeLoggerMaxSize: "10M", + config.ReadinessProbeLoggerMaxAge: "7", + config.ReadinessProbeLoggerCompress: "true", + config.WithAgentFileLogging: "enabled", + }, + expectedEnv: []corev1.EnvVar{ + { + Name: config.AgentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }, + { + Name: config.ReadinessProbeLoggerBackups, + Value: "3", + }, + { + Name: config.ReadinessProbeLoggerMaxSize, + Value: "10M", + }, + { + Name: config.ReadinessProbeLoggerMaxAge, + Value: "7", + }, + { + Name: config.ReadinessProbeLoggerCompress, + Value: "true", + }, + { + Name: config.WithAgentFileLogging, + Value: "enabled", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup environment variables + for key, value := range tt.envSetup { + t.Setenv(key, value) + } + + actualEnvVars := collectEnvVars() + + assert.EqualValues(t, tt.expectedEnv, actualEnvVars) + }) + } +} diff --git a/controllers/mongodb_cleanup.go b/controllers/mongodb_cleanup.go index 59b391acf..d13b0426d 100644 --- a/controllers/mongodb_cleanup.go +++ b/controllers/mongodb_cleanup.go @@ -1,6 +1,7 @@ package controllers import ( + "context" apiErrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" @@ -9,14 +10,14 @@ import ( ) // cleanupPemSecret cleans up the old pem secret generated for the agent certificate. 
-func (r *ReplicaSetReconciler) cleanupPemSecret(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { - if currentMDB.GetAgentAuthMode() == lastAppliedMDBSpec.GetAgentAuthMode() { +func (r *ReplicaSetReconciler) cleanupPemSecret(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + if currentMDBSpec.GetAgentAuthMode() == lastAppliedMDBSpec.GetAgentAuthMode() { return } - if !currentMDB.IsAgentX509() && lastAppliedMDBSpec.IsAgentX509() { + if !currentMDBSpec.IsAgentX509() && lastAppliedMDBSpec.IsAgentX509() { agentCertSecret := lastAppliedMDBSpec.GetAgentCertificateRef() - if err := r.client.DeleteSecret(types.NamespacedName{ + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ Namespace: namespace, Name: agentCertSecret + "-pem", }); err != nil { @@ -30,22 +31,38 @@ func (r *ReplicaSetReconciler) cleanupPemSecret(currentMDB mdbv1.MongoDBCommunit } // cleanupScramSecrets cleans up old scram secrets based on the last successful applied mongodb spec. 
-func (r *ReplicaSetReconciler) cleanupScramSecrets(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { - secretsToDelete := getScramSecretsToDelete(currentMDB, lastAppliedMDBSpec) +func (r *ReplicaSetReconciler) cleanupScramSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + secretsToDelete := getScramSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec) for _, s := range secretsToDelete { - if err := r.client.DeleteSecret(types.NamespacedName{ + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ Name: s, Namespace: namespace, }); err != nil { - r.log.Warnf("Could not cleanup old secret %s", s) + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) } else { r.log.Debugf("Sucessfully cleaned up secret: %s", s) } } } -func getScramSecretsToDelete(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec) []string { +// cleanupConnectionStringSecrets cleans up old scram secrets based on the last successful applied mongodb spec. 
+func (r *ReplicaSetReconciler) cleanupConnectionStringSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string, resourceName string) { + secretsToDelete := getConnectionStringSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec, resourceName) + + for _, s := range secretsToDelete { + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Name: s, + Namespace: namespace, + }); err != nil { + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) + } else { + r.log.Debugf("Sucessfully cleaned up secret: %s", s) + } + } +} + +func getScramSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec) []string { type user struct { db string name string @@ -53,10 +70,11 @@ func getScramSecretsToDelete(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedM m := map[user]string{} var secretsToDelete []string - for _, mongoDBUser := range currentMDB.Users { - if mongoDBUser.DB != constants.ExternalDB { - m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetScramCredentialsSecretName() + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetScramCredentialsSecretName() } for _, mongoDBUser := range lastAppliedMDBSpec.Users { @@ -72,3 +90,33 @@ func getScramSecretsToDelete(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedM } return secretsToDelete } + +func getConnectionStringSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, resourceName string) []string { + type user struct { + db string + name string + } + m := map[user]string{} + var secretsToDelete []string + + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = 
mongoDBUser.GetConnectionStringSecretName(resourceName) + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + currentConnectionStringSecretName, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { // user was removed + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } else if currentConnectionStringSecretName != mongoDBUser.GetConnectionStringSecretName(resourceName) { + // this happens when a new ConnectionStringSecretName was set for the old user + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } + } + return secretsToDelete +} diff --git a/controllers/mongodb_cleanup_test.go b/controllers/mongodb_cleanup_test.go index 896b1797c..0123f63ee 100644 --- a/controllers/mongodb_cleanup_test.go +++ b/controllers/mongodb_cleanup_test.go @@ -1,12 +1,14 @@ package controllers import ( + "context" + "testing" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" kubeClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" ) func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { @@ -21,8 +23,7 @@ func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { t.Run("no change same resource", func(t *testing.T) { actual := getScramSecretsToDelete(lastApplied.Spec, lastApplied.Spec) - var expected []string - assert.Equal(t, expected, actual) + assert.Equal(t, []string(nil), actual) }) t.Run("new user new secret", func(t *testing.T) { @@ -43,10 +44,9 @@ func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { }, ) - var expected []string actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec) - assert.Equal(t, expected, actual) + assert.Equal(t, []string(nil), actual) }) t.Run("old user new secret", func(t 
*testing.T) { @@ -98,6 +98,7 @@ func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { } func TestReplicaSetReconcilerCleanupPemSecret(t *testing.T) { + ctx := context.Background() lastAppliedSpec := mdbv1.MongoDBCommunitySpec{ Security: mdbv1.Security{ Authentication: mdbv1.Authentication{ @@ -134,21 +135,108 @@ func TestReplicaSetReconcilerCleanupPemSecret(t *testing.T) { }, } - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) client := kubeClient.NewClient(mgr.GetClient()) - err := createAgentCertPemSecret(client, mdb, "CERT", "KEY", "") + err := createAgentCertPemSecret(ctx, client, mdb, "CERT", "KEY", "") assert.NoError(t, err) - r := NewReconciler(mgr) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - secret, err := r.client.GetSecret(mdb.AgentCertificatePemSecretNamespacedName()) + secret, err := r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, "CERT", string(secret.Data["tls.crt"])) assert.Equal(t, "KEY", string(secret.Data["tls.key"])) - r.cleanupPemSecret(mdb.Spec, lastAppliedSpec, "my-ns") + r.cleanupPemSecret(ctx, mdb.Spec, lastAppliedSpec, "my-ns") - _, err = r.client.GetSecret(mdb.AgentCertificatePemSecretNamespacedName()) + _, err = r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName()) assert.Error(t, err) } + +func TestReplicaSetReconcilerCleanupConnectionStringSecrets(t *testing.T) { + lastApplied := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }) + + t.Run("no change same resource", func(t *testing.T) { + actual := getConnectionStringSecretsToDelete(lastApplied.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("new user does not 
require existing user cleanup", func(t *testing.T) { + current := newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }, + mdbv1.MongoDBUser{ + Name: "newUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }, + ) + + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("old user new secret", func(t *testing.T) { + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }) + + expected := []string{"connection-string-secret"} + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, expected, actual) + }) + + t.Run("removed one user and changed secret of the other", func(t *testing.T) { + lastApplied = newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }, + mdbv1.MongoDBUser{ + Name: "anotherUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }, + ) + + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-1", + }) + + expected := []string{"connection-string-secret", "connection-string-secret-2"} + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, expected, actual) + 
}) + +} diff --git a/controllers/mongodb_tls.go b/controllers/mongodb_tls.go index d5427d76e..56c67642d 100644 --- a/controllers/mongodb_tls.go +++ b/controllers/mongodb_tls.go @@ -1,6 +1,7 @@ package controllers import ( + "context" "crypto/sha256" "fmt" "strings" @@ -35,7 +36,7 @@ const ( ) // validateTLSConfig will check that the configured ConfigMap and Secret exist and that they have the correct fields. -func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) validateTLSConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { if !mdb.Spec.Security.TLS.Enabled { return true, nil } @@ -43,7 +44,7 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo r.log.Info("Ensuring TLS is correctly configured") // Ensure CA cert is configured - _, err := getCaCrt(r.client, r.client, mdb) + _, err := getCaCrt(ctx, r.client, r.client, mdb) if err != nil { if apiErrors.IsNotFound(err) { @@ -55,7 +56,7 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo } // Ensure Secret exists - _, err = secret.ReadStringData(r.client, mdb.TLSSecretNamespacedName()) + _, err = secret.ReadStringData(ctx, r.client, mdb.TLSSecretNamespacedName()) if err != nil { if apiErrors.IsNotFound(err) { r.log.Warnf(`Secret "%s" not found`, mdb.TLSSecretNamespacedName()) @@ -67,20 +68,20 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo // validate whether the secret contains "tls.crt" and "tls.key", or it contains "tls.pem" // if it contains all three, then the pem entry should be equal to the concatenation of crt and key - _, err = getPemOrConcatenatedCrtAndKey(r.client, mdb, mdb.TLSSecretNamespacedName()) + _, err = getPemOrConcatenatedCrtAndKey(ctx, r.client, mdb.TLSSecretNamespacedName()) if err != nil { r.log.Warnf(err.Error()) return false, nil } // Watch certificate-key secret to handle rotations - 
r.secretWatcher.Watch(mdb.TLSSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.TLSSecretNamespacedName(), mdb.NamespacedName()) // Watch CA certificate changes if mdb.Spec.Security.TLS.CaCertificateSecret != nil { - r.secretWatcher.Watch(mdb.TLSCaCertificateSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.TLSCaCertificateSecretNamespacedName(), mdb.NamespacedName()) } else { - r.configMapWatcher.Watch(mdb.TLSConfigMapNamespacedName(), mdb.NamespacedName()) + r.configMapWatcher.Watch(ctx, mdb.TLSConfigMapNamespacedName(), mdb.NamespacedName()) } r.log.Infof("Successfully validated TLS config") @@ -89,17 +90,17 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo // getTLSConfigModification creates a modification function which enables TLS in the automation config. // It will also ensure that the combined cert-key secret is created. -func getTLSConfigModification(cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { +func getTLSConfigModification(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { if !mdb.Spec.Security.TLS.Enabled { return automationconfig.NOOP(), nil } - caCert, err := getCaCrt(cmGetter, secretGetter, mdb) + caCert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb) if err != nil { return automationconfig.NOOP(), err } - certKey, err := getPemOrConcatenatedCrtAndKey(secretGetter, mdb, mdb.TLSSecretNamespacedName()) + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, secretGetter, mdb.TLSSecretNamespacedName()) if err != nil { return automationconfig.NOOP(), err } @@ -108,13 +109,13 @@ func getTLSConfigModification(cmGetter configmap.Getter, secretGetter secret.Get } // getCertAndKey will fetch the certificate and key from the user-provided Secret. 
-func getCertAndKey(getter secret.Getter, mdb mdbv1.MongoDBCommunity, secretName types.NamespacedName) string { - cert, err := secret.ReadKey(getter, tlsSecretCertName, secretName) +func getCertAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + cert, err := secret.ReadKey(ctx, getter, tlsSecretCertName, secretName) if err != nil { return "" } - key, err := secret.ReadKey(getter, tlsSecretKeyName, secretName) + key, err := secret.ReadKey(ctx, getter, tlsSecretKeyName, secretName) if err != nil { return "" } @@ -123,8 +124,8 @@ func getCertAndKey(getter secret.Getter, mdb mdbv1.MongoDBCommunity, secretName } // getPem will fetch the pem from the user-provided secret -func getPem(getter secret.Getter, mdb mdbv1.MongoDBCommunity, secretName types.NamespacedName) string { - pem, err := secret.ReadKey(getter, tlsSecretPemName, secretName) +func getPem(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + pem, err := secret.ReadKey(ctx, getter, tlsSecretPemName, secretName) if err != nil { return "" } @@ -141,9 +142,9 @@ func combineCertificateAndKey(cert, key string) string { // This is either the tls.pem entry in the given secret, or the concatenation // of tls.crt and tls.key // It performs a basic validation on the entries. 
-func getPemOrConcatenatedCrtAndKey(getter secret.Getter, mdb mdbv1.MongoDBCommunity, secretName types.NamespacedName) (string, error) { - certKey := getCertAndKey(getter, mdb, secretName) - pem := getPem(getter, mdb, secretName) +func getPemOrConcatenatedCrtAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) (string, error) { + certKey := getCertAndKey(ctx, getter, secretName) + pem := getPem(ctx, getter, secretName) if certKey == "" && pem == "" { return "", fmt.Errorf(`neither "%s" nor the pair "%s"/"%s" were present in the TLS secret`, tlsSecretPemName, tlsSecretCertName, tlsSecretKeyName) } @@ -159,16 +160,16 @@ func getPemOrConcatenatedCrtAndKey(getter secret.Getter, mdb mdbv1.MongoDBCommun return certKey, nil } -func getCaCrt(cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (string, error) { +func getCaCrt(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (string, error) { var caResourceName types.NamespacedName var caData map[string]string var err error if mdb.Spec.Security.TLS.CaCertificateSecret != nil { caResourceName = mdb.TLSCaCertificateSecretNamespacedName() - caData, err = secret.ReadStringData(secretGetter, caResourceName) + caData, err = secret.ReadStringData(ctx, secretGetter, caResourceName) } else if mdb.Spec.Security.TLS.CaConfigMap != nil { caResourceName = mdb.TLSConfigMapNamespacedName() - caData, err = configmap.ReadData(cmGetter, caResourceName) + caData, err = configmap.ReadData(ctx, cmGetter, caResourceName) } if err != nil { @@ -188,8 +189,8 @@ func getCaCrt(cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.M // ensureCASecret will create or update the operator managed Secret containing // the CA certficate from the user provided Secret or ConfigMap. 
-func ensureCASecret(cmGetter configmap.Getter, secretGetter secret.Getter, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { - cert, err := getCaCrt(cmGetter, secretGetter, mdb) +func ensureCASecret(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + cert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb) if err != nil { return err } @@ -203,13 +204,13 @@ func ensureCASecret(cmGetter configmap.Getter, secretGetter secret.Getter, getUp SetOwnerReferences(mdb.GetOwnerReferences()). Build() - return secret.CreateOrUpdate(getUpdateCreator, operatorSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) } // ensureTLSSecret will create or update the operator-managed Secret containing // the concatenated certificate and key from the user-provided Secret. -func ensureTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { - certKey, err := getPemOrConcatenatedCrtAndKey(getUpdateCreator, mdb, mdb.TLSSecretNamespacedName()) +func ensureTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.TLSSecretNamespacedName()) if err != nil { return err } @@ -223,15 +224,15 @@ func ensureTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDB SetOwnerReferences(mdb.GetOwnerReferences()). 
Build() - return secret.CreateOrUpdate(getUpdateCreator, operatorSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) } -func ensureAgentCertSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { +func ensureAgentCertSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { if mdb.Spec.GetAgentAuthMode() != "X509" { return nil } - certKey, err := getPemOrConcatenatedCrtAndKey(getUpdateCreator, mdb, mdb.AgentCertificateSecretNamespacedName()) + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.AgentCertificateSecretNamespacedName()) if err != nil { return err } @@ -243,13 +244,13 @@ func ensureAgentCertSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.M SetOwnerReferences(mdb.GetOwnerReferences()). Build() - return secret.CreateOrUpdate(getUpdateCreator, agentCertSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, agentCertSecret) } // ensurePrometheusTLSSecret will create or update the operator-managed Secret containing // the concatenated certificate and key from the user-provided Secret. -func ensurePrometheusTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { - certKey, err := getPemOrConcatenatedCrtAndKey(getUpdateCreator, mdb, mdb.DeepCopy().PrometheusTLSSecretNamespacedName()) +func ensurePrometheusTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.DeepCopy().PrometheusTLSSecretNamespacedName()) if err != nil { return err } @@ -263,7 +264,7 @@ func ensurePrometheusTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdb SetOwnerReferences(mdb.GetOwnerReferences()). 
Build() - return secret.CreateOrUpdate(getUpdateCreator, operatorSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) } // tlsOperatorSecretFileName calculates the file name to use for the mounted diff --git a/controllers/mongodb_tls_test.go b/controllers/mongodb_tls_test.go index 1e755b612..b4e832778 100644 --- a/controllers/mongodb_tls_test.go +++ b/controllers/mongodb_tls_test.go @@ -24,66 +24,68 @@ import ( ) func TestStatefulSetIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) client := kubeClient.NewClient(mgr.GetClient()) - err := createTLSSecret(client, mdb, "CERT", "KEY", "") + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(client, mdb) + err = createTLSConfigMap(ctx, client, mdb) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", "") } func TestStatefulSetIsCorrectlyConfiguredWithTLSAndX509(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() 
mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) client := kubeClient.NewClient(mgr.GetClient()) - err := createTLSSecret(client, mdb, "CERT", "KEY", "") + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(client, mdb) + err = createTLSConfigMap(ctx, client, mdb) assert.NoError(t, err) crt, key, err := x509.CreateAgentCertificate() assert.NoError(t, err) - err = createAgentCertSecret(client, mdb, crt, key, "") + err = createAgentCertSecret(ctx, client, mdb, crt, key, "") assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) // Check that the pem secret has been created s := corev1.Secret{} - err = mgr.GetClient().Get(context.TODO(), mdb.AgentCertificatePemSecretNamespacedName(), &s) + err = mgr.GetClient().Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), &s) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", mdb.AgentCertificatePemSecretNamespacedName().Name) // If we deactivate X509 for the agent, we expect the certificates to be unmounted. 
mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"SCRAM"} - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts = appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", "") @@ -198,6 +200,7 @@ func assertStatefulSetVolumesAndVolumeMounts(t *testing.T, sts appsv1.StatefulSe } func TestStatefulSetIsCorrectlyConfiguredWithPrometheusTLS(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() mdb.Spec.Prometheus = &mdbv1.Prometheus{ Username: "username", @@ -210,59 +213,58 @@ func TestStatefulSetIsCorrectlyConfiguredWithPrometheusTLS(t *testing.T) { }, } - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) cli := kubeClient.NewClient(mgr.GetClient()) - err := secret.CreateOrUpdate(mgr.Client, - secret.Builder(). - SetName("prom-password-secret"). - SetNamespace(mdb.Namespace). - SetField("password", "my-password"). - Build(), - ) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("prom-password-secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). 
+ Build()) assert.NoError(t, err) - err = createTLSSecret(cli, mdb, "CERT", "KEY", "") + err = createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createPrometheusTLSSecret(cli, mdb, "CERT", "KEY", "") + err = createPrometheusTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(cli, mdb) + err = createTLSConfigMap(ctx, cli, mdb) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, mdb.PrometheusTLSOperatorSecretNamespacedName().Name, "") } func TestStatefulSetIsCorrectlyConfiguredWithTLSAfterChangingExistingVolumes(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) cli := kubeClient.NewClient(mgr.GetClient()) - err := createTLSSecret(cli, mdb, "CERT", "KEY", "") + err := createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") assert.NoError(t, err) tlsCAVolumeSecretName := mdb.TLSOperatorCASecretNamespacedName().Name changedTLSCAVolumeSecretName := tlsCAVolumeSecretName + "-old" - err = createTLSSecretWithNamespaceAndName(cli, 
mdb.Namespace, changedTLSCAVolumeSecretName, "CERT", "KEY", "") + err = createTLSSecretWithNamespaceAndName(ctx, cli, mdb.Namespace, changedTLSCAVolumeSecretName, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(cli, mdb) + err = createTLSConfigMap(ctx, cli, mdb) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") @@ -274,31 +276,32 @@ func TestStatefulSetIsCorrectlyConfiguredWithTLSAfterChangingExistingVolumes(t * } } - err = mgr.GetClient().Update(context.TODO(), &sts) + err = mgr.GetClient().Update(ctx, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, changedTLSCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts = appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, 
&sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") } func TestAutomationConfigIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() createAC := func(mdb mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { - client := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(client, mdb, "CERT", "KEY", "") + client := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(client, mdb) + err = createTLSConfigMap(ctx, client, mdb) assert.NoError(t, err) - tlsModification, err := getTLSConfigModification(client, client, mdb) + tlsModification, err := getTLSConfigModification(ctx, client, client, mdb) assert.NoError(t, err) - ac, err := buildAutomationConfig(mdb, automationconfig.Auth{}, automationconfig.AutomationConfig{}, tlsModification) + ac, err := buildAutomationConfig(mdb, false, automationconfig.Auth{}, automationconfig.AutomationConfig{}, tlsModification) assert.NoError(t, err) return ac @@ -326,6 +329,7 @@ func TestAutomationConfigIsCorrectlyConfiguredWithTLS(t *testing.T) { assert.Equal(t, "/tmp/test", process.Args26.Get("systemLog.path").String()) assert.Equal(t, "file", process.Args26.Get("systemLog.destination").String()) assert.Equal(t, process.LogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.LogRotate)) + assert.Equal(t, process.AuditLogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.AuditLogRotate)) } }) @@ -370,33 +374,34 @@ func TestAutomationConfigIsCorrectlyConfiguredWithTLS(t *testing.T) { } func TestTLSOperatorSecret(t *testing.T) { + ctx := context.Background() t.Run("Secret is created if it doesn't exist", 
func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - c := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(c, mdb, "CERT", "KEY", "") + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(c, mdb) + err = createTLSConfigMap(ctx, c, mdb) assert.NoError(t, err) - r := NewReconciler(kubeClient.NewManagerWithClient(c)) + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) // Operator-managed secret should have been created and contains the // concatenated certificate and key. expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) t.Run("Secret is updated if it already exists", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - k8sclient := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(k8sclient, mdb, "CERT", "KEY", "") + k8sclient := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, k8sclient, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(k8sclient, mdb) + err = createTLSConfigMap(ctx, k8sclient, mdb) assert.NoError(t, err) // Create operator-managed secret @@ -405,18 +410,18 @@ func TestTLSOperatorSecret(t *testing.T) { SetNamespace(mdb.TLSOperatorSecretNamespacedName().Namespace). SetField(tlsOperatorSecretFileName(""), ""). 
Build() - err = k8sclient.CreateSecret(s) + err = k8sclient.CreateSecret(ctx, s) assert.NoError(t, err) - r := NewReconciler(kubeClient.NewManagerWithClient(k8sclient)) + r := NewReconciler(kubeClient.NewManagerWithClient(k8sclient), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) // Operator-managed secret should have been updated with the concatenated // certificate and key. expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(k8sclient, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, k8sclient, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) @@ -442,63 +447,65 @@ func TestCombineCertificateAndKey(t *testing.T) { } func TestPemSupport(t *testing.T) { + ctx := context.Background() t.Run("Success if only pem is provided", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - c := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(c, mdb, "", "", "CERT\nKEY") + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "", "", "CERT\nKEY") assert.NoError(t, err) - err = createTLSConfigMap(c, mdb) + err = createTLSConfigMap(ctx, c, mdb) assert.NoError(t, err) - r := NewReconciler(kubeClient.NewManagerWithClient(c)) + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) // Operator-managed secret should have been created and contains the // 
concatenated certificate and key. expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) t.Run("Success if pem is equal to cert+key", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - c := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(c, mdb, "CERT", "KEY", "CERT\nKEY") + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "CERT\nKEY") assert.NoError(t, err) - err = createTLSConfigMap(c, mdb) + err = createTLSConfigMap(ctx, c, mdb) assert.NoError(t, err) - r := NewReconciler(kubeClient.NewManagerWithClient(c)) + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) // Operator-managed secret should have been created and contains the // concatenated certificate and key. 
expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) t.Run("Failure if pem is different from cert+key", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - c := kubeClient.NewClient(kubeClient.NewManager(&mdb).GetClient()) - err := createTLSSecret(c, mdb, "CERT1", "KEY1", "CERT\nKEY") + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT1", "KEY1", "CERT\nKEY") assert.NoError(t, err) - err = createTLSConfigMap(c, mdb) + err = createTLSConfigMap(ctx, c, mdb) assert.NoError(t, err) - r := NewReconciler(kubeClient.NewManagerWithClient(c)) + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.Error(t, err) assert.Contains(t, err.Error(), `if all of "tls.crt", "tls.key" and "tls.pem" are present in the secret, the entry for "tls.pem" must be equal to the concatenation of "tls.crt" with "tls.key"`) }) } func TestTLSConfigReferencesToCACertAreValidated(t *testing.T) { + ctx := context.Background() type args struct { caConfigMap *corev1.LocalObjectReference caCertificateSecret *corev1.LocalObjectReference @@ -531,15 +538,15 @@ func TestTLSConfigReferencesToCACertAreValidated(t *testing.T) { t.Run(testName, func(t *testing.T) { mdb := newTestReplicaSetWithTLSCaCertificateReferences(tc.caConfigMap, tc.caCertificateSecret) - mgr := kubeClient.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) cli := kubeClient.NewClient(mgr.GetClient()) - err := 
createTLSSecret(cli, mdb, "cert", "key", "pem") + err := createTLSSecret(ctx, cli, mdb, "cert", "key", "pem") assert.NoError(t, err) - r := NewReconciler(mgr) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - _, err = r.validateTLSConfig(mdb) + _, err = r.validateTLSConfig(ctx, mdb) if tc.expectedError != nil { assert.EqualError(t, err, tc.expectedError.Error()) } else { @@ -550,7 +557,7 @@ func TestTLSConfigReferencesToCACertAreValidated(t *testing.T) { } -func createTLSConfigMap(c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error { +func createTLSConfigMap(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error { if !mdb.Spec.Security.TLS.Enabled { return nil } @@ -561,10 +568,10 @@ func createTLSConfigMap(c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error { SetDataField("ca.crt", "CERT"). Build() - return c.Create(context.TODO(), &configMap) + return c.Create(ctx, &configMap) } -func createTLSSecretWithNamespaceAndName(c k8sClient.Client, namespace string, name string, crt string, key string, pem string) error { +func createTLSSecretWithNamespaceAndName(ctx context.Context, c k8sClient.Client, namespace string, name string, crt string, key string, pem string) error { sBuilder := secret.Builder(). SetName(name). SetNamespace(namespace). 
@@ -581,31 +588,31 @@ func createTLSSecretWithNamespaceAndName(c k8sClient.Client, namespace string, n } s := sBuilder.Build() - return c.Create(context.TODO(), &s) + return c.Create(ctx, &s) } -func createTLSSecret(c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { - return createTLSSecretWithNamespaceAndName(c, mdb.Namespace, mdb.Spec.Security.TLS.CertificateKeySecret.Name, crt, key, pem) +func createTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Security.TLS.CertificateKeySecret.Name, crt, key, pem) } -func createAgentCertSecret(c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { - return createTLSSecretWithNamespaceAndName(c, mdb.Namespace, mdb.AgentCertificateSecretNamespacedName().Name, crt, key, pem) +func createAgentCertSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificateSecretNamespacedName().Name, crt, key, pem) } -func createAgentCertPemSecret(c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { - return createTLSSecretWithNamespaceAndName(c, mdb.Namespace, mdb.AgentCertificatePemSecretNamespacedName().Name, crt, key, pem) +func createAgentCertPemSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificatePemSecretNamespacedName().Name, crt, key, pem) } -func createPrometheusTLSSecret(c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { - return createTLSSecretWithNamespaceAndName(c, mdb.Namespace, mdb.Spec.Prometheus.TLSSecretRef.Name, crt, key, 
pem) +func createPrometheusTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Prometheus.TLSSecretRef.Name, crt, key, pem) } -func createUserPasswordSecret(c k8sClient.Client, mdb mdbv1.MongoDBCommunity, userPasswordSecretName string, password string) error { +func createUserPasswordSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, userPasswordSecretName string, password string) error { sBuilder := secret.Builder(). SetName(userPasswordSecretName). SetNamespace(mdb.Namespace). SetField("password", password) s := sBuilder.Build() - return c.Create(context.TODO(), &s) + return c.Create(ctx, &s) } diff --git a/controllers/mongodb_users.go b/controllers/mongodb_users.go index 058ae43d8..cd99734ba 100644 --- a/controllers/mongodb_users.go +++ b/controllers/mongodb_users.go @@ -1,6 +1,7 @@ package controllers import ( + "context" "fmt" mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" @@ -12,15 +13,15 @@ import ( // ensureUserResources will check that the configured user password secrets can be found // and will start monitor them so that the reconcile process is triggered every time these secrets are updated -func (r ReplicaSetReconciler) ensureUserResources(mdb mdbv1.MongoDBCommunity) error { +func (r ReplicaSetReconciler) ensureUserResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { for _, user := range mdb.GetAuthUsers() { if user.Database != constants.ExternalDB { secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace} - if _, err := secret.ReadKey(r.client, user.PasswordSecretKey, secretNamespacedName); err != nil { + if _, err := secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName); err != nil { if apiErrors.IsNotFound(err) { // check for SCRAM secret as well scramSecretName := 
types.NamespacedName{Name: user.ScramCredentialsSecretName, Namespace: mdb.Namespace} - _, err = r.client.GetSecret(scramSecretName) + _, err = r.client.GetSecret(ctx, scramSecretName) if apiErrors.IsNotFound(err) { return fmt.Errorf(`user password secret: %s and scram secret: %s not found`, secretNamespacedName, scramSecretName) } @@ -29,7 +30,7 @@ func (r ReplicaSetReconciler) ensureUserResources(mdb mdbv1.MongoDBCommunity) er } return err } - r.secretWatcher.Watch(secretNamespacedName, mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName()) } } @@ -38,12 +39,18 @@ func (r ReplicaSetReconciler) ensureUserResources(mdb mdbv1.MongoDBCommunity) er // updateConnectionStringSecrets updates secrets where user specific connection strings are stored. // The client applications can mount these secrets and connect to the mongodb cluster -func (r ReplicaSetReconciler) updateConnectionStringSecrets(mdb mdbv1.MongoDBCommunity, clusterDomain string) error { +func (r ReplicaSetReconciler) updateConnectionStringSecrets(ctx context.Context, mdb mdbv1.MongoDBCommunity, clusterDomain string) error { for _, user := range mdb.GetAuthUsers() { secretName := user.ConnectionStringSecretName - existingSecret, err := r.client.GetSecret(types.NamespacedName{ + + secretNamespace := mdb.Namespace + if user.ConnectionStringSecretNamespace != "" { + secretNamespace = user.ConnectionStringSecretNamespace + } + + existingSecret, err := r.client.GetSecret(ctx, types.NamespacedName{ Name: secretName, - Namespace: mdb.Namespace, + Namespace: secretNamespace, }) if err != nil && !apiErrors.IsNotFound(err) { return err @@ -56,7 +63,7 @@ func (r ReplicaSetReconciler) updateConnectionStringSecrets(mdb mdbv1.MongoDBCom if user.Database != constants.ExternalDB { secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace} - pwd, err = secret.ReadKey(r.client, user.PasswordSecretKey, secretNamespacedName) + pwd, err = 
secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName) if err != nil { return err } @@ -64,7 +71,7 @@ func (r ReplicaSetReconciler) updateConnectionStringSecrets(mdb mdbv1.MongoDBCom connectionStringSecret := secret.Builder(). SetName(secretName). - SetNamespace(mdb.Namespace). + SetNamespace(secretNamespace). SetField("connectionString.standard", mdb.MongoAuthUserURI(user, pwd, clusterDomain)). SetField("connectionString.standardSrv", mdb.MongoAuthUserSRVURI(user, pwd, clusterDomain)). SetField("username", user.Username). @@ -72,9 +79,12 @@ func (r ReplicaSetReconciler) updateConnectionStringSecrets(mdb mdbv1.MongoDBCom SetOwnerReferences(mdb.GetOwnerReferences()). Build() - if err := secret.CreateOrUpdate(r.client, connectionStringSecret); err != nil { + if err := secret.CreateOrUpdate(ctx, r.client, connectionStringSecret); err != nil { return err } + + secretNamespacedName := types.NamespacedName{Name: connectionStringSecret.Name, Namespace: connectionStringSecret.Namespace} + r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName()) } return nil diff --git a/controllers/prometheus.go b/controllers/prometheus.go index 8e6151908..cebe939fe 100644 --- a/controllers/prometheus.go +++ b/controllers/prometheus.go @@ -1,6 +1,7 @@ package controllers import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -19,13 +20,13 @@ const ( ) // PrometheusModification adds Prometheus configuration to AutomationConfig. 
-func getPrometheusModification(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { +func getPrometheusModification(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { if mdb.Spec.Prometheus == nil { return automationconfig.NOOP(), nil } secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace} - password, err := secret.ReadKey(getUpdateCreator, mdb.Spec.Prometheus.GetPasswordKey(), secretNamespacedName) + password, err := secret.ReadKey(ctx, getUpdateCreator, mdb.Spec.Prometheus.GetPasswordKey(), secretNamespacedName) if err != nil { return automationconfig.NOOP(), fmt.Errorf("could not configure Prometheus modification: %s", err) } @@ -35,7 +36,7 @@ func getPrometheusModification(getUpdateCreator secret.GetUpdateCreator, mdb mdb var scheme string if mdb.Spec.Prometheus.TLSSecretRef.Name != "" { - certKey, err = getPemOrConcatenatedCrtAndKey(getUpdateCreator, mdb, mdb.PrometheusTLSSecretNamespacedName()) + certKey, err = getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.PrometheusTLSSecretNamespacedName()) if err != nil { return automationconfig.NOOP(), err } diff --git a/controllers/replica_set_controller.go b/controllers/replica_set_controller.go index 1687244b0..cf3e9d526 100644 --- a/controllers/replica_set_controller.go +++ b/controllers/replica_set_controller.go @@ -4,28 +4,10 @@ import ( "context" "encoding/json" "fmt" - "github.com/blang/semver" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "os" "strconv" "strings" - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - 
"sigs.k8s.io/controller-runtime/pkg/builder" - k8sClient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/imdario/mergo" mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" @@ -47,6 +29,20 @@ import ( "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" "github.com/stretchr/objx" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( @@ -54,8 +50,6 @@ const ( lastSuccessfulConfiguration = "mongodb.com/v1.lastSuccessfulConfiguration" lastAppliedMongoDBVersion = "mongodb.com/v1.lastAppliedMongoDBVersion" - - ignoreMdb7ErrorEnvVar = "IGNORE_MDB_7_ERROR" ) func init() { @@ -66,7 +60,7 @@ func init() { zap.ReplaceGlobals(logger) } -func NewReconciler(mgr manager.Manager) *ReplicaSetReconciler { +func NewReconciler(mgr manager.Manager, mongodbRepoUrl, mongodbImage, mongodbImageType, agentImage, versionUpgradeHookImage, readinessProbeImage string) *ReplicaSetReconciler { mgrClient := mgr.GetClient() secretWatcher := watch.New() configMapWatcher := watch.New() @@ -76,6 +70,13 @@ func NewReconciler(mgr 
manager.Manager) *ReplicaSetReconciler { log: zap.S(), secretWatcher: &secretWatcher, configMapWatcher: &configMapWatcher, + + mongodbRepoUrl: mongodbRepoUrl, + mongodbImage: mongodbImage, + mongodbImageType: mongodbImageType, + agentImage: agentImage, + versionUpgradeHookImage: versionUpgradeHookImage, + readinessProbeImage: readinessProbeImage, } } @@ -84,8 +85,8 @@ func (r *ReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). WithOptions(controller.Options{MaxConcurrentReconciles: 3}). For(&mdbv1.MongoDBCommunity{}, builder.WithPredicates(predicates.OnlyOnSpecChange())). - Watches(&source.Kind{Type: &corev1.Secret{}}, r.secretWatcher). - Watches(&source.Kind{Type: &corev1.ConfigMap{}}, r.configMapWatcher). + Watches(&corev1.Secret{}, r.secretWatcher). + Watches(&corev1.ConfigMap{}, r.configMapWatcher). Owns(&appsv1.StatefulSet{}). Complete(r) } @@ -99,6 +100,13 @@ type ReplicaSetReconciler struct { log *zap.SugaredLogger secretWatcher *watch.ResourceWatcher configMapWatcher *watch.ResourceWatcher + + mongodbRepoUrl string + mongodbImage string + mongodbImageType string + agentImage string + versionUpgradeHookImage string + readinessProbeImage string } // +kubebuilder:rbac:groups=mongodbcommunity.mongodb.com,resources=mongodbcommunity,verbs=get;list;watch;create;update;patch;delete @@ -117,7 +125,7 @@ func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.R // TODO: generalize preparation for resource // Fetch the MongoDB instance mdb := mdbv1.MongoDBCommunity{} - err := r.client.Get(context.TODO(), request.NamespacedName, &mdb) + err := r.client.Get(ctx, request.NamespacedName, &mdb) if err != nil { if apiErrors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
@@ -134,129 +142,107 @@ func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.R r.log.Infof("Reconciling MongoDB") r.log.Debug("Validating MongoDB.Spec") - err, lastAppliedSpec := r.validateSpec(mdb) + lastAppliedSpec, err := r.validateSpec(mdb) if err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("error validating new Spec: %s", err)). - withFailedPhase(), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("error validating new Spec: %s", err)). + withFailedPhase()) } r.log.Debug("Ensuring the service exists") - if err := r.ensureService(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring the service (members) exists: %s", err)). - withFailedPhase(), - ) + if err := r.ensureService(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring the service (members) exists: %s", err)). + withFailedPhase()) } - isTLSValid, err := r.validateTLSConfig(mdb) + isTLSValid, err := r.validateTLSConfig(ctx, mdb) if err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)). - withFailedPhase(), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)). + withFailedPhase()) } if !isTLSValid { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds"). - withPendingPhase(10), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds"). 
+ withPendingPhase(10)) } - if err := r.ensureTLSResources(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). - withFailedPhase(), - ) + if err := r.ensureTLSResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). + withFailedPhase()) } - if err := r.ensurePrometheusTLSResources(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). - withFailedPhase(), - ) + if err := r.ensurePrometheusTLSResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). + withFailedPhase()) } - if err := r.ensureUserResources(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring User config: %s", err)). - withFailedPhase(), - ) + if err := r.ensureUserResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring User config: %s", err)). + withFailedPhase()) } - ready, err := r.deployMongoDBReplicaSet(mdb) + ready, err := r.deployMongoDBReplicaSet(ctx, mdb, lastAppliedSpec) if err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error deploying MongoDB ReplicaSet: %s", err)). - withFailedPhase(), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error deploying MongoDB ReplicaSet: %s", err)). + withFailedPhase()) } if !ready { - return status.Update(r.client.Status(), &mdb, - statusOptions(). 
- withMessage(Info, "ReplicaSet is not yet ready, retrying in 10 seconds"). - withPendingPhase(10), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Info, "ReplicaSet is not yet ready, retrying in 10 seconds"). + withPendingPhase(10)) } r.log.Debug("Resetting StatefulSet UpdateStrategy to RollingUpdate") - if err := statefulset.ResetUpdateStrategy(&mdb, r.client); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). - withFailedPhase(), - ) + if err := statefulset.ResetUpdateStrategy(ctx, &mdb, r.client); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). + withFailedPhase()) } if mdb.IsStillScaling() { - return status.Update(r.client.Status(), &mdb, statusOptions(). + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicas(), mdb.DesiredReplicas())). withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()). withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()). - withPendingPhase(10), - ) - } - - res, err := status.Update(r.client.Status(), &mdb, - statusOptions(). - withMongoURI(mdb.MongoURI(os.Getenv(clusterDomain))). - withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). - withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). - withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()). - withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()). - withMessage(None, ""). - withRunningPhase(). 
- withVersion(mdb.GetMongoDBVersion()), - ) + withPendingPhase(10)) + } + + res, err := status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMongoURI(mdb.MongoURI(os.Getenv(clusterDomain))). // nolint:forbidigo + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). + withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()). + withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()). + withMessage(None, ""). + withRunningPhase(). + withVersion(mdb.GetMongoDBVersion())) if err != nil { r.log.Errorf("Error updating the status of the MongoDB resource: %s", err) return res, err } - if err := r.updateConnectionStringSecrets(mdb, os.Getenv(clusterDomain)); err != nil { + if err := r.updateConnectionStringSecrets(ctx, mdb, os.Getenv(clusterDomain)); err != nil { // nolint:forbidigo r.log.Errorf("Could not update connection string secrets: %s", err) } if lastAppliedSpec != nil { - r.cleanupScramSecrets(mdb.Spec, *lastAppliedSpec, mdb.Namespace) - r.cleanupPemSecret(mdb.Spec, *lastAppliedSpec, mdb.Namespace) + r.cleanupScramSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace) + r.cleanupPemSecret(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace) + r.cleanupConnectionStringSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace, mdb.Name) } - if err := r.updateLastSuccessfulConfiguration(mdb); err != nil { + if err := r.updateLastSuccessfulConfiguration(ctx, mdb); err != nil { r.log.Errorf("Could not save current spec as an annotation: %s", err) } @@ -270,7 +256,7 @@ func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.R } // updateLastSuccessfulConfiguration annotates the MongoDBCommunity resource with the latest configuration -func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(mdb mdbv1.MongoDBCommunity) error { +func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(ctx context.Context, mdb 
mdbv1.MongoDBCommunity) error { currentSpec, err := json.Marshal(mdb.Spec) if err != nil { return err @@ -282,12 +268,12 @@ func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(mdb mdbv1.Mongo // This is needed to reuse the update strategy logic in enterprise lastAppliedMongoDBVersion: mdb.Spec.Version, } - return annotations.SetAnnotations(&mdb, specAnnotations, r.client) + return annotations.SetAnnotations(ctx, &mdb, specAnnotations, r.client) } // ensureTLSResources creates any required TLS resources that the MongoDBCommunity // requires for TLS configuration. -func (r *ReplicaSetReconciler) ensureTLSResources(mdb mdbv1.MongoDBCommunity) error { +func (r *ReplicaSetReconciler) ensureTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { if !mdb.Spec.Security.TLS.Enabled { return nil } @@ -295,16 +281,16 @@ func (r *ReplicaSetReconciler) ensureTLSResources(mdb mdbv1.MongoDBCommunity) er // require the contents. if mdb.Spec.Security.TLS.Enabled { r.log.Infof("TLS is enabled, creating/updating CA secret") - if err := ensureCASecret(r.client, r.client, r.client, mdb); err != nil { + if err := ensureCASecret(ctx, r.client, r.client, r.client, mdb); err != nil { return fmt.Errorf("could not ensure CA secret: %s", err) } r.log.Infof("TLS is enabled, creating/updating TLS secret") - if err := ensureTLSSecret(r.client, mdb); err != nil { + if err := ensureTLSSecret(ctx, r.client, mdb); err != nil { return fmt.Errorf("could not ensure TLS secret: %s", err) } if mdb.Spec.IsAgentX509() { r.log.Infof("Agent X509 authentication is enabled, creating/updating agent certificate secret") - if err := ensureAgentCertSecret(r.client, mdb); err != nil { + if err := ensureAgentCertSecret(ctx, r.client, mdb); err != nil { return fmt.Errorf("could not ensure Agent Certificate secret: %s", err) } } @@ -314,7 +300,7 @@ func (r *ReplicaSetReconciler) ensureTLSResources(mdb mdbv1.MongoDBCommunity) er // ensurePrometheusTLSResources creates any required TLS 
resources that the MongoDBCommunity // requires for TLS configuration. -func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(mdb mdbv1.MongoDBCommunity) error { +func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { if mdb.Spec.Prometheus == nil || mdb.Spec.Prometheus.TLSSecretRef.Name == "" { return nil } @@ -322,7 +308,7 @@ func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(mdb mdbv1.MongoDBCom // the TLS secret needs to be created beforehand, as both the StatefulSet and AutomationConfig // require the contents. r.log.Infof("Prometheus TLS is enabled, creating/updating TLS secret") - if err := ensurePrometheusTLSSecret(r.client, mdb); err != nil { + if err := ensurePrometheusTLSSecret(ctx, r.client, mdb); err != nil { return fmt.Errorf("could not ensure TLS secret: %s", err) } @@ -335,18 +321,18 @@ func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(mdb mdbv1.MongoDBCom // of Pods corresponding to the amount of expected arbiters. // // The returned boolean indicates that the StatefulSet is ready. 
-func (r *ReplicaSetReconciler) deployStatefulSet(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) deployStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { r.log.Info("Creating/Updating StatefulSet") - if err := r.createOrUpdateStatefulSet(mdb, false); err != nil { + if err := r.createOrUpdateStatefulSet(ctx, mdb, false); err != nil { return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) } r.log.Info("Creating/Updating StatefulSet for Arbiters") - if err := r.createOrUpdateStatefulSet(mdb, true); err != nil { + if err := r.createOrUpdateStatefulSet(ctx, mdb, true); err != nil { return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) } - currentSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + currentSts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) if err != nil { return false, fmt.Errorf("error getting StatefulSet: %s", err) } @@ -360,15 +346,15 @@ func (r *ReplicaSetReconciler) deployStatefulSet(mdb mdbv1.MongoDBCommunity) (bo // deployAutomationConfig deploys the AutomationConfig for the MongoDBCommunity resource. // The returned boolean indicates whether or not that Agents have all reached goal state. 
-func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) deployAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { r.log.Infof("Creating/Updating AutomationConfig") - sts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) if err != nil && !apiErrors.IsNotFound(err) { return false, fmt.Errorf("failed to get StatefulSet: %s", err) } - ac, err := r.ensureAutomationConfig(mdb) + ac, err := r.ensureAutomationConfig(mdb, ctx, lastAppliedSpec) if err != nil { return false, fmt.Errorf("failed to ensure AutomationConfig: %s", err) } @@ -387,7 +373,7 @@ func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity r.log.Debugf("Waiting for agents to reach version %d", ac.Version) // Note: we pass in the expected number of replicas this reconciliation as we scale members one at a time. If we were // to pass in the final member count, we would be waiting for agents that do not exist yet to be ready. - ready, err := agent.AllReachedGoalState(sts, r.client, mdb.StatefulSetReplicasThisReconciliation(), ac.Version, r.log) + ready, err := agent.AllReachedGoalState(ctx, sts, r.client, mdb.StatefulSetReplicasThisReconciliation(), ac.Version, r.log) if err != nil { return false, fmt.Errorf("failed to ensure agents have reached goal state: %s", err) } @@ -397,9 +383,9 @@ func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity // shouldRunInOrder returns true if the order of execution of the AutomationConfig & StatefulSet // functions should be sequential or not. A value of false indicates they will run in reversed order. 
-func (r *ReplicaSetReconciler) shouldRunInOrder(mdb mdbv1.MongoDBCommunity) bool { +func (r *ReplicaSetReconciler) shouldRunInOrder(ctx context.Context, mdb mdbv1.MongoDBCommunity) bool { // The only case when we push the StatefulSet first is when we are ensuring TLS for the already existing ReplicaSet - sts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) if !statefulset.IsReady(sts, mdb.StatefulSetReplicasThisReconciliation()) && mdb.Spec.Security.TLS.Enabled { r.log.Debug("Enabling TLS on a deployment with a StatefulSet that is not Ready, the Automation Config must be updated first") return true @@ -437,13 +423,13 @@ func (r *ReplicaSetReconciler) shouldRunInOrder(mdb mdbv1.MongoDBCommunity) bool // deployMongoDBReplicaSet will ensure that both the AutomationConfig secret and backing StatefulSet // have been successfully created. A boolean is returned indicating if the process is complete // and an error if there was one. -func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(mdb mdbv1.MongoDBCommunity) (bool, error) { - return functions.RunSequentially(r.shouldRunInOrder(mdb), +func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { + return functions.RunSequentially(r.shouldRunInOrder(ctx, mdb), func() (bool, error) { - return r.deployAutomationConfig(mdb) + return r.deployAutomationConfig(ctx, mdb, lastAppliedSpec) }, func() (bool, error) { - return r.deployStatefulSet(mdb) + return r.deployStatefulSet(ctx, mdb) }) } @@ -451,14 +437,14 @@ func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(mdb mdbv1.MongoDBCommunit // // The Service definition is built from the `mdb` resource. If `isArbiter` is set to true, the Service // will be created for the arbiters Statefulset. 
-func (r *ReplicaSetReconciler) ensureService(mdb mdbv1.MongoDBCommunity) error { - processPortManager, err := r.createProcessPortManager(mdb) +func (r *ReplicaSetReconciler) ensureService(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + processPortManager, err := r.createProcessPortManager(ctx, mdb) if err != nil { return err } svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: mdb.ServiceName(), Namespace: mdb.Namespace}} - op, err := controllerutil.CreateOrUpdate(context.TODO(), r.client, svc, func() error { + op, err := controllerutil.CreateOrUpdate(ctx, r.client, svc, func() error { resourceVersion := svc.ResourceVersion // Save resourceVersion for later *svc = r.buildService(mdb, processPortManager) svc.ResourceVersion = resourceVersion @@ -477,13 +463,13 @@ func (r *ReplicaSetReconciler) ensureService(mdb mdbv1.MongoDBCommunity) error { // createProcessPortManager is a helper method for creating new ReplicaSetPortManager. // ReplicaSetPortManager needs current automation config and current pod state and the code for getting them // was extracted here as it is used in ensureService and buildAutomationConfig. 
-func (r *ReplicaSetReconciler) createProcessPortManager(mdb mdbv1.MongoDBCommunity) (*agent.ReplicaSetPortManager, error) { - currentAC, err := automationconfig.ReadFromSecret(r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) +func (r *ReplicaSetReconciler) createProcessPortManager(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*agent.ReplicaSetPortManager, error) { + currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) if err != nil { return nil, fmt.Errorf("could not read existing automation config: %s", err) } - currentPodStates, err := agent.GetAllDesiredMembersAndArbitersPodState(mdb.NamespacedName(), r.client, mdb.StatefulSetReplicasThisReconciliation(), mdb.StatefulSetArbitersThisReconciliation(), currentAC.Version, r.log) + currentPodStates, err := agent.GetAllDesiredMembersAndArbitersPodState(ctx, mdb.NamespacedName(), r.client, mdb.StatefulSetReplicasThisReconciliation(), mdb.StatefulSetArbitersThisReconciliation(), currentAC.Version, r.log) if err != nil { return nil, fmt.Errorf("cannot get all pods goal state: %w", err) } @@ -491,7 +477,7 @@ func (r *ReplicaSetReconciler) createProcessPortManager(mdb mdbv1.MongoDBCommuni return agent.NewReplicaSetPortManager(r.log, mdb.Spec.AdditionalMongodConfig.GetDBPort(), currentPodStates, currentAC.Processes), nil } -func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDBCommunity, isArbiter bool) error { +func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, isArbiter bool) error { set := appsv1.StatefulSet{} name := mdb.NamespacedName() @@ -499,18 +485,19 @@ func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDBCommun name = mdb.ArbiterNamespacedName() } - err := r.client.Get(context.TODO(), name, &set) + err := r.client.Get(ctx, name, &set) err = 
k8sClient.IgnoreNotFound(err) if err != nil { return fmt.Errorf("error getting StatefulSet: %s", err) } - buildStatefulSetModificationFunction(mdb)(&set) + mongodbImage := getMongoDBImage(r.mongodbRepoUrl, r.mongodbImage, r.mongodbImageType, mdb.GetMongoDBVersion()) + buildStatefulSetModificationFunction(mdb, mongodbImage, r.agentImage, r.versionUpgradeHookImage, r.readinessProbeImage)(&set) if isArbiter { buildArbitersModificationFunction(mdb)(&set) } - if _, err = statefulset.CreateOrUpdate(r.client, set); err != nil { + if _, err = statefulset.CreateOrUpdate(ctx, r.client, set); err != nil { return fmt.Errorf("error creating/updating StatefulSet: %s", err) } return nil @@ -518,23 +505,18 @@ func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDBCommun // ensureAutomationConfig makes sure the AutomationConfig secret has been successfully created. The automation config // that was updated/created is returned. -func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDBCommunity) (automationconfig.AutomationConfig, error) { - ac, err := r.buildAutomationConfig(mdb) +func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDBCommunity, ctx context.Context, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) { + ac, err := r.buildAutomationConfig(ctx, mdb, lastAppliedSpec) if err != nil { return automationconfig.AutomationConfig{}, fmt.Errorf("could not build automation config: %s", err) } - return automationconfig.EnsureSecret( - r.client, - types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, - mdb.GetOwnerReferences(), - ac, - ) + return automationconfig.EnsureSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, mdb.GetOwnerReferences(), ac) } -func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, auth automationconfig.Auth, currentAc automationconfig.AutomationConfig, modifications 
...automationconfig.Modification) (automationconfig.AutomationConfig, error) { - domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) - arbiterDomain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) +func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, isEnterprise bool, auth automationconfig.Auth, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) { + domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo + arbiterDomain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo zap.S().Debugw("AutomationConfigMembersThisReconciliation", "mdb.AutomationConfigMembersThisReconciliation()", mdb.AutomationConfigMembersThisReconciliation()) @@ -544,8 +526,15 @@ func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, auth automationconfig.Aut arbitersCount = mdb.Status.CurrentMongoDBArbiters } + var acOverrideSettings map[string]interface{} + var acReplicaSetId *string + if mdb.Spec.AutomationConfigOverride != nil { + acOverrideSettings = mdb.Spec.AutomationConfigOverride.ReplicaSet.Settings.Object + acReplicaSetId = mdb.Spec.AutomationConfigOverride.ReplicaSet.Id + } + return automationconfig.NewBuilder(). - IsEnterprise(guessEnterprise(mdb)). + IsEnterprise(isEnterprise). SetTopology(automationconfig.ReplicaSetTopology). SetName(mdb.Name). SetDomain(domain). @@ -558,17 +547,20 @@ func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, auth automationconfig.Aut SetFCV(mdb.Spec.FeatureCompatibilityVersion). SetOptions(automationconfig.Options{DownloadBase: "/var/lib/mongodb-mms-automation"}). SetAuth(auth). + SetReplicaSetId(acReplicaSetId). + SetSettings(acOverrideSettings). + SetMemberOptions(mdb.Spec.MemberConfig). SetDataDir(mdb.GetMongodConfiguration().GetDBDataDir()). AddModifications(getMongodConfigModification(mdb)). 
AddModifications(modifications...). AddProcessModification(func(_ int, p *automationconfig.Process) { - automationconfig.ConfigureAgentConfiguration(mdb.Spec.AgentConfiguration.SystemLog, mdb.Spec.AgentConfiguration.LogRotate, p) + automationconfig.ConfigureAgentConfiguration(mdb.Spec.AgentConfiguration.SystemLog, mdb.Spec.AgentConfiguration.LogRotate, mdb.Spec.AgentConfiguration.AuditLogRotate, p) }). Build() } -func guessEnterprise(mdb mdbv1.MongoDBCommunity) bool { - overrideAssumption, err := strconv.ParseBool(os.Getenv(construct.MongoDBAssumeEnterpriseEnv)) +func guessEnterprise(mdb mdbv1.MongoDBCommunity, mongodbImage string) bool { + overrideAssumption, err := strconv.ParseBool(os.Getenv(construct.MongoDBAssumeEnterpriseEnv)) // nolint:forbidigo if err == nil { return overrideAssumption } @@ -585,12 +577,9 @@ func guessEnterprise(mdb mdbv1.MongoDBCommunity) bool { } } if len(overriddenImage) > 0 { - if strings.Contains(overriddenImage, construct.OfficialMongodbEnterpriseServerImageName) { - return true - } - return false + return strings.Contains(overriddenImage, construct.OfficialMongodbEnterpriseServerImageName) } - return os.Getenv(construct.MongodbImageEnv) == construct.OfficialMongodbEnterpriseServerImageName + return mongodbImage == construct.OfficialMongodbEnterpriseServerImageName } // buildService creates a Service that will be used for the Replica Set StatefulSet @@ -625,30 +614,20 @@ func (r *ReplicaSetReconciler) buildService(mdb mdbv1.MongoDBCommunity, portMana // If there has not yet been a successful configuration, the function runs the initial Spec validations. Otherwise, // it checks that the attempted Spec is valid in relation to the Spec that resulted from that last successful configuration. // The validation also returns the lastSuccessFulConfiguration Spec as mdbv1.MongoDBCommunitySpec. 
-func (r ReplicaSetReconciler) validateSpec(mdb mdbv1.MongoDBCommunity) (error, *mdbv1.MongoDBCommunitySpec) { - if !envvar.ReadBool(ignoreMdb7ErrorEnvVar) { - semverVersion, err := semver.Make(mdb.Spec.Version) - if err != nil { - r.log.Warnf("could not parse version %v", mdb.Spec.Version) - } else { - if semverVersion.Major >= 7 { - return fmt.Errorf("mongodb >= 7.0.0 is not supported"), nil - } - } - } +func (r ReplicaSetReconciler) validateSpec(mdb mdbv1.MongoDBCommunity) (*mdbv1.MongoDBCommunitySpec, error) { lastSuccessfulConfigurationSaved, ok := mdb.Annotations[lastSuccessfulConfiguration] if !ok { // First version of Spec - return validation.ValidateInitialSpec(mdb, r.log), nil + return nil, validation.ValidateInitialSpec(mdb, r.log) } lastSpec := mdbv1.MongoDBCommunitySpec{} err := json.Unmarshal([]byte(lastSuccessfulConfigurationSaved), &lastSpec) if err != nil { - return err, &lastSpec + return &lastSpec, err } - return validation.ValidateUpdate(mdb, lastSpec, r.log), &lastSpec + return &lastSpec, validation.ValidateUpdate(mdb, lastSpec, r.log) } func getCustomRolesModification(mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { @@ -662,8 +641,8 @@ func getCustomRolesModification(mdb mdbv1.MongoDBCommunity) (automationconfig.Mo }, nil } -func (r ReplicaSetReconciler) buildAutomationConfig(mdb mdbv1.MongoDBCommunity) (automationconfig.AutomationConfig, error) { - tlsModification, err := getTLSConfigModification(r.client, r.client, mdb) +func (r ReplicaSetReconciler) buildAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) { + tlsModification, err := getTLSConfigModification(ctx, r.client, r.client, mdb) if err != nil { return automationconfig.AutomationConfig{}, fmt.Errorf("could not configure TLS modification: %s", err) } @@ -673,39 +652,44 @@ func (r ReplicaSetReconciler) buildAutomationConfig(mdb mdbv1.MongoDBCommunity) return 
automationconfig.AutomationConfig{}, fmt.Errorf("could not configure custom roles: %s", err) } - currentAC, err := automationconfig.ReadFromSecret(r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) if err != nil { return automationconfig.AutomationConfig{}, fmt.Errorf("could not read existing automation config: %s", err) } auth := automationconfig.Auth{} - if err := authentication.Enable(&auth, r.client, &mdb, mdb.AgentCertificateSecretNamespacedName()); err != nil { + if err := authentication.Enable(ctx, &auth, r.client, &mdb, mdb.AgentCertificateSecretNamespacedName()); err != nil { return automationconfig.AutomationConfig{}, err } + if lastAppliedSpec != nil { + authentication.AddRemovedUsers(&auth, mdb, lastAppliedSpec) + } + prometheusModification := automationconfig.NOOP() if mdb.Spec.Prometheus != nil { secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace} - r.secretWatcher.Watch(secretNamespacedName, mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName()) - prometheusModification, err = getPrometheusModification(r.client, mdb) + prometheusModification, err = getPrometheusModification(ctx, r.client, mdb) if err != nil { return automationconfig.AutomationConfig{}, fmt.Errorf("could not enable TLS on Prometheus endpoint: %s", err) } } if mdb.Spec.IsAgentX509() { - r.secretWatcher.Watch(mdb.AgentCertificateSecretNamespacedName(), mdb.NamespacedName()) - r.secretWatcher.Watch(mdb.AgentCertificatePemSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.AgentCertificateSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.AgentCertificatePemSecretNamespacedName(), mdb.NamespacedName()) } - processPortManager, err 
:= r.createProcessPortManager(mdb) + processPortManager, err := r.createProcessPortManager(ctx, mdb) if err != nil { return automationconfig.AutomationConfig{}, err } automationConfig, err := buildAutomationConfig( mdb, + guessEnterprise(mdb, r.mongodbImage), auth, currentAC, tlsModification, @@ -755,16 +739,10 @@ func getMongodConfigModification(mdb mdbv1.MongoDBCommunity) automationconfig.Mo } } -// buildStatefulSet takes a MongoDB resource and converts it into +// buildStatefulSetModificationFunction takes a MongoDB resource and converts it into // the corresponding stateful set -func buildStatefulSet(mdb mdbv1.MongoDBCommunity) (appsv1.StatefulSet, error) { - sts := appsv1.StatefulSet{} - buildStatefulSetModificationFunction(mdb)(&sts) - return sts, nil -} - -func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity) statefulset.Modification { - commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb) +func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string) statefulset.Modification { + commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage, true) return statefulset.Apply( commonModification, statefulset.WithOwnerReference(mdb.GetOwnerReferences()), @@ -804,3 +782,18 @@ func getDomain(service, namespace, clusterName string) string { func isPreReadinessInitContainerStatefulSet(sts appsv1.StatefulSet) bool { return container.GetByName(construct.ReadinessProbeContainerName, sts.Spec.Template.Spec.InitContainers) == nil } + +func getMongoDBImage(repoUrl, mongodbImage, mongodbImageType, version string) string { + if strings.HasSuffix(repoUrl, "/") { + repoUrl = strings.TrimRight(repoUrl, "/") + } + mongoImageName := mongodbImage + for _, officialUrl := range construct.OfficialMongodbRepoUrls { + if repoUrl == 
officialUrl { + return fmt.Sprintf("%s/%s:%s-%s", repoUrl, mongoImageName, version, mongodbImageType) + } + } + + // This is the old images backwards compatibility code path. + return fmt.Sprintf("%s/%s:%s", repoUrl, mongoImageName, version) +} diff --git a/controllers/replicaset_controller_test.go b/controllers/replicaset_controller_test.go index d0f8ec050..d7f2eb8da 100644 --- a/controllers/replicaset_controller_test.go +++ b/controllers/replicaset_controller_test.go @@ -4,12 +4,13 @@ import ( "context" "encoding/json" "fmt" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" "os" "reflect" "testing" "time" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" @@ -45,9 +46,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func init() { - os.Setenv(construct.AgentImageEnv, "agent-image") -} +const ( + AgentImage = "fake-agentImage" +) func newTestReplicaSet() mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ @@ -87,6 +88,9 @@ func newTestReplicaSetWithSystemLogAndLogRotate() mdbv1.MongoDBCommunity { LogRotate: &automationconfig.CrdLogRotate{ SizeThresholdMB: "1", }, + AuditLogRotate: &automationconfig.CrdLogRotate{ + SizeThresholdMB: "1", + }, SystemLog: &automationconfig.SystemLog{ Destination: automationconfig.File, Path: "/tmp/test", @@ -153,17 +157,18 @@ func newTestReplicaSetWithTLSCaCertificateReferences(caConfigMap, caCertificateS } func TestKubernetesResources_AreCreated(t *testing.T) { + ctx := context.Background() // TODO: Create builder/yaml fixture of some type to construct MDB objects for unit tests mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", 
"fake-readinessProbeImage") - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) s := corev1.Secret{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &s) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &s) assert.NoError(t, err) assert.Equal(t, mdb.Namespace, s.Namespace) assert.Equal(t, mdb.AutomationConfigSecretName(), s.Name) @@ -171,39 +176,24 @@ func TestKubernetesResources_AreCreated(t *testing.T) { assert.NotEmpty(t, s.Data[automationconfig.ConfigKey]) } -func TestKubernetesResources_MongoDB7IsRejected(t *testing.T) { - mdb := newTestReplicaSet() - mdb.Spec.Version = "7.0.0" - - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) - - assert.NoError(t, err) - assert.Equal(t, true, res.Requeue) - assert.Equal(t, time.Duration(0), res.RequeueAfter) -} - func TestStatefulSet_IsCorrectlyConfigured(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(construct.MongodbImageEnv, "mongodb-community-server") + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "docker.io/mongodb", "mongodb-community-server", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, 
reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.Containers, 2) agentContainer := sts.Spec.Template.Spec.Containers[1] assert.Equal(t, construct.AgentName, agentContainer.Name) - assert.Equal(t, os.Getenv(construct.AgentImageEnv), agentContainer.Image) + assert.Equal(t, AgentImage, agentContainer.Image) expectedProbe := probes.New(construct.DefaultReadiness()) assert.True(t, reflect.DeepEqual(&expectedProbe, agentContainer.ReadinessProbe)) @@ -223,66 +213,42 @@ func TestGuessEnterprise(t *testing.T) { type testConfig struct { setArgs func(t *testing.T) mdb mdbv1.MongoDBCommunity + mongodbImage string expectedEnterprise bool } tests := map[string]testConfig{ "No override and Community image": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(construct.MongodbImageEnv, "mongodb-community-server") - }, + setArgs: func(t *testing.T) {}, mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-community-server", expectedEnterprise: false, }, "No override and Enterprise image": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") - }, + setArgs: func(t *testing.T) {}, mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-enterprise-server", expectedEnterprise: true, }, "Assuming enterprise manually": { setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(construct.MongodbImageEnv, "mongodb-community-server") t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "true") }, mdb: 
mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-community-server", expectedEnterprise: true, }, "Assuming community manually": { setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") - t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "false") }, mdb: mdbv1.MongoDBCommunity{}, - expectedEnterprise: false, - }, - "Enterprise with different repo": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "some_other_repo.com/some_other_org") - t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") - }, - mdb: mdbv1.MongoDBCommunity{}, - expectedEnterprise: true, - }, - "Community with different repo": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "some_other_repo.com/some_other_org") - t.Setenv(construct.MongodbImageEnv, "mongodb-community-server") - }, - mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-enterprise-server", expectedEnterprise: false, }, // This one is a corner case. We don't expect users to fall here very often as there are // dedicated variables to control this type of behavior. 
"Enterprise with StatefulSet override": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "some_other_repo.com/some_other_org") - t.Setenv(construct.MongodbImageEnv, "mongodb-community-server") - }, + setArgs: func(t *testing.T) {}, mdb: mdbv1.MongoDBCommunity{ Spec: mdbv1.MongoDBCommunitySpec{ StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ @@ -303,13 +269,11 @@ func TestGuessEnterprise(t *testing.T) { }, }, }, + mongodbImage: "mongodb-community-server", expectedEnterprise: true, }, "Enterprise with StatefulSet override to Community": { - setArgs: func(t *testing.T) { - t.Setenv(construct.MongodbRepoUrl, "some_other_repo.com/some_other_org") - t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") - }, + setArgs: func(t *testing.T) {}, mdb: mdbv1.MongoDBCommunity{ Spec: mdbv1.MongoDBCommunitySpec{ StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ @@ -330,6 +294,7 @@ func TestGuessEnterprise(t *testing.T) { }, }, }, + mongodbImage: "mongodb-enterprise-server", expectedEnterprise: false, }, } @@ -337,7 +302,7 @@ func TestGuessEnterprise(t *testing.T) { t.Run(testName, func(t *testing.T) { testConfig := tests[testName] testConfig.setArgs(t) - calculatedEnterprise := guessEnterprise(testConfig.mdb) + calculatedEnterprise := guessEnterprise(testConfig.mdb, testConfig.mongodbImage) assert.Equal(t, testConfig.expectedEnterprise, calculatedEnterprise) }) } @@ -353,39 +318,40 @@ func getVolumeByName(sts appsv1.StatefulSet, volumeName string) (corev1.Volume, } func TestChangingVersion_ResultsInRollingUpdateStrategyType(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) + mgr := client.NewManager(ctx, &mdb) mgrClient := mgr.GetClient() - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, 
"fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assertReconciliationSuccessful(t, res, err) // fetch updated resource after first reconciliation - _ = mgrClient.Get(context.TODO(), mdb.NamespacedName(), &mdb) + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &mdb) sts := appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) mdbRef := &mdb mdbRef.Spec.Version = "4.2.3" - _ = mgrClient.Update(context.TODO(), &mdb) + _ = mgrClient.Update(ctx, &mdb) // agents start the upgrade, they are not all ready sts.Status.UpdatedReplicas = 1 sts.Status.ReadyReplicas = 2 - err = mgrClient.Update(context.TODO(), &sts) + err = mgrClient.Update(ctx, &sts) assert.NoError(t, err) - _ = mgrClient.Get(context.TODO(), mdb.NamespacedName(), &sts) + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &sts) // reconcilliation is successful - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts = appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type, @@ -397,16 +363,16 @@ func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { mdb := newTestReplicaSet() 
mdb.Spec.Version = "4.0.0" mdb.Annotations[annotations.LastAppliedMongoDBVersion] = "4.0.0" - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) t.Run("On No Version Change, First Version", func(t *testing.T) { mdb := newTestReplicaSet() mdb.Spec.Version = "4.0.0" delete(mdb.Annotations, annotations.LastAppliedMongoDBVersion) - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) t.Run("On Version Change", func(t *testing.T) { @@ -422,67 +388,68 @@ func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { assert.NoError(t, err) mdb.Annotations[annotations.LastAppliedMongoDBVersion] = string(bytes) - sts, err := buildStatefulSet(mdb) - - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.OnDeleteStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) } func TestService_isCorrectlyCreatedAndUpdated(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", 
"fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) svc := corev1.Service{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) assert.NoError(t, err) assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) assert.Len(t, svc.Spec.Ports, 1) assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"}) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) } func TestService_usesCustomMongodPortWhenSpecified(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mongodConfig := objx.New(map[string]interface{}{}) mongodConfig.Set("net.port", 1000.) 
mdb.Spec.AdditionalMongodConfig.Object = mongodConfig - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) svc := corev1.Service{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) assert.NoError(t, err) assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) assert.Len(t, svc.Spec.Ports, 1) assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"}) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) } -func createOrUpdatePodsWithVersions(t *testing.T, c k8sClient.Client, name types.NamespacedName, versions []string) { +func createOrUpdatePodsWithVersions(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versions []string) { for i, version := range versions { - createPodWithAgentAnnotation(t, c, types.NamespacedName{ + createPodWithAgentAnnotation(ctx, t, c, types.NamespacedName{ Namespace: name.Namespace, Name: fmt.Sprintf("%s-%d", name.Name, i), }, version) } } -func createPodWithAgentAnnotation(t 
*testing.T, c k8sClient.Client, name types.NamespacedName, versionStr string) { +func createPodWithAgentAnnotation(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versionStr string) { pod := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name.Name, @@ -493,10 +460,10 @@ func createPodWithAgentAnnotation(t *testing.T, c k8sClient.Client, name types.N }, } - err := c.Create(context.TODO(), &pod) + err := c.Create(ctx, &pod) if err != nil && apiErrors.IsAlreadyExists(err) { - err = c.Update(context.TODO(), &pod) + err = c.Update(ctx, &pod) assert.NoError(t, err) } @@ -504,6 +471,7 @@ func createPodWithAgentAnnotation(t *testing.T, c k8sClient.Client, name types.N } func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { + ctx := context.Background() mdb := newScramReplicaSet(mdbv1.MongoDBUser{ Name: "testuser", PasswordSecretRef: mdbv1.SecretKeyReference{ @@ -518,53 +486,53 @@ func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { const oldPort = automationconfig.DefaultDBPort const newPort = 8000 - mgr := client.NewManager(&mdb) + mgr := client.NewManager(ctx, &mdb) - r := NewReconciler(mgr) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") t.Run("Prepare cluster with arbiters and change port", func(t *testing.T) { - err := createUserPasswordSecret(mgr.Client, mdb, "password-secret-name", "pass") + err := createUserPasswordSecret(ctx, mgr.Client, mdb, "password-secret-name", "pass") assert.NoError(t, err) mdb.Spec.Arbiters = 1 - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) assertReconciliationSuccessful(t, res, err) - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", }) - _ = 
assertAutomationConfigVersion(t, mgr.Client, mdb, 1) + _ = assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 1) - setStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 3) - setArbiterStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 1) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), namespacedName, []string{"1", "1", "1"}) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), arbiterNamespacedName, []string{"1"}) + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"1", "1", "1"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"1"}) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) assertReconciliationSuccessful(t, res, err) - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", }) - _ = assertAutomationConfigVersion(t, mgr.Client, mdb, 1) - assertStatefulsetReady(t, mgr, namespacedName, 3) - assertStatefulsetReady(t, mgr, arbiterNamespacedName, 1) + _ = assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 1) + assertStatefulsetReady(ctx, t, mgr, namespacedName, 3) + assertStatefulsetReady(ctx, t, mgr, arbiterNamespacedName, 1) mdb.Spec.AdditionalMongodConfig = mdbv1.NewMongodConfiguration() mdb.Spec.AdditionalMongodConfig.SetDBPort(newPort) - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, oldPort, newPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) }) t.Run("Port should be changed only in the process #0", func(t *testing.T) { // port changes should be performed one at a time // 
should set port #0 to new one - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) require.NoError(t, err) assert.True(t, res.Requeue) - currentAc := assertAutomationConfigVersion(t, mgr.Client, mdb, 2) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 2) require.Len(t, currentAc.Processes, 4) assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) assert.Equal(t, oldPort, currentAc.Processes[1].GetPort()) @@ -572,24 +540,24 @@ func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) // not all ports are changed, so there are still two ports in the service - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", newPort: "mongodb-new", }) - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, oldPort, newPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) }) t.Run("Ports should be changed in processes #0,#1", func(t *testing.T) { - setStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 3) - setArbiterStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 1) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), namespacedName, []string{"2", "2", "2"}) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), arbiterNamespacedName, []string{"2"}) + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"2", "2", "2"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"2"}) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: 
namespacedName}) require.NoError(t, err) assert.True(t, res.Requeue) - currentAc := assertAutomationConfigVersion(t, mgr.Client, mdb, 3) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 3) require.Len(t, currentAc.Processes, 4) assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) @@ -597,24 +565,24 @@ func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) // not all ports are changed, so there are still two ports in the service - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", newPort: "mongodb-new", }) - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, oldPort, newPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) }) t.Run("Ports should be changed in processes #0,#1,#2", func(t *testing.T) { - setStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 3) - setArbiterStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 1) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), namespacedName, []string{"3", "3", "3"}) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), arbiterNamespacedName, []string{"3"}) + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"3", "3", "3"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"3"}) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) require.NoError(t, err) assert.True(t, res.Requeue) - currentAc := assertAutomationConfigVersion(t, mgr.Client, mdb, 4) + currentAc := assertAutomationConfigVersion(ctx, t, 
mgr.Client, mdb, 4) require.Len(t, currentAc.Processes, 4) assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) @@ -622,24 +590,24 @@ func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) // not all ports are changed, so there are still two ports in the service - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", newPort: "mongodb-new", }) - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, oldPort, newPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) }) t.Run("Ports should be changed in all processes", func(t *testing.T) { - setStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 3) - setArbiterStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 1) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), namespacedName, []string{"4", "4", "4"}) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), arbiterNamespacedName, []string{"4"}) + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"4", "4", "4"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"4"}) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assert.NoError(t, err) assert.True(t, res.Requeue) - currentAc := assertAutomationConfigVersion(t, mgr.Client, mdb, 5) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) require.Len(t, currentAc.Processes, 4) assert.Equal(t, newPort, 
currentAc.Processes[0].GetPort()) assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) @@ -647,58 +615,58 @@ func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) // all the ports are changed but there are still two service ports for old and new port until the next reconcile - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ oldPort: "mongodb", newPort: "mongodb-new", }) - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, oldPort, newPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) }) t.Run("At the end there should be only new port in the service", func(t *testing.T) { - setStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 3) - setArbiterStatefulSetReadyReplicas(t, mgr.GetClient(), mdb, 1) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), namespacedName, []string{"5", "5", "5"}) - createOrUpdatePodsWithVersions(t, mgr.GetClient(), arbiterNamespacedName, []string{"5"}) + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"5", "5", "5"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"5"}) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: namespacedName}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) assert.NoError(t, err) // no need to requeue, port change is finished assert.False(t, res.Requeue) // there should not be any changes in config anymore - currentAc := assertAutomationConfigVersion(t, mgr.Client, mdb, 5) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) require.Len(t, currentAc.Processes, 4) assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) 
assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) - assertServicePorts(t, mgr.Client, mdb, map[int]string{ + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ newPort: "mongodb", }) // only at the end, when all pods are ready we have updated connection strings - assertConnectionStringSecretPorts(t, mgr.GetClient(), mdb, newPort, oldPort) + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, newPort, oldPort) }) } // assertConnectionStringSecretPorts checks that connection string secret has expectedPort and does not have notExpectedPort. -func assertConnectionStringSecretPorts(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedPort int, notExpectedPort int) { +func assertConnectionStringSecretPorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedPort int, notExpectedPort int) { connectionStringSecret := corev1.Secret{} scramUsers := mdb.GetAuthUsers() require.Len(t, scramUsers, 1) - secretNamespacedName := types.NamespacedName{Name: scramUsers[0].ConnectionStringSecretName, Namespace: mdb.Namespace} - err := c.Get(context.TODO(), secretNamespacedName, &connectionStringSecret) + secretNamespacedName := types.NamespacedName{Name: scramUsers[0].ConnectionStringSecretName, Namespace: scramUsers[0].ConnectionStringSecretNamespace} + err := c.Get(ctx, secretNamespacedName, &connectionStringSecret) require.NoError(t, err) require.Contains(t, connectionStringSecret.Data, "connectionString.standard") assert.Contains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", expectedPort)) assert.NotContains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", notExpectedPort)) } -func assertServicePorts(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedServicePorts map[int]string) { 
+func assertServicePorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedServicePorts map[int]string) { svc := corev1.Service{} - err := c.Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err := c.Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) require.NoError(t, err) assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) assert.Equal(t, mdb.ServiceName(), svc.Spec.Selector["app"]) @@ -712,21 +680,22 @@ func assertServicePorts(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommu assert.Equal(t, expectedServicePorts, actualServicePorts) } -func assertAutomationConfigVersion(t *testing.T, c client.Client, mdb mdbv1.MongoDBCommunity, expectedVersion int) automationconfig.AutomationConfig { - ac, err := automationconfig.ReadFromSecret(c, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) +func assertAutomationConfigVersion(ctx context.Context, t *testing.T, c client.Client, mdb mdbv1.MongoDBCommunity, expectedVersion int) automationconfig.AutomationConfig { + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) require.NoError(t, err) assert.Equal(t, expectedVersion, ac.Version) return ac } -func assertStatefulsetReady(t *testing.T, mgr manager.Manager, name types.NamespacedName, expectedReplicas int) { +func assertStatefulsetReady(ctx context.Context, t *testing.T, mgr manager.Manager, name types.NamespacedName, expectedReplicas int) { sts := appsv1.StatefulSet{} - err := mgr.GetClient().Get(context.TODO(), name, &sts) + err := mgr.GetClient().Get(ctx, name, &sts) require.NoError(t, err) assert.True(t, statefulset.IsReady(sts, expectedReplicas)) } func TestService_configuresPrometheusCustomPorts(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mdb.Spec.Prometheus = 
&mdbv1.Prometheus{ Username: "username", @@ -740,22 +709,20 @@ func TestService_configuresPrometheusCustomPorts(t *testing.T) { mongodConfig.Set("net.port", 1000.) mdb.Spec.AdditionalMongodConfig.Object = mongodConfig - mgr := client.NewManager(&mdb) - err := secret.CreateOrUpdate(mgr.Client, - secret.Builder(). - SetName("secret"). - SetNamespace(mdb.Namespace). - SetField("password", "my-password"). - Build(), - ) + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). + Build()) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) svc := corev1.Service{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) assert.NoError(t, err) assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) @@ -765,11 +732,12 @@ func TestService_configuresPrometheusCustomPorts(t *testing.T) { assert.Equal(t, svc.Labels["app"], mdb.ServiceName()) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) } 
func TestService_configuresPrometheus(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mdb.Spec.Prometheus = &mdbv1.Prometheus{ Username: "username", @@ -778,22 +746,20 @@ func TestService_configuresPrometheus(t *testing.T) { }, } - mgr := client.NewManager(&mdb) - err := secret.CreateOrUpdate(mgr.Client, - secret.Builder(). - SetName("secret"). - SetNamespace(mdb.Namespace). - SetField("password", "my-password"). - Build(), - ) + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). + Build()) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) svc := corev1.Service{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) assert.NoError(t, err) assert.Len(t, svc.Spec.Ports, 2) @@ -802,65 +768,69 @@ func TestService_configuresPrometheus(t *testing.T) { } func TestCustomNetPort_Configuration(t *testing.T) { - svc, _ := performReconciliationAndGetService(t, "specify_net_port.yaml") + ctx := context.Background() + svc, _ := performReconciliationAndGetService(ctx, t, "specify_net_port.yaml") assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) assert.Len(t, svc.Spec.Ports, 1) assert.Equal(t, corev1.ServicePort{Port: 40333, Name: "mongodb"}, svc.Spec.Ports[0]) } func 
TestAutomationConfig_versionIsBumpedOnChange(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, 1, currentAc.Version) mdb.Spec.Members++ - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - _ = mgr.GetClient().Update(context.TODO(), &mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + _ = mgr.GetClient().Update(ctx, &mdb) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, 2, currentAc.Version) } func 
TestAutomationConfig_versionIsNotBumpedWithNoChanges(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, currentAc.Version, 1) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, currentAc.Version, 1) } func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mdb.Spec.Version = "4.2.2" - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := 
r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Len(t, currentAc.Processes, 3) assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") @@ -868,11 +838,11 @@ func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T // Upgrading minor version does not change the FCV on the automationConfig mdbRef := &mdb mdbRef.Spec.Version = "4.4.0" - _ = mgr.Client.Update(context.TODO(), mdbRef) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + _ = mgr.Client.Update(ctx, mdbRef) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Len(t, currentAc.Processes, 3) assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") 
@@ -880,6 +850,7 @@ func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T } func TestAutomationConfig_CustomMongodConfig(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mongodConfig := objx.New(map[string]interface{}{}) @@ -888,12 +859,12 @@ func TestAutomationConfig_CustomMongodConfig(t *testing.T) { mongodConfig.Set("arbitrary.config.path", "value") mdb.Spec.AdditionalMongodConfig.Object = mongodConfig - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) for _, p := range currentAc.Processes { @@ -911,36 +882,33 @@ func TestAutomationConfig_CustomMongodConfig(t *testing.T) { } func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { + ctx := context.Background() mdb := newScramReplicaSet() - mgr := client.NewManager(&mdb) + mgr := client.NewManager(ctx, &mdb) c := mgr.Client keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName() - err := secret.CreateOrUpdate(c, - secret.Builder(). - SetName(keyFileNsName.Name). - SetNamespace(keyFileNsName.Namespace). - SetField(constants.AgentKeyfileKey, "my-keyfile"). 
- Build(), - ) + err := secret.CreateOrUpdate(ctx, c, secret.Builder(). + SetName(keyFileNsName.Name). + SetNamespace(keyFileNsName.Namespace). + SetField(constants.AgentKeyfileKey, "my-keyfile"). + Build()) assert.NoError(t, err) passwordNsName := mdb.GetAgentPasswordSecretNamespacedName() - err = secret.CreateOrUpdate(c, - secret.Builder(). - SetName(passwordNsName.Name). - SetNamespace(passwordNsName.Namespace). - SetField(constants.AgentPasswordKey, "my-pass"). - Build(), - ) + err = secret.CreateOrUpdate(ctx, c, secret.Builder(). + SetName(passwordNsName.Name). + SetNamespace(passwordNsName.Namespace). + SetField(constants.AgentPasswordKey, "my-pass"). + Build()) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) assert.False(t, currentAc.Auth.Disabled) @@ -952,23 +920,26 @@ func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { } func TestScramIsConfigured(t *testing.T) { - assertReplicaSetIsConfiguredWithScram(t, newScramReplicaSet()) + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newScramReplicaSet()) } func TestScramIsConfiguredWhenNotSpecified(t *testing.T) { - 
assertReplicaSetIsConfiguredWithScram(t, newTestReplicaSet()) + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newTestReplicaSet()) } func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mdb.Spec.Members = 5 - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) @@ -976,73 +947,74 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin // scale members from five to three mdb.Spec.Members = 3 - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, 
mdb.Status.CurrentMongoDBMembers) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) // scale members from three to five mdb.Spec.Members = 5 - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = 
mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) @@ -1050,37 +1022,39 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. } func TestIgnoreUnknownUsers(t *testing.T) { + ctx := context.Background() t.Run("Ignore Unkown Users set to true", func(t *testing.T) { mdb := newTestReplicaSet() ignoreUnknownUsers := true mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers - assertAuthoritativeSet(t, mdb, false) + assertAuthoritativeSet(ctx, t, mdb, false) }) t.Run("IgnoreUnknownUsers is not set", func(t *testing.T) { mdb := newTestReplicaSet() mdb.Spec.Security.Authentication.IgnoreUnknownUsers = nil - assertAuthoritativeSet(t, mdb, false) + assertAuthoritativeSet(ctx, t, mdb, false) }) t.Run("IgnoreUnknownUsers set to false", func(t *testing.T) { mdb := newTestReplicaSet() ignoreUnknownUsers := false mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers - assertAuthoritativeSet(t, mdb, true) + assertAuthoritativeSet(ctx, t, mdb, true) }) } func TestAnnotationsAreAppliedToResource(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := 
r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.NotNil(t, mdb.Annotations) @@ -1090,13 +1064,13 @@ func TestAnnotationsAreAppliedToResource(t *testing.T) { // assertAuthoritativeSet asserts that a reconciliation of the given MongoDBCommunity resource // results in the AuthoritativeSet of the created AutomationConfig to have the expectedValue provided. -func assertAuthoritativeSet(t *testing.T, mdb mdbv1.MongoDBCommunity, expectedValue bool) { - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) +func assertAuthoritativeSet(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, expectedValue bool) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - s, err := mgr.Client.GetSecret(types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + s, err := mgr.Client.GetSecret(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) 
bytes := s.Data[automationconfig.ConfigKey] @@ -1106,13 +1080,13 @@ func assertAuthoritativeSet(t *testing.T, mdb mdbv1.MongoDBCommunity, expectedVa assert.Equal(t, expectedValue, ac.Auth.AuthoritativeSet) } -func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDBCommunity) { - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) +func assertReplicaSetIsConfiguredWithScram(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { assert.NotEmpty(t, currentAc.Auth.Key) assert.NoError(t, err) @@ -1122,31 +1096,31 @@ func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDBCommun }) t.Run("Secret with password was created", func(t *testing.T) { secretNsName := mdb.GetAgentPasswordSecretNamespacedName() - s, err := mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) }) t.Run("Secret with keyfile was created", func(t *testing.T) { secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() - s, err := 
mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) }) } -func assertReplicaSetIsConfiguredWithScramTLS(t *testing.T, mdb mdbv1.MongoDBCommunity) { - mgr := client.NewManager(&mdb) +func assertReplicaSetIsConfiguredWithScramTLS(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) newClient := client.NewClient(mgr.GetClient()) - err := createTLSSecret(newClient, mdb, "CERT", "KEY", "") + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(newClient, mdb) + err = createTLSConfigMap(ctx, newClient, mdb) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { assert.Empty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) assert.NotEmpty(t, currentAc.Auth.Key) @@ -1157,36 +1131,36 @@ func assertReplicaSetIsConfiguredWithScramTLS(t *testing.T, mdb mdbv1.MongoDBCom }) t.Run("Secret with password was created", func(t *testing.T) { secretNsName := mdb.GetAgentPasswordSecretNamespacedName() - s, err := 
mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) }) t.Run("Secret with keyfile was created", func(t *testing.T) { secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() - s, err := mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) }) } -func assertReplicaSetIsConfiguredWithX509(t *testing.T, mdb mdbv1.MongoDBCommunity) { - mgr := client.NewManager(&mdb) +func assertReplicaSetIsConfiguredWithX509(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) newClient := client.NewClient(mgr.GetClient()) - err := createTLSSecret(newClient, mdb, "CERT", "KEY", "") + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") assert.NoError(t, err) - err = createTLSConfigMap(newClient, mdb) + err = createTLSConfigMap(ctx, newClient, mdb) assert.NoError(t, err) crt, key, err := x509.CreateAgentCertificate() assert.NoError(t, err) - err = createAgentCertSecret(newClient, mdb, crt, key, "") + err = createAgentCertSecret(ctx, newClient, mdb, crt, key, "") assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := 
automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) t.Run("Automation Config is configured with X509", func(t *testing.T) { assert.NotEmpty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) @@ -1200,56 +1174,61 @@ func assertReplicaSetIsConfiguredWithX509(t *testing.T, mdb mdbv1.MongoDBCommuni }) t.Run("Secret with password was not created", func(t *testing.T) { secretNsName := mdb.GetAgentPasswordSecretNamespacedName() - _, err := mgr.Client.GetSecret(secretNsName) + _, err := mgr.Client.GetSecret(ctx, secretNsName) assert.Error(t, err) }) t.Run("Secret with keyfile was created", func(t *testing.T) { secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() - s, err := mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) }) } func TestX509andSCRAMIsConfiguredWithX509Agent(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} mdb.Spec.Security.Authentication.AgentMode = "X509" - assertReplicaSetIsConfiguredWithX509(t, mdb) + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) } func TestX509andSCRAMIsConfiguredWithSCRAMAgent(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} mdb.Spec.Security.Authentication.AgentMode = "SCRAM" - assertReplicaSetIsConfiguredWithScramTLS(t, mdb) + assertReplicaSetIsConfiguredWithScramTLS(ctx, t, mdb) } func TestX509IsConfigured(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} - assertReplicaSetIsConfiguredWithX509(t, mdb) + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) } func 
TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestVolumeClaimTemplates_Configuration(t *testing.T) { - sts, _ := performReconciliationAndGetStatefulSet(t, "volume_claim_templates_mdb.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "volume_claim_templates_mdb.yaml") assert.Len(t, sts.Spec.VolumeClaimTemplates, 3) @@ -1264,7 +1243,8 @@ func TestVolumeClaimTemplates_Configuration(t *testing.T) { } func TestChangeDataVolume_Configuration(t *testing.T) { - sts, _ := performReconciliationAndGetStatefulSet(t, "change_data_volume.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "change_data_volume.yaml") assert.Len(t, sts.Spec.VolumeClaimTemplates, 2) dataVolume := sts.Spec.VolumeClaimTemplates[0] @@ -1277,7 +1257,8 @@ func TestChangeDataVolume_Configuration(t *testing.T) { } func TestCustomStorageClass_Configuration(t *testing.T) { - sts, _ := performReconciliationAndGetStatefulSet(t, "custom_storage_class.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "custom_storage_class.yaml") 
dataVolume := sts.Spec.VolumeClaimTemplates[0] @@ -1293,7 +1274,8 @@ func TestCustomStorageClass_Configuration(t *testing.T) { } func TestCustomTaintsAndTolerations_Configuration(t *testing.T) { - sts, _ := performReconciliationAndGetStatefulSet(t, "tolerations_example.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "tolerations_example.yaml") assert.Len(t, sts.Spec.Template.Spec.Tolerations, 2) assert.Equal(t, "example-key", sts.Spec.Template.Spec.Tolerations[0].Key) @@ -1306,7 +1288,8 @@ func TestCustomTaintsAndTolerations_Configuration(t *testing.T) { } func TestCustomDataDir_Configuration(t *testing.T) { - sts, c := performReconciliationAndGetStatefulSet(t, "specify_data_dir.yaml") + ctx := context.Background() + sts, c := performReconciliationAndGetStatefulSet(ctx, t, "specify_data_dir.yaml") agentContainer := container.GetByName("mongodb-agent", sts.Spec.Template.Spec.Containers) assert.NotNil(t, agentContainer) @@ -1318,7 +1301,7 @@ func TestCustomDataDir_Configuration(t *testing.T) { lastCommand := mongoContainer.Command[len(agentContainer.Command)-1] assert.Contains(t, lastCommand, "/some/path/db", "startup command should be using the newly specified path") - ac, err := automationconfig.ReadFromSecret(c, types.NamespacedName{Name: "example-mongodb-config", Namespace: "test-ns"}) + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: "example-mongodb-config", Namespace: "test-ns"}) assert.NoError(t, err) for _, p := range ac.Processes { @@ -1328,15 +1311,16 @@ func TestCustomDataDir_Configuration(t *testing.T) { } func TestInconsistentReplicas(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() stsReplicas := new(int32) *stsReplicas = 3 mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Replicas = stsReplicas mdb.Spec.Members = 4 - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - _, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: 
types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assert.NoError(t, err) } @@ -1350,34 +1334,34 @@ func assertVolumeMountPath(t *testing.T, mounts []corev1.VolumeMount, name, path t.Fatalf("volume with name %s was not present!", name) } -func performReconciliationAndGetStatefulSet(t *testing.T, filePath string) (appsv1.StatefulSet, client.Client) { +func performReconciliationAndGetStatefulSet(ctx context.Context, t *testing.T, filePath string) (appsv1.StatefulSet, client.Client) { mdb, err := loadTestFixture(filePath) assert.NoError(t, err) - mgr := client.NewManager(&mdb) - assert.NoError(t, generatePasswordsForAllUsers(mdb, mgr.Client)) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assertReconciliationSuccessful(t, res, err) - sts, err := mgr.Client.GetStatefulSet(mdb.NamespacedName()) + sts, err := mgr.Client.GetStatefulSet(ctx, mdb.NamespacedName()) assert.NoError(t, err) return sts, mgr.Client } -func performReconciliationAndGetService(t *testing.T, filePath string) (corev1.Service, client.Client) { +func performReconciliationAndGetService(ctx context.Context, t *testing.T, filePath string) (corev1.Service, client.Client) { mdb, err := loadTestFixture(filePath) assert.NoError(t, err) - mgr := client.NewManager(&mdb) 
- assert.NoError(t, generatePasswordsForAllUsers(mdb, mgr.Client)) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assertReconciliationSuccessful(t, res, err) - svc, err := mgr.Client.GetService(types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}) + svc, err := mgr.Client.GetService(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}) assert.NoError(t, err) return svc, mgr.Client } -func generatePasswordsForAllUsers(mdb mdbv1.MongoDBCommunity, c client.Client) error { +func generatePasswordsForAllUsers(ctx context.Context, mdb mdbv1.MongoDBCommunity, c client.Client) error { for _, user := range mdb.Spec.Users { key := "password" @@ -1391,7 +1375,7 @@ func generatePasswordsForAllUsers(mdb mdbv1.MongoDBCommunity, c client.Client) e SetField(key, "GAGTQK2ccRRaxJFudI5y"). 
Build() - if err := c.CreateSecret(passwordSecret); err != nil { + if err := c.CreateSecret(ctx, passwordSecret); err != nil { return err } } @@ -1407,27 +1391,27 @@ func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err e // makeStatefulSetReady updates the StatefulSet corresponding to the // provided MongoDB resource to mark it as ready for the case of `statefulset.IsReady` -func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) { - setStatefulSetReadyReplicas(t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) +func makeStatefulSetReady(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) { + setStatefulSetReadyReplicas(ctx, t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) } -func setStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { +func setStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { sts := appsv1.StatefulSet{} - err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) + err := c.Get(ctx, mdb.NamespacedName(), &sts) assert.NoError(t, err) sts.Status.ReadyReplicas = int32(readyReplicas) sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) - err = c.Update(context.TODO(), &sts) + err = c.Update(ctx, &sts) assert.NoError(t, err) } -func setArbiterStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { +func setArbiterStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { sts := appsv1.StatefulSet{} - err := c.Get(context.TODO(), mdb.ArbiterNamespacedName(), &sts) + err := c.Get(ctx, mdb.ArbiterNamespacedName(), &sts) assert.NoError(t, err) sts.Status.ReadyReplicas = int32(readyReplicas) sts.Status.UpdatedReplicas = int32(mdb.StatefulSetArbitersThisReconciliation()) - err = 
c.Update(context.TODO(), &sts) + err = c.Update(ctx, &sts) assert.NoError(t, err) } @@ -1456,3 +1440,74 @@ func marshalRuntimeObjectFromYAMLBytes(bytes []byte, obj runtime.Object) error { } return json.Unmarshal(jsonBytes, &obj) } + +func TestGetMongoDBImage(t *testing.T) { + type testConfig struct { + mongodbRepoUrl string + mongodbImage string + mongodbImageType string + version string + expectedImage string + } + tests := map[string]testConfig{ + "Default UBI8 Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image from Quay": { + mongodbRepoUrl: "quay.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden Ubuntu Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubuntu2204", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubuntu2204", + }, + "Overridden UBI Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Docker Inc images": { + mongodbRepoUrl: "docker.io", + mongodbImage: "mongo", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongo:6.0.5", + }, + "Deprecated AppDB images defined the old way": { + mongodbRepoUrl: "quay.io", + mongodbImage: 
"mongodb/mongodb-enterprise-appdb-database-ubi", + // In this example, we intentionally don't use the suffix from the env. variable and let users + // define it in the version instead. There are some known customers who do this. + // This is a backwards compatibility case. + mongodbImageType: "will-be-ignored", + version: "5.0.14-ent", + expectedImage: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.14-ent", + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + image := getMongoDBImage(testConfig.mongodbRepoUrl, testConfig.mongodbImage, testConfig.mongodbImageType, testConfig.version) + assert.Equal(t, testConfig.expectedImage, image) + }) + } +} diff --git a/controllers/validation/validation.go b/controllers/validation/validation.go index 53c431ea2..3d84cc1c0 100644 --- a/controllers/validation/validation.go +++ b/controllers/validation/validation.go @@ -92,7 +92,7 @@ func validateUsers(mdb mdbv1.MongoDBCommunity) error { previousUser.Username, user.Username)) } else { - connectionStringSecretNameMap[connectionStringSecretName] = user + scramSecretNameMap[scramSecretName] = user } if user.Database == constants.ExternalDB { @@ -100,7 +100,7 @@ func validateUsers(mdb mdbv1.MongoDBCommunity) error { return fmt.Errorf("X.509 user %s present but X.509 is not enabled", user.Username) } if user.PasswordSecretKey != "" { - return fmt.Errorf("X509 user %s shoul not have a password secret key", user.Username) + return fmt.Errorf("X509 user %s should not have a password secret key", user.Username) } if user.PasswordSecretName != "" { return fmt.Errorf("X509 user %s should not have a password secret name", user.Username) diff --git a/controllers/watch/watch.go b/controllers/watch/watch.go index 376bbfcf2..9522c53c3 100644 --- a/controllers/watch/watch.go +++ b/controllers/watch/watch.go @@ -1,11 +1,13 @@ package watch import ( + "context" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -16,6 +18,8 @@ type ResourceWatcher struct { watched map[types.NamespacedName][]types.NamespacedName } +var _ handler.EventHandler = &ResourceWatcher{} + // New will create a new ResourceWatcher with no watched objects. func New() ResourceWatcher { return ResourceWatcher{ @@ -24,7 +28,7 @@ func New() ResourceWatcher { } // Watch will add a new object to watch. -func (w ResourceWatcher) Watch(watchedName, dependentName types.NamespacedName) { +func (w ResourceWatcher) Watch(ctx context.Context, watchedName, dependentName types.NamespacedName) { existing, hasExisting := w.watched[watchedName] if !hasExisting { existing = []types.NamespacedName{} @@ -38,19 +42,19 @@ func (w ResourceWatcher) Watch(watchedName, dependentName types.NamespacedName) w.watched[watchedName] = append(existing, dependentName) } -func (w ResourceWatcher) Create(event event.CreateEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } -func (w ResourceWatcher) Update(event event.UpdateEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.ObjectOld, queue) } -func (w ResourceWatcher) Delete(event event.DeleteEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } -func (w ResourceWatcher) Generic(event event.GenericEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Generic(ctx context.Context, event 
event.GenericEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } diff --git a/controllers/watch/watch_test.go b/controllers/watch/watch_test.go index 027c1b78d..ab8c522be 100644 --- a/controllers/watch/watch_test.go +++ b/controllers/watch/watch_test.go @@ -1,6 +1,7 @@ package watch import ( + "context" "testing" "k8s.io/apimachinery/pkg/types" @@ -19,6 +20,7 @@ import ( ) func TestWatcher(t *testing.T) { + ctx := context.Background() obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", @@ -45,9 +47,9 @@ func TestWatcher(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) // Ensure no reconciliation is queued if object is not watched. assert.Equal(t, 0, queue.Len()) @@ -56,12 +58,12 @@ func TestWatcher(t *testing.T) { t.Run("Multiple objects to reconcile", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) - watcher.Watch(objNsName, mdb2.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb2.NamespacedName()) - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) // Ensure multiple reconciliations are enqueued. 
assert.Equal(t, 2, queue.Len()) @@ -70,11 +72,11 @@ func TestWatcher(t *testing.T) { t.Run("Create event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -82,12 +84,12 @@ func TestWatcher(t *testing.T) { t.Run("Update event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Update(event.UpdateEvent{ + watcher.Update(ctx, event.UpdateEvent{ ObjectOld: obj, ObjectNew: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -95,11 +97,11 @@ func TestWatcher(t *testing.T) { t.Run("Delete event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Delete(event.DeleteEvent{ + watcher.Delete(ctx, event.DeleteEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -107,17 +109,18 @@ func TestWatcher(t *testing.T) { t.Run("Generic event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Generic(event.GenericEvent{ + watcher.Generic(ctx, event.GenericEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) } func TestWatcherAdd(t *testing.T) { + ctx := context.Background() watcher := New() assert.Empty(t, watcher.watched) @@ -137,17 +140,17 @@ func TestWatcherAdd(t *testing.T) { } // Ensure single object can be added to empty watchlist. 
- watcher.Watch(watchedName, mdb1.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) // Ensure object can only be watched once. - watcher.Watch(watchedName, mdb1.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) // Ensure a single object can be watched for multiple reconciliations. - watcher.Watch(watchedName, mdb2.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb2.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{ mdb1.NamespacedName(), diff --git a/deploy/openshift/operator_openshift.yaml b/deploy/openshift/operator_openshift.yaml index ea5e0a6ac..b7011a1cc 100644 --- a/deploy/openshift/operator_openshift.yaml +++ b/deploy/openshift/operator_openshift.yaml @@ -47,16 +47,16 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent:12.0.25.7724-1 + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 - name: READINESS_PROBE_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.17 + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 - name: VERSION_UPGRADE_HOOK_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.8 + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 - name: MONGODB_IMAGE value: mongo - name: MONGODB_REPO_URL value: quay.io - image: quay.io/mongodb/mongodb-kubernetes-operator:0.8.3 + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 imagePullPolicy: Always name: mongodb-kubernetes-operator resources: diff --git a/docs/README.md b/docs/README.md index 2d49ef706..7475a0d10 100644 --- a/docs/README.md +++ b/docs/README.md @@ 
-6,5 +6,6 @@ - [MongoDB Community Kubernetes Operator Architecture](architecture.md) - [Install and Upgrade the Community Kubernetes Operator](install-upgrade.md) - [Deploy and Configure MongoDBCommunity Resources](deploy-configure.md) +- [Configure Logging of the MongoDB components](logging.md) - [Create Database Users](users.md) - [Secure MongoDBCommunity Resources](secure.md) diff --git a/docs/RELEASE_NOTES.md b/docs/RELEASE_NOTES.md index 88c2b3c5f..6109fac02 100644 --- a/docs/RELEASE_NOTES.md +++ b/docs/RELEASE_NOTES.md @@ -1,21 +1,18 @@ -# MongoDB Kubernetes Operator 0.8.3 +# MongoDB Kubernetes Operator 0.13.0 + +## Dependency updates + - Updated kubernetes dependencies to 1.30 + - Bumped Go dependency to 1.24 + - Updated packages `crypto`, `net` and `oauth2` to remediate multiple CVEs ## MongoDBCommunity Resource + - Added support for overriding the ReplicaSet ID ([#1656](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1656)). + +## Improvements + - Refactored environment variable propagation ([#1676](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1676)). + - Introduced a linter to limit inappropriate usage of environment variables within the codebase ([#1690](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1690)). + +## Security & Dependency Updates + - **CVE Updates**: Updated packages `crypto`, `net` and `oauth2` to remediate multiple CVEs + - Upgraded to Go 1.24 and Kubernetes dependencies to 1.30.x. -- Changes - - Introduced support for X.509 authentication for client and agent - - `spec.security.authentication.modes` now supports value `X509` - - The agent authentication mode will default to the value in `spec.security.authentication.modes` if there is only one specified. - - Otherwise, agent authentication will need to be specified through `spec.security.authentication.agentMode`.
- - When agent authentication is set to `X509`, the field `spec.security.authentication.agentCertificateSecretRef` can be set (default is `agent-certs`). - - The secret that `agentCertificateSecretRef` points to should contain a signed X.509 certificate (under the `tls.crt` key) and a private key (under `tls.key`) for the agent. - - X.509 users can be added the same way as before under `spec.users`. The `db` field must be set to `$external` for X.509 authentication. - - For these users, `scramCredentialsSecretName` and `passwordSecretRef` should **not** be set. - - Sample resource [yaml](config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml) - - Sample agent certificate [yaml](config/samples/external_access/agent-certificate.yaml) - - Add support for configuring [logRotate](https://www.mongodb.com/docs/ops-manager/current/reference/cluster-configuration/#mongodb-instances) on the automation-agent. The settings can be found under `processes[n].logRotate.`. - - Additionally, [systemLog](https://www.mongodb.com/docs/manual/reference/configuration-options/#systemlog-options) can now be configured. In particular the settings: `path`, `destination` and `logAppend`. - - MongoDB 7.0.0 and onwards is not supported. Supporting it requires a newer Automation Agent version. Until a new version is available, the Operator will fail all deployments with this version. To ignore this error and force the Operator to reconcile these resources, use `IGNORE_MDB_7_ERROR` environment variable and set it to `true`. - - Introduced support for ARM64 architecture - - A manifest supporting both AMD64 and ARCH64 architectures is released for each version. - - `ubuntu` based images are deprecated, users should move to `ubi` images next release. 
\ No newline at end of file diff --git a/docs/build_operator_locally.md b/docs/build_operator_locally.md index d1219c654..33dfff340 100644 --- a/docs/build_operator_locally.md +++ b/docs/build_operator_locally.md @@ -40,7 +40,7 @@ git submodule update --init ``` -5. Build and deploy the operator: +5. Build and deploy the operator. Also add `IMG_BUILD_ARGS=--insecure` as described [here](contributing.md#deploying-the-operator) if necessary: ```sh # builds all required images and then deploys the operator diff --git a/docs/contributing.md b/docs/contributing.md index c3de85acc..139d11b71 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -58,10 +58,9 @@ to be able to run properly. Create a json file with the following content: "repo_url": "localhost:5000", "operator_image": "mongodb-kubernetes-operator", "e2e_image": "community-operator-e2e", - "version_upgrade_hook_image": "community-operator-version-upgrade-post-start-hook", - "agent_image_ubuntu": "mongodb-agent-dev", - "agent_image_ubi": "mongodb-agent-ubi-dev", - "readiness_probe_image": "mongodb-kubernetes-readiness", + "version_upgrade_hook_image": "mongodb-kubernetes-operator-version-upgrade-post-start-hook", + "agent_image": "mongodb-agent-ubi-dev", + "readiness_probe_image": "mongodb-kubernetes-readinessprobe", "s3_bucket": "" } ``` @@ -73,10 +72,8 @@ to be able to run properly. Create a json file with the following content: 3. `operator_image` will be used as the name of the operator deployment, and the name of the operator image when build. 4. `e2e_image` the name of e2e test image that will be built. 5. `version_upgrade_hook_image` the name of the version upgrade post start hook image. -6. `image_type` this can be either `ubi` or `ubuntu` and determines the distro of the images built. (currently only the agent image has multiple distros) -7. `agent_image_ubuntu` the name of the ubuntu agent image. -8. `agent_image_ubi` the name of the ubi agent image. -9. 
`s3_bucket` the S3 bucket that Dockerfiles will be pushed to as part of the release process. Note: this is only required when running the release tasks locally. +6. `agent_image` the name of the agent image. +7. `s3_bucket` the S3 bucket that Dockerfiles will be pushed to as part of the release process. Note: this is only required when running the release tasks locally. You can set the `MONGODB_COMMUNITY_CONFIG` environment variable to be the absolute path of this file. @@ -135,6 +132,14 @@ make operator-image deploy This will build and deploy the operator to namespace specified in your configuration file. +If you are using a local docker registry you should run the following command. +The additional `IMG_BUILD_ARGS=--insecure` variable will add the `--insecure` flag to the command creating the manifests. +This is necessary if your local registry is not secure. Read more about the flag on the [documentatio](https://docs.docker.com/reference/cli/docker/manifest/#working-with-insecure-registries) + +```sh +IMG_BUILD_ARGS=--insecure make operator-image deploy +``` + #### See the operator deployment ```sh @@ -143,7 +148,7 @@ kubectl get pods #### (Optional) Create a MongoDBCommunity Resource -Follow the steps outlined [here](./deploy-configure.md) to deploy some resource. +Follow the steps outlined [here](./deploy-configure.md) to deploy some resources. #### Cleanup To remove the operator and any created resources you can run @@ -152,7 +157,7 @@ To remove the operator and any created resources you can run make undeploy ``` -Alternatively, you can run the operator locally with +Alternatively, you can run the operator locally. Make sure you follow the steps outlined in [run-operator-locally.md](run-operator-locally.md) ```sh make run @@ -171,7 +176,8 @@ make test ### E2E Tests If this is the first time running E2E tests, you will need to ensure that you have built and pushed -all images required by the E2E tests. You can do this by running. 
+all images required by the E2E tests. You can do this by running the following command, +or with the additional `IMG_BUILD_ARGS=--insecure` described above. ```sh make all-images @@ -183,7 +189,7 @@ For subsequent tests you can use make e2e-k8s test= ``` -This will only re-build the e2e test image. +This will only re-build the e2e test image. Add `IMG_BUILD_ARGS=--insecure` if necessary We have built a simple mechanism to run E2E tests on your cluster using a runner that deploys a series of Kubernetes objects, runs them, and awaits for their @@ -202,7 +208,7 @@ replica_set_scale ... ``` -The tests should run individually using the runner like this: +The tests should run individually using the runner like this, or additionally with `IMG_BUILD_ARGS=--insecure`: ```sh make e2e-k8s test=replica_set diff --git a/docs/external_access.md b/docs/external_access.md index ce9226b78..40adb279c 100644 --- a/docs/external_access.md +++ b/docs/external_access.md @@ -1,4 +1,4 @@ -## Enabling External Access to MongoDB deployment +## Enable External Access to a MongoDB Deployment This guide assumes that the operator is installed and a MongoDB deployment is yet to be done but you have a chosen namespace that you are installing into. We will install cert-manager and then generate certificates and configure split-horizon to support internal and external DNS names for configuring external access to the replicaset. diff --git a/docs/how-to-release.md b/docs/how-to-release.md index 0d23ab6c2..f92412433 100644 --- a/docs/how-to-release.md +++ b/docs/how-to-release.md @@ -1,17 +1,25 @@ ## How to Release - * Prepare release PR: - * Pull the changes in the helm-charts sub module folder to get the latest main. - * `cd helm-charts && git pull origin main`. + * Pull the changes in the helm-charts submodule folder to get the latest main. 
+ * `cd helm-charts` + * `git submodule update --init` - if submodule was not initialised before + * `git pull origin main` * Update any changing versions in [release.json](../release.json). + * `operator` - always when doing a release + * `version-upgrade-hook` - whenever we make changes in the [versionhook](../cmd/versionhook) files + * `readiness-probe` - whenever we make changes in the [readiness](../cmd/readiness) files + * `agent` - newest version available in `ops-manager` `conf-hosted.properties` file under `automation.agent.version` + * `agent-tools-version` - newest version available in `ops-manager` `conf-hosted.properties` file under `mongotools.version` * Ensure that [the release notes](./RELEASE_NOTES.md) are up to date for this release. + * all merged PRs have a covered entry in the release notes. For example, you can use `git log v0.11.0..HEAD --reverse --oneline` to get the list of commits after previous release * Run `python scripts/ci/update_release.py` to update the relevant yaml manifests. - * Copy `CRD`s to Helm Chart - - `cp config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml helm-charts/charts/community-operator-crds/templates/mongodbcommunity.mongodb.com_mongodbcommunity.yaml` - - commit changes to the [helm-charts submodule](https://github.com/mongodb/helm-charts) and create a PR against it ([similar to this one](https://github.com/mongodb/helm-charts/pull/163)). - - do not merge helm-charts PR until release PR is merged and the images are pushed to quay.io. - - do not commit the submodule change in the release pr of the community repository. 
+ * **use venv and then `python3 -m pip install -r requirements.txt`** + * Copy ``CRD`s`` to Helm Chart + * `cp config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml helm-charts/charts/community-operator-crds/templates/mongodbcommunity.mongodb.com_mongodbcommunity.yaml` + * commit changes to the [helm-charts submodule](https://github.com/mongodb/helm-charts) and create a PR against it ([similar to this one](https://github.com/mongodb/helm-charts/pull/163)). + * do not merge helm-charts PR until release PR is merged and the images are pushed to quay.io. + * do not commit the submodule change in the release pr of the community repository. * Commit all changes (except for the submodule change) * Create a PR with the title `Release MongoDB Kubernetes Operator v` (the title must match this pattern). * Wait for the tests to pass and merge the PR. diff --git a/docs/install-upgrade.md b/docs/install-upgrade.md index 1b0972edb..3deb68a06 100644 --- a/docs/install-upgrade.md +++ b/docs/install-upgrade.md @@ -76,8 +76,8 @@ Use one of the following procedures to install the Operator using Helm: ##### Install in the Default Namespace using Helm -To install the Custom Resource Definitions and the Community Operator in -the `default` namespace using Helm, run the install command from the +To install the Custom Resource Definitions and the Community Operator in +the `default` namespace using Helm, run the install command from the terminal: ``` helm install community-operator mongodb/community-operator @@ -91,9 +91,9 @@ include `--set community-operator-crds.enabled=false` when installing the Operat ##### Install in a Different Namespace using Helm -To install the Custom Resource Definitions and the Community Operator in -a different namespace using Helm, run the install -command with the `--namespace` flag from the terminal. 
Include the `--create-namespace` +To install the Custom Resource Definitions and the Community Operator in +a different namespace using Helm, run the install +command with the `--namespace` flag from the terminal. Include the `--create-namespace` flag if you are creating a new namespace. ``` helm install community-operator mongodb/community-operator --namespace mongodb [--create-namespace] @@ -159,6 +159,10 @@ To configure the Operator to watch resources in other namespaces: kubectl apply -k config/rbac --namespace ``` + *Note: If you need the operator to have permission over multiple namespaces, for ex: when configuring the operator to have the `connectionStringSecret` in a different `namespace`, make sure + to apply the `RBAC` in all the relevant namespaces.* + + 5. [Install the operator](#procedure-using-kubectl). ##### Configure the MongoDB Docker Image or Container Registry @@ -170,11 +174,11 @@ for MongoDB Docker images: 1. In the Operator [resource definition](../config/manager/manager.yaml), set the `MONGODB_IMAGE` and `MONGODB_REPO_URL` environment variables: - **NOTE:** Use the official - [MongoDB Community Server images](https://hub.docker.com/r/mongodb/mongodb-community-server). + **NOTE:** Use the official + [MongoDB Community Server images](https://hub.docker.com/r/mongodb/mongodb-community-server). Official images provide the following advantages: - - They are rebuilt daily for the latest upstream + - They are rebuilt daily for the latest upstream vulnerability fixes. - MongoDB tests, maintains, and supports them. @@ -290,7 +294,7 @@ Make sure you run commands in the correct namespace. ``` kubectl delete pod -0 ``` - d. You're done. Now Kubernetes will create the pod fresh, causing the migration to run and then the pod to start up. Then kubernetes will proceed creating the next pod until it reaches the number specified in your cr. + d. You're done. Now Kubernetes will create the pod fresh, causing the migration to run and then the pod to start up. 
Then kubernetes will proceed creating the next pod until it reaches the number specified in your cr. ## Rotating TLS certificate for the MongoDB deployment @@ -306,4 +310,4 @@ kubectl apply -f - *`secret_name` is what you've specified under `Spec.Security.TLS.CertificateKeySecret.Name`*. If you're using a tool like cert-manager, you can follow [these instructions](https://cert-manager.io/docs/usage/certificate/#renewal) to rotate the certificate. -The operator should would watch the secret change and re-trigger a reconcile process. +The operator should would watch the secret change and re-trigger a reconcile process. diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 000000000..021ae48ed --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,33 @@ +# Configure Logging in MongoDB Community + +This section describes the components which are logging either to a file or stdout, +how to configure them and what their defaults are. + +## MongoDB Processes +### Configuration +The exposed CRD options can be seen [in the crd yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/74d13f189566574b862e5670b366b61ec5b65923/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml#L105-L117). +Additionally, more information regarding configuring systemLog can be found [in the official documentation of systemLog](https://www.mongodb.com/docs/manual/reference/configuration-options/#core-options)]. +`spec.agent.systemLog.destination` configures the logging destination of the mongod process. +### Default Values +By default, MongoDB sends all log output to standard output. + +## MongoDB Agent +### Configuration +`spec.agent.logFile` can be used to configure the output file of the mongoDB agent logging. +The agent will log to standard output with the following setting: `/dev/stdout`. 
+### Default Values +By default, the MongoDB agent logs to `/var/log/mongodb-mms-automation/automation-agent.log` + +## ReadinessProbe +### Configuration & Default Values +The readinessProbe can be configured via Environment variables. +Below is a table with each environment variable, its explanation and its default value. + +| Environment Variable | Explanation | Default Value | +|---------------------------------|-------------------------------------------------------------------------|-----------------------------------------------| +| READINESS_PROBE_LOGGER_BACKUPS | maximum number of old log files to retain | 5 | +| READINESS_PROBE_LOGGER_MAX_SIZE | maximum size in megabytes | 5 | +| READINESS_PROBE_LOGGER_MAX_AGE | maximum number of days to retain old log files | none | +| READINESS_PROBE_LOGGER_COMPRESS | if the rotated log files should be compressed | false | +| MDB_WITH_AGENT_FILE_LOGGING | whether we should also log to stdout (which shows in kubectl describe) | true | +| LOG_FILE_PATH | path of the logfile of the readinessProbe. | /var/log/mongodb-mms-automation/readiness.log | \ No newline at end of file diff --git a/docs/run-operator-locally.md b/docs/run-operator-locally.md index 6b78a883e..c742f54b7 100644 --- a/docs/run-operator-locally.md +++ b/docs/run-operator-locally.md @@ -1,6 +1,6 @@ # Quick start for building and running the operator locally -This document contains a quickstart guide to build and running (+debugging) the operator locally. +This document contains a quickstart guide to build and running and debugging the operator locally. Being able to run and build the binary locally can help with faster feedback-cycles. 
## Prerequisites @@ -12,22 +12,29 @@ Being able to run and build the binary locally can help with faster feedback-cyc - `KUBECONFIG` environment variable pointing at a file - **Note**: either of these are necessary to be able to run the operator locally - Have a folder `.community-operator-dev` +- *Optional - if you want to export the environment variables, you can run the following command*: `source .community-operator-dev/local-test.export.env`. ( These environment variables are generated with the `make generate-env-file`) ## Goals - Run the operator locally as a binary (optionally in debug mode) in command line or in an IDE - Run e2e tests locally ## Running The Operator locally -1. Use the dedicated make target which exports the needed environment variables and builds & runs the operator binary - -```sh -make run -``` +1. Use the dedicated make target which exports the needed environment variables and builds & runs the operator binary. + + Before doing that you need to add 2 more fields to the `config.json` file found in [contributing.md](contributing.md), because the python script looks for them in the file: + - `mdb_local_operator`: needs to be set to `true`, to allow for the operator to be run locally + - `kubeconfig`: needs to be set to the path of the `kubeconfig` configuration file, for example `$HOME/.kube/config` + + Then you can run the command: + + ```sh + make run + ``` 2. 
For debugging one can use the following make target, which uses [dlv](https://github.com/go-delve/delve): -```sh -make debug -``` + ```sh + make debug + ``` ## Running e2e tests with the local operator - Our [e2e tests](../test/e2e), contains sub-steps that will install the following helm-chart: [operator.yaml](../helm-charts/charts/community-operator/templates/operator.yaml) diff --git a/docs/secure.md b/docs/secure.md index f2b7d877a..e1a1e8631 100644 --- a/docs/secure.md +++ b/docs/secure.md @@ -36,20 +36,21 @@ To secure connections to MongoDBCommunity resources with TLS using `cert-manager helm repo update ``` -1. Install `cert-manager`: +2. Install `cert-manager`: ``` - helm install cert-manager jetstack/cert-manager --namespace cert-manager \ - --create-namespace --set installCRDs=true + helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true ``` -1. Create a TLS-secured MongoDBCommunity resource: +3. Create a TLS-secured MongoDBCommunity resource: + + This assumes you already have the operator installed in namespace `` ``` helm upgrade --install community-operator mongodb/community-operator \ - --namespace mongodb --set resource.tls.useCertManager=true \ + --namespace --set resource.tls.useCertManager=true \ --set createResource=true --set resource.tls.enabled=true \ - --set namespace=mongodb --create-namespace + --set namespace= ``` This creates a resource secured with TLS and generates the necessary @@ -72,21 +73,21 @@ To secure connections to MongoDBCommunity resources with TLS using `cert-manager 1. 
Test your connection over TLS by - - Connecting to a `mongod` container using `kubectl`: + - Connecting to a `mongod` container inside a pod using `kubectl`: ``` - kubectl exec -it mongodb-replica-set -c mongod -- bash + kubectl exec -it -c mongod -- bash ``` - Where `mongodb-replica-set` is the name of your MongoDBCommunity resource + Where `mongodb-replica-set-pod` is the name of a pod from your MongoDBCommunity resource - Then, use `mongosh` to connect over TLS: + For how to get the connection string look at [Deploy A Replica Set](deploy-configure.md#deploy-a-replica-set) ``` - mongosh --tls --tlsCAFile /var/lib/tls/ca/ca.crt --tlsCertificateKeyFile \ - /var/lib/tls/server/*.pem \ - --host .-svc..svc.cluster.local + mongosh "" --tls --tlsCAFile /var/lib/tls/ca/ca.crt --tlsCertificateKeyFile /var/lib/tls/server/*.pem ``` Where `mongodb-replica-set` is the name of your MongoDBCommunity - resource and `namespace` is the namespace of your deployment. \ No newline at end of file + resource, `namespace` is the namespace of your deployment + and `connection-string` is a connection string for your `-svc` service. \ No newline at end of file diff --git a/docs/users.md b/docs/users.md index a1980e4fb..96a44570a 100644 --- a/docs/users.md +++ b/docs/users.md @@ -84,6 +84,6 @@ You cannot disable SCRAM authentication. - To authenticate to your MongoDBCommunity resource, run the following command: ``` - mongo "mongodb://..svc.cluster.local:27017/?replicaSet=" --username --password --authenticationDatabase + mongosh "mongodb://-svc..svc.cluster.local:27017/?replicaSet=" --username --password --authenticationDatabase ``` - To change a user's password, create and apply a new secret resource definition with a `metadata.name` that is the same as the name specified in `passwordSecretRef.name` of the MongoDB CRD. The Operator will automatically regenerate credentials. 
diff --git a/docs/x509-auth.md b/docs/x509-auth.md new file mode 100644 index 000000000..61a53a93c --- /dev/null +++ b/docs/x509-auth.md @@ -0,0 +1,129 @@ +# Enable X.509 Authentication + +You can use Helm or `kubectl` to enable X.509 authentication for the +MongoDB Agent and client. + +## Prerequisites + +1. Add the `cert-manager` repository to your `helm` repository list and + ensure it's up to date: + + ``` + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Install `cert-manager`: + + ``` + helm install cert-manager jetstack/cert-manager --namespace cert-manager \ + --create-namespace --set installCRDs=true + ``` + +## Use Helm to Enable X.509 Authentication + +You can use Helm to install and deploy the MongoDB Community Kubernetes +Operator with X.509 Authentication enabled for the MongoDB Agent and +client. To learn more, see [Install the Operator using Helm](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md#install-the-operator-using-helm). + +1. To deploy the MongoDB Community Kubernetes Operator, copy and paste + the following command and replace the `` variable with the + namespace: + + **Note:** + + The following command deploys a sample resource with X.509 enabled + for both the MongoDB Agent and client authentication. It also creates + a sample X.509 user and the certificate that the user can use to + authenticate. + + ``` + helm upgrade --install community-operator mongodb/community-operator \ + --namespace --set namespace= --create-namespace \ + --set resource.tls.useCertManager=true --set resource.tls.enabled=true \ + --set resource.tls.useX509=true --set resource.tls.sampleX509User=true \ + --set createResource=true + ``` + +## Use `kubectl` to Enable X.509 Authentication + +You can use Helm to install and deploy the MongoDB Community Kubernetes +Operator with X.509 Authentication enabled for the MongoDB Agent and +client. + +1. 
To install the MongoDB Community Kubernetes Operator, see + [Install the Operator using kubectl](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md#install-the-operator-using-kubectl). + +1. To create a CA, ConfigMap, secrets, issuer, and certificate, see + [Enable External Access to a MongoDB Deployment](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/external_access.md). + +1. Create a YAML file for the MongoDB Agent certificate. For an example, + see [agent-certificate.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/external_access/agent-certificate.yaml). + + **Note:** + + - For the `spec.issuerRef.name` parameter, specify the + `cert-manager` issuer that you created previously. + - For the `spec.secretName` parameter, specify the same + value as the `spec.security.authentication.agentCertificateSecretRef` + parameter in your resource. This secret should contain a signed + X.509 certificate and a private key for the MongoDB agent. + +1. To apply the file, copy and paste the following command and replace + the `` variable with the name of your MongoDB Agent + certificate and the `` variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` + +1. Create a YAML file for your resource. For an example, see + [mongodb.com_v1_mongodbcommunity_x509.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml). + + **Note:** + + - For the `spec.security.tls.certificateKeySecretRef.name` parameter, + specify a reference to the secret that contains the private key and + certificate to use for TLS. The operator expects the PEM encoded key + and certificate available at "tls.key" and "tls.crt". Use the same + format used for the standard "kubernetes.io/tls" Secret type, but no + specific type is required. 
Alternatively, you can provide + an entry called "tls.pem" that contains the concatenation of the + certificate and key. If all of "tls.pem", "tls.crt" and "tls.key" + are present, the "tls.pem" entry needs to equal the concatenation + of "tls.crt" and "tls.key". + + - For the `spec.security.tls.caConfigMapRef.name` parameter, specify + the ConfigMap that you created previously. + + - For the `spec.authentication.modes` parameter, specify `X509`. + + - If you have multiple authentication modes, specify the + `spec.authentication.agentMode` parameter. + + - The `spec.authentication.agentCertificateSecretRef` parameter + defaults to `agent-certs`. + + - For the `spec.users.db` parameter, specify `$external`. + + - Do not set the `spec.users.scramCredentialsSecretName` parameter + and the `spec.users.passwordSecretRef` parameters. + +1. To apply the file, copy and paste the following command and replace + the `` variable with your resource and the `` + variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` + +1. Create a YAML file for the client certificate. For an example, see + [cert-x509.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/external_access/cert-x509.yaml). + +1. 
To apply the file, copy and paste the following command and replace + the `` variable with the name of your client + certificate and the `` variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` diff --git a/go.mod b/go.mod index ec3632120..35b8ccebc 100644 --- a/go.mod +++ b/go.mod @@ -1,90 +1,90 @@ module github.com/mongodb/mongodb-kubernetes-operator -go 1.21 +go 1.24.0 require ( github.com/blang/semver v3.5.1+incompatible - github.com/go-logr/logr v1.2.4 + github.com/go-logr/logr v1.4.2 github.com/hashicorp/go-multierror v1.1.1 github.com/imdario/mergo v0.3.15 - github.com/spf13/cast v1.5.1 - github.com/stretchr/objx v0.5.0 - github.com/stretchr/testify v1.8.4 + github.com/spf13/cast v1.7.1 + github.com/stretchr/objx v0.5.2 + github.com/stretchr/testify v1.10.0 github.com/xdg/stringprep v1.0.3 - go.mongodb.org/mongo-driver v1.12.0 - go.uber.org/zap v1.24.0 + go.mongodb.org/mongo-driver v1.16.0 + go.uber.org/zap v1.27.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - k8s.io/api v0.25.12 - k8s.io/apimachinery v0.25.12 - k8s.io/client-go v0.25.12 - sigs.k8s.io/controller-runtime v0.12.3 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.30.10 + k8s.io/apimachinery v0.30.10 + k8s.io/client-go v0.30.10 + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 + sigs.k8s.io/controller-runtime v0.18.7 + sigs.k8s.io/yaml v1.4.0 ) +require google.golang.org/protobuf v1.33.0 // indirect + require ( - cloud.google.com/go v0.97.0 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.1 // 
indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.13.6 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/montanaflynn/stats v0.7.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/net v0.13.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.23.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gopkg.in/inf.v0 v0.9.1 // 
indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.14 // indirect - k8s.io/component-base v0.24.14 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 4ac7044d5..4e2b54c85 100644 --- a/go.sum +++ b/go.sum @@ -1,336 +1,131 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod 
h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell 
v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt 
v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp 
v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof 
v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 
h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.5.1 
h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -341,480 +136,112 @@ github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= -go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4= +go.mongodb.org/mongo-driver v1.16.0/go.mod 
h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= 
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint 
v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 
h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api 
v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.12 h1:vMyRHX3SASysor6zk81DsYXbkVdvzQEIL4gA+6+j6mQ= -k8s.io/api v0.25.12/go.mod h1:pAGhdr4HvJlOa1g26QpNeiQLNnzc6nwU92MQSqY2pBk= -k8s.io/apiextensions-apiserver v0.24.14 h1:ktxuWE03e7yXj472uiJa009QQbnV+zLlJqzLQU/9OSM= -k8s.io/apiextensions-apiserver v0.24.14/go.mod h1:DwzZPn3zq6ooevBGEmEwA4yOMyfjmPtUYkU8Uc/o0YY= -k8s.io/apimachinery v0.25.12 h1:xLVMeHrUfO4Eq2CK60YS+ElVYv0AUNSGVYdHKZFBHRE= -k8s.io/apimachinery v0.25.12/go.mod h1:IFwbcNi3gKkfDhuy0VYu3+BwbxbiIov3p6FR8ge1Epc= -k8s.io/client-go v0.25.12 h1:LSwQNUqm368OjEoITifwM8+P/B+7wxvZ+yPKbFanVWI= -k8s.io/client-go v0.25.12/go.mod h1:WD2cp9N7NLyz2jMoq49vC6+8HKkjhqaDkk93l3eJO0M= -k8s.io/component-base v0.24.14 h1:wKMSPRV1Ud8FByaOA6sE63iSEoOn299PjXAQel+6dEg= -k8s.io/component-base v0.24.14/go.mod h1:fvCLkVgILslt0LrXaPRyZal9A+uxs8FdMZb33IkSenA= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.30.10 h1:2YvzRF/BELgCvxbQqFKaan5hnj2+y7JOuqu2WpVk3gg= +k8s.io/api v0.30.10/go.mod h1:Hyz3ZuK7jVLJBUFvwzDSGwxHuDdsrGs5RzF16wfHIn4= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.10 h1:UflKuJeSSArttm05wjYP0GwpTlvjnMbDKFn6F7rKkKU= +k8s.io/apimachinery v0.30.10/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.10 h1:C0oWM82QMvosIl/IdJhWfTUb7rIxM52rNSutFBknAVY= +k8s.io/client-go v0.30.10/go.mod 
h1:OfTvt0yuo8VpMViOsgvYQb+tMJQLNWVBqXWkzdFXSq4= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.7 h1:WDnx8LTRY8Fn1j/7B+S/R9MeDjWNAzpDBoaSvMSrQME= +sigs.k8s.io/controller-runtime v0.18.7/go.mod h1:L9r3fUZhID7Q9eK9mseNskpaTg2n11f/tlb8odyzJ4Y= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/helm-charts b/helm-charts index fee420b69..c6b6488a2 160000 --- a/helm-charts +++ b/helm-charts @@ -1 +1 @@ -Subproject commit fee420b69f207199e79976844998eef3b3bb204f +Subproject commit c6b6488a2a84cb806eadac0e286b6060914082d5 diff --git a/inventories/e2e-inventory.yaml b/inventories/e2e-inventory.yaml index 8b65252d2..c2247dff4 100644 --- a/inventories/e2e-inventory.yaml +++ b/inventories/e2e-inventory.yaml @@ -1,14 +1,15 @@ vars: registry: + architecture: amd64 images: - - name: e2e-arm64 + - name: e2e vars: context: . 
template_context: scripts/dev/templates inputs: - - e2e_image - platform: linux/arm64 + - image + platform: linux/$(inputs.params.architecture) stages: - name: e2e-template task_type: dockerfile_template @@ -30,41 +31,7 @@ images: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.e2e_image) - tag: $(inputs.params.version_id)-arm64 - - registry: $(inputs.params.registry)/$(inputs.params.e2e_image) - tag: latest-arm64 - - - name: e2e-amd64 - vars: - context: . - template_context: scripts/dev/templates - inputs: - - e2e_image - platform: linux/amd64 - stages: - - name: e2e-template - task_type: dockerfile_template - distro: e2e - - inputs: - - builder - - base_image - - output: - - dockerfile: scripts/dev/templates/Dockerfile.ubi-$(inputs.params.version_id) - - - name: e2e-build - task_type: docker_build - - dockerfile: scripts/dev/templates/Dockerfile.ubi-$(inputs.params.version_id) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.e2e_image) - tag: $(inputs.params.version_id)-amd64 - - registry: $(inputs.params.registry)/$(inputs.params.e2e_image) - tag: latest-amd64 - + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: latest-$(inputs.params.architecture) diff --git a/inventories/operator-inventory.yaml b/inventories/operator-inventory.yaml index 3a9a440c2..ab08796a8 100644 --- a/inventories/operator-inventory.yaml +++ b/inventories/operator-inventory.yaml @@ -1,17 +1,18 @@ vars: registry: + architecture: amd64 images: - - name: operator-ubi-amd64 + - name: operator vars: context: . 
template_context: scripts/dev/templates/operator inputs: - - operator_image - - operator_image_dev + - image + - image_dev - platform: linux/amd64 + platform: linux/$(inputs.params.architecture) stages: # @@ -29,8 +30,8 @@ images: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: $(inputs.params.version_id)-context-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - name: operator-template-dev task_type: dockerfile_template @@ -51,16 +52,16 @@ images: - version_id buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.operator_image_dev):$(inputs.params.version_id)-context-amd64 + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) labels: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: $(inputs.params.version_id)-amd64 - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: latest-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) # # Release build stages @@ -82,8 +83,8 @@ images: builder_image: $(inputs.params.builder_image) output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image) - tag: $(inputs.params.release_version)-context-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) - name: operator-template-release task_type: dockerfile_template @@ -107,125 +108,11 @@ images: dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.release_version) buildargs: - imagebase: 
$(inputs.params.registry)/$(inputs.params.operator_image):$(inputs.params.release_version)-context-amd64 + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) labels: quay.expires-after: Never output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image) - tag: $(inputs.params.release_version)-amd64 - - - name: operator-ubi-arm64 - vars: - context: . - template_context: scripts/dev/templates/operator - - inputs: - - operator_image - - operator_image_dev - - platform: linux/arm64 - - stages: - # - # Dev build stages - # - - name: operator-builder-dev - task_type: docker_build - tags: [ "ubi" ] - dockerfile: scripts/dev/templates/operator/Dockerfile.builder - - buildargs: - builder_image: $(inputs.params.builder_image) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: $(inputs.params.version_id)-context-arm64 - - - name: operator-template-dev - task_type: dockerfile_template - tags: [ "ubi" ] - template_file_extension: operator - inputs: - - base_image - - output: - - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.version_id) - - - name: operator-build-dev - task_type: docker_build - tags: [ "ubi" ] - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.version_id) - - inputs: - - version_id - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.operator_image_dev):$(inputs.params.version_id)-context-arm64 - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: $(inputs.params.version_id)-arm64 - - registry: $(inputs.params.registry)/$(inputs.params.operator_image_dev) - tag: latest-arm64 - - # - # Release build stages - # - - name: operator-builder-release - task_type: docker_build - tags: [ "ubi", "release" ] - - inputs: - - builder_image - 
- release_version - - dockerfile: scripts/dev/templates/operator/Dockerfile.builder - - labels: - quay.expires-after: Never - - buildargs: - builder_image: $(inputs.params.builder_image) - - output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image) - tag: $(inputs.params.release_version)-context-arm64 - - - name: operator-template-release - task_type: dockerfile_template - tags: [ "ubi", "release" ] - template_file_extension: operator - inputs: - - base_image - - release_version - - output: - - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.release_version) - - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-operator/$(inputs.params.release_version)/ubi/Dockerfile - - - name: operator-build-release - task_type: docker_build - tags: [ "ubi", "release" ] - - inputs: - - release_version - - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.release_version) - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.operator_image):$(inputs.params.release_version)-context-arm64 - - labels: - quay.expires-after: Never - - output: - - registry: $(inputs.params.registry)/$(inputs.params.operator_image) - tag: $(inputs.params.release_version)-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) diff --git a/inventory.yaml b/inventory.yaml index d442b5fae..e2a37214c 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -1,241 +1,39 @@ vars: registry: + # Default value but overwritten in pipeline.py + architecture: amd64 images: - - name: agent-ubuntu - vars: - context: . 
- template_context: scripts/dev/templates/agent - - inputs: - - agent_version - - tools_version - - agent_image - - agent_image_dev - - platform: linux/amd64 - stages: - - name: agent-ubuntu-context - task_type: docker_build - dockerfile: scripts/dev/templates/agent/Dockerfile.builder - tags: [ "ubuntu" ] - buildargs: - agent_version: $(inputs.params.agent_version) - tools_version: $(inputs.params.tools_version) - agent_distro: linux_x86_64 - tools_distro: ubuntu1604-x86_64 - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id)-context - - - name: agent-template-ubuntu - task_type: dockerfile_template - tags: [ "ubuntu" ] - distro: ubuntu - - output: - - dockerfile: scripts/dev/templates/agent/Dockerfile.ubuntu-$(inputs.params.version_id) - - - name: agent-ubuntu-build - task_type: docker_build - tags: [ "ubuntu" ] - - dockerfile: scripts/dev/templates/agent/Dockerfile.ubuntu-$(inputs.params.version_id) - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image_dev):$(inputs.params.version_id)-context - agent_version: $(inputs.params.agent_version) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id) - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: latest - - - name: agent-template-ubuntu-s3 - task_type: dockerfile_template - tags: [ "ubuntu", "release" ] - distro: ubuntu - - inputs: - - release_version - - s3_bucket - - output: - - dockerfile: $(inputs.params.s3_bucket)/mongodb-agent/$(inputs.params.release_version)/ubuntu/Dockerfile - - - name: agent-context-ubuntu-release - task_type: docker_build - dockerfile: scripts/dev/templates/agent/Dockerfile.builder - tags: [ "ubuntu", "release" ] - buildargs: - agent_version: $(inputs.params.agent_version) - tools_version: $(inputs.params.tools_version) - 
agent_distro: linux_x86_64 - tools_distro: ubuntu1604-x86_64 - - labels: - quay.expires-after: Never - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version)-context - - - name: agent-ubuntu-release - task_type: docker_build - tags: [ "ubuntu", "release" ] - distro: ubuntu - - dockerfile: scripts/dev/templates/agent/Dockerfile.ubuntu-$(inputs.params.version_id) - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image):$(inputs.params.agent_version)-context - agent_version: $(inputs.params.agent_version) - - labels: - quay.expires-after: Never - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version) - - - name: agent-ubi-amd64 - vars: - context: . - template_context: scripts/dev/templates/agent - - inputs: - - agent_version - - tools_version - - agent_image - - agent_image_dev - - platform: linux/amd64 - stages: - - name: agent-ubi-context - task_type: docker_build - dockerfile: scripts/dev/templates/agent/Dockerfile.builder - tags: [ "ubi" ] - buildargs: - agent_version: $(inputs.params.agent_version) - tools_version: $(inputs.params.tools_version) - agent_distro: rhel7_x86_64 - tools_distro: rhel70-x86_64 - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id)-context-amd64 - - - name: agent-template-ubi - task_type: dockerfile_template - distro: ubi - tags: [ "ubi" ] - - output: - - dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) - - - name: agent-ubi-build - task_type: docker_build - tags: [ "ubi" ] - - dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image_dev):$(inputs.params.version_id)-context-amd64 - agent_version: $(inputs.params.agent_version) - - labels: - 
quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id)-amd64 - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: latest-amd64 - - - name: agent-template-ubi-s3 - task_type: dockerfile_template - tags: [ "ubi", "release" ] - distro: ubi - - inputs: - - release_version - - output: - - dockerfile: $(inputs.params.s3_bucket)/mongodb-agent/$(inputs.params.release_version)/ubi/Dockerfile - - - name: agent-context-ubi-release - task_type: docker_build - dockerfile: scripts/dev/templates/agent/Dockerfile.builder - tags: [ "ubi", "release" ] - buildargs: - agent_version: $(inputs.params.agent_version) - tools_version: $(inputs.params.tools_version) - agent_distro: rhel7_x86_64 - tools_distro: rhel70-x86_64 - - labels: - quay.expires-after: Never - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version)-context-amd64 - - - name: agent-ubi-release - task_type: docker_build - tags: [ "ubi", "release" ] - dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image):$(inputs.params.agent_version)-context-amd64 - agent_version: $(inputs.params.agent_version) - - labels: - quay.expires-after: Never - - output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version)-amd64 - - - name: agent-ubi-arm64 + - name: agent vars: context: . 
template_context: scripts/dev/templates/agent inputs: - - agent_version + - release_version - tools_version - - agent_image - - agent_image_dev + - image + - image_dev - platform: linux/arm64 + platform: linux/$(inputs.params.architecture) stages: - - name: agent-ubi-context + - name: mongodb-agent-context task_type: docker_build dockerfile: scripts/dev/templates/agent/Dockerfile.builder tags: [ "ubi" ] buildargs: - agent_version: $(inputs.params.agent_version) + agent_version: $(inputs.params.release_version) tools_version: $(inputs.params.tools_version) - agent_distro: amzn2_aarch64 - tools_distro: rhel82-aarch64 + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) labels: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id)-context-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - name: agent-template-ubi task_type: dockerfile_template @@ -245,24 +43,24 @@ images: output: - dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) - - name: agent-ubi-build + - name: mongodb-agent-build task_type: docker_build tags: [ "ubi" ] dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image_dev):$(inputs.params.version_id)-context-arm64 - agent_version: $(inputs.params.agent_version) + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) + agent_version: $(inputs.params.release_version) labels: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: $(inputs.params.version_id)-arm64 - - registry: $(inputs.params.registry)/$(inputs.params.agent_image_dev) - tag: latest-arm64 + - registry: 
$(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) - name: agent-template-ubi-s3 task_type: dockerfile_template @@ -280,44 +78,44 @@ images: dockerfile: scripts/dev/templates/agent/Dockerfile.builder tags: [ "ubi", "release" ] buildargs: - agent_version: $(inputs.params.agent_version) + agent_version: $(inputs.params.release_version) tools_version: $(inputs.params.tools_version) - agent_distro: amzn2_aarch64 - tools_distro: rhel82-aarch64 + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) labels: quay.expires-after: Never output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version)-context-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) - - name: agent-ubi-release + - name: mongodb-agent-release task_type: docker_build tags: [ "ubi", "release" ] dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.agent_image):$(inputs.params.agent_version)-context-arm64 - agent_version: $(inputs.params.agent_version) + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) + agent_version: $(inputs.params.release_version) labels: quay.expires-after: Never output: - - registry: $(inputs.params.registry)/$(inputs.params.agent_image) - tag: $(inputs.params.agent_version)-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) - - name: readiness-probe-init-amd64 + - name: readiness-probe vars: context: . 
template_context: scripts/dev/templates/readiness inputs: - - readiness_probe_image - - readiness_probe_image_dev + - image + - image_dev - platform: linux/amd64 + platform: linux/$(inputs.params.architecture) stages: - name: readiness-init-context-build task_type: docker_build @@ -330,8 +128,8 @@ images: builder_image: $(inputs.params.builder_image) output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: $(inputs.params.version_id)-context-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - name: readiness-template-ubi task_type: dockerfile_template @@ -350,17 +148,17 @@ images: dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.version_id) buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev):$(inputs.params.version_id)-context-amd64 + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) labels: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: $(inputs.params.version_id)-amd64 - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: latest-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) - name: readiness-init-context-release task_type: docker_build @@ -378,8 +176,8 @@ images: - builder_image output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image) - tag: $(inputs.params.release_version)-context-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) - name: 
readiness-template-release task_type: dockerfile_template @@ -399,215 +197,7 @@ images: tags: [ "readiness-probe", "release" , "ubi" ] buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.readiness_probe_image):$(inputs.params.release_version)-context-amd64 - - labels: - quay.expires-after: Never - - inputs: - - base_image - - release_version - - output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image) - tag: $(inputs.params.release_version)-amd64 - - - name: readiness-probe-init-arm64 - vars: - context: . - template_context: scripts/dev/templates/readiness - - inputs: - - readiness_probe_image - - readiness_probe_image_dev - - platform: linux/arm64 - stages: - - name: readiness-init-context-build - task_type: docker_build - dockerfile: scripts/dev/templates/readiness/Dockerfile.builder - tags: [ "readiness-probe", "ubi" ] - labels: - quay.expires-after: 48h - - buildargs: - builder_image: $(inputs.params.builder_image) - - output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: $(inputs.params.version_id)-context-arm64 - - - name: readiness-template-ubi - task_type: dockerfile_template - tags: [ "ubi" ] - template_file_extension: readiness - - inputs: - - base_image - - output: - - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.version_id) - - - name: readiness-init-build - task_type: docker_build - tags: [ "readiness-probe", "ubi" ] - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.version_id) - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev):$(inputs.params.version_id)-context-arm64 - - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: $(inputs.params.version_id)-arm64 - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image_dev) - tag: latest-arm64 - - - 
name: readiness-init-context-release - task_type: docker_build - dockerfile: scripts/dev/templates/readiness/Dockerfile.builder - tags: [ "readiness-probe", "release" , "ubi" ] - - labels: - quay.expires-after: Never - - buildargs: - builder_image: $(inputs.params.builder_image) - - inputs: - - release_version - - builder_image - - output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image) - tag: $(inputs.params.release_version)-context-arm64 - - - name: readiness-template-release - task_type: dockerfile_template - tags: [ "readiness-probe", "release", "ubi" ] - template_file_extension: readiness - inputs: - - base_image - - release_version - - output: - - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.release_version) - - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-readinessprobe/$(inputs.params.release_version)/ubi/Dockerfile - - - name: readiness-init-build-release - task_type: docker_build - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.release_version) - tags: [ "readiness-probe", "release" , "ubi" ] - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.readiness_probe_image):$(inputs.params.release_version)-context-arm64 - - labels: - quay.expires-after: Never - - inputs: - - base_image - - release_version - - output: - - registry: $(inputs.params.registry)/$(inputs.params.readiness_probe_image) - tag: $(inputs.params.release_version)-arm64 - - - name: version-post-start-hook-init-amd64 - vars: - context: . 
- template_context: scripts/dev/templates/versionhook - - inputs: - - version_post_start_hook_image - - version_post_start_hook_image_dev - - platform: linux/amd64 - stages: - - name: version-post-start-hook-init-context-build - task_type: docker_build - dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder - tags: [ "post-start-hook", "ubi" ] - - buildargs: - builder_image: $(inputs.params.builder_image) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: $(inputs.params.version_id)-context-amd64 - - - name: version-post-start-hook-template-ubi - task_type: dockerfile_template - tags: [ "ubi" ] - template_file_extension: versionhook - - inputs: - - base_image - - output: - - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) - - - name: version-post-start-hook-init-build - task_type: docker_build - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) - tags: [ "post-start-hook", "ubi" ] - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev):$(inputs.params.version_id)-context-amd64 - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: $(inputs.params.version_id)-amd64 - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: latest-amd64 - - - name: version-post-start-hook-init-context-release - task_type: docker_build - dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder - tags: [ "release", "post-start-hook", "ubi", ] - - labels: - quay.expires-after: Never - - buildargs: - builder_image: $(inputs.params.builder_image) - - inputs: - - release_version - - builder_image - - output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image) - tag: 
$(inputs.params.release_version)-context-amd64 - - - name: versionhook-template-release - task_type: dockerfile_template - tags: [ "post-start-hook", "release", "ubi" ] - template_file_extension: versionhook - inputs: - - base_image - - release_version - - output: - - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) - - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-operator-version-upgrade-post-start-hook/$(inputs.params.release_version)/ubi/Dockerfile - - - name: version-post-start-hook-init-build-release - task_type: docker_build - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) - tags: [ "release", "post-start-hook", "ubi" ] - - buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image):$(inputs.params.release_version)-context-amd64 + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) labels: quay.expires-after: Never @@ -617,21 +207,21 @@ images: - release_version output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image) - tag: $(inputs.params.release_version)-amd64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) - - name: version-post-start-hook-init-arm64 + - name: version-upgrade-hook vars: context: . 
template_context: scripts/dev/templates/versionhook inputs: - - version_post_start_hook_image - - version_post_start_hook_image_dev + - image + - image_dev - platform: linux/arm64 + platform: linux/$(inputs.params.architecture) stages: - - name: version-post-start-hook-init-context-build + - name: version-upgrade-hook-context-build task_type: docker_build dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder tags: [ "post-start-hook", "ubi" ] @@ -643,8 +233,8 @@ images: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: $(inputs.params.version_id)-context-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - name: version-post-start-hook-template-ubi task_type: dockerfile_template @@ -657,24 +247,24 @@ images: output: - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) - - name: version-post-start-hook-init-build + - name: version-upgrade-hook-build task_type: docker_build dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) tags: [ "post-start-hook", "ubi" ] buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev):$(inputs.params.version_id)-context-arm64 + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) labels: quay.expires-after: 48h output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: $(inputs.params.version_id)-arm64 - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image_dev) - tag: latest-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: 
$(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) - - name: version-post-start-hook-init-context-release + - name: version-upgrade-hook-context-release task_type: docker_build dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder tags: [ "release", "post-start-hook", "ubi", ] @@ -690,8 +280,8 @@ images: - builder_image output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image) - tag: $(inputs.params.release_version)-context-arm64 + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) - name: versionhook-template-release task_type: dockerfile_template @@ -705,13 +295,13 @@ images: - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-operator-version-upgrade-post-start-hook/$(inputs.params.release_version)/ubi/Dockerfile - - name: version-post-start-hook-init-build-release + - name: version-upgrade-hook-build-release task_type: docker_build dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) tags: [ "release", "post-start-hook", "ubi" ] buildargs: - imagebase: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image):$(inputs.params.release_version)-context-arm64 + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) labels: quay.expires-after: Never @@ -721,5 +311,5 @@ images: - release_version output: - - registry: $(inputs.params.registry)/$(inputs.params.version_post_start_hook_image) - tag: $(inputs.params.release_version)-arm64 \ No newline at end of file + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) \ No newline at end of file diff --git 
a/licenses.csv b/licenses.csv new file mode 100644 index 000000000..931e7a9e4 --- /dev/null +++ b/licenses.csv @@ -0,0 +1,201 @@ +github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT +github.com/blang/semver,https://github.com/blang/semver/blob/v3.5.1/LICENSE,MIT +github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.1.2/LICENSE.txt,MIT +github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/v1.1.1/LICENSE,ISC +github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.9.0/LICENSE,MIT +github.com/evanphx/json-patch,https://github.com/evanphx/json-patch/blob/v4.12.0/LICENSE,BSD-3-Clause +github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.6.0/v5/LICENSE,BSD-3-Clause +github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.6.0/LICENSE,BSD-3-Clause +github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.1/LICENSE,Apache-2.0 +github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.19.5/LICENSE,Apache-2.0 +github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.20.0/LICENSE,Apache-2.0 +github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.19.14/LICENSE,Apache-2.0 +github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause +github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 +github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.2/LICENSE,BSD-3-Clause +github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.3/LICENSE,BSD-3-Clause +github.com/google/gnostic,https://github.com/google/gnostic/blob/v0.5.7-v3refs/LICENSE,Apache-2.0 +github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.5.9/LICENSE,BSD-3-Clause +github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.1.0/LICENSE,Apache-2.0 
+github.com/google/uuid,https://github.com/google/uuid/blob/v1.3.0/LICENSE,BSD-3-Clause +github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.0.0/LICENSE,MPL-2.0 +github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0 +github.com/imdario/mergo,https://github.com/imdario/mergo/blob/v0.3.15/LICENSE,BSD-3-Clause +github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT +github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT +github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.13.6/LICENSE,Apache-2.0 +github.com/klauspost/compress/internal/snapref,https://github.com/klauspost/compress/blob/v1.13.6/internal/snapref/LICENSE,BSD-3-Clause +github.com/klauspost/compress/zstd/internal/xxhash,https://github.com/klauspost/compress/blob/v1.13.6/zstd/internal/xxhash/LICENSE.txt,MIT +github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.7.6/LICENSE,MIT +github.com/matttproud/golang_protobuf_extensions/pbutil,https://github.com/matttproud/golang_protobuf_extensions/blob/v1.0.2/LICENSE,Apache-2.0 +github.com/moby/spdystream,https://github.com/moby/spdystream/blob/v0.2.0/LICENSE,Apache-2.0 +github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0 +github.com/modern-go/reflect2,https://github.com/modern-go/reflect2/blob/v1.0.2/LICENSE,Apache-2.0 +github.com/mongodb/mongodb-kubernetes-operator/api/v1,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/api/v1,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/manager,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness/testdata,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/cmd/versionhook,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/construct,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/construct,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/predicates,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/validation,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/watch,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/watch,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/agent,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/agent,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/mocks,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/pkg/helm,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/lifecycle,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/persistentvolumeclaim,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/apierrors,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/apierrors,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/functions,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/state,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/state,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/test/e2e,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/feature_compatibility_version,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/prometheus,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_arbiter,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_authentication,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_change_version,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_connection_string_options,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_cross_namespace_deploy,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_annotations_test_test,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_persistent_volume,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_role,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_4_5,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_5_6,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_6_7,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_port_change_with_arbiters,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mount_connection_string,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_multiple,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_operator_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_recovery,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_scale,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_scale_down,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_recreate_mdbc,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_rotate,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_rotate_delete_sts,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_arbitrary_config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_arbitrary_config_update,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_delete,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/wait,Unknown,Unknown 
+github.com/montanaflynn/stats,https://github.com/montanaflynn/stats/blob/1bf9dbcd8cbe/LICENSE,MIT +github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE,BSD-3-Clause +github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause +github.com/pmezard/go-difflib/difflib,https://github.com/pmezard/go-difflib/blob/v1.0.0/LICENSE,BSD-3-Clause +github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.14.0/LICENSE,Apache-2.0 +github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.3.0/LICENSE,Apache-2.0 +github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.37.0/LICENSE,Apache-2.0 +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.37.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause +github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.8.0/LICENSE,Apache-2.0 +github.com/spf13/cast,https://github.com/spf13/cast/blob/v1.6.0/LICENSE,MIT +github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause +github.com/stretchr/objx,https://github.com/stretchr/objx/blob/v0.5.1/LICENSE,MIT +github.com/stretchr/testify,https://github.com/stretchr/testify/blob/v1.8.4/LICENSE,MIT +github.com/xdg-go/pbkdf2,https://github.com/xdg-go/pbkdf2/blob/v1.0.0/LICENSE,Apache-2.0 +github.com/xdg-go/scram,https://github.com/xdg-go/scram/blob/v1.1.2/LICENSE,Apache-2.0 +github.com/xdg-go/stringprep,https://github.com/xdg-go/stringprep/blob/v1.0.4/LICENSE,Apache-2.0 +github.com/xdg/stringprep,https://github.com/xdg/stringprep/blob/v1.0.3/LICENSE,Apache-2.0 +github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/1be2e3e5546d/LICENSE,MIT +go.mongodb.org/mongo-driver,https://github.com/mongodb/mongo-go-driver/blob/v1.13.1/LICENSE,Apache-2.0 +go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.10.0/LICENSE.txt,MIT 
+go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.26.0/LICENSE.txt,MIT +golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.17.0:LICENSE,BSD-3-Clause +golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.17.0:LICENSE,BSD-3-Clause +golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/ee480838:LICENSE,BSD-3-Clause +golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.1.0:LICENSE,BSD-3-Clause +golang.org/x/sys/unix,https://cs.opensource.google/go/x/sys/+/v0.15.0:LICENSE,BSD-3-Clause +golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.15.0:LICENSE,BSD-3-Clause +golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.14.0:LICENSE,BSD-3-Clause +golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.3.0:LICENSE,BSD-3-Clause +gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.2.0/v2/LICENSE,Apache-2.0 +google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.28.1/LICENSE,BSD-3-Clause +gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause +gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT +gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 +gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT +k8s.io/api,https://github.com/kubernetes/api/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.26.10/third_party/forked/golang/LICENSE,BSD-3-Clause +k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/component-base/config,https://github.com/kubernetes/component-base/blob/v0.26.10/LICENSE,Apache-2.0 
+k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.80.1/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause +k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/pkg/validation/spec/LICENSE,Apache-2.0 +k8s.io/utils,https://github.com/kubernetes/utils/blob/99ec85e7a448/LICENSE,Apache-2.0 +k8s.io/utils/internal/third_party/forked/golang/net,https://github.com/kubernetes/utils/blob/99ec85e7a448/internal/third_party/forked/golang/LICENSE,BSD-3-Clause +sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.14.7/LICENSE,Apache-2.0 +sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/f223a00ba0e2/LICENSE,Apache-2.0 +sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.2.3/LICENSE,Apache-2.0 +sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,Apache-2.0 +sigs.k8s.io/yaml/goyaml.v2,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE,Apache-2.0 diff --git a/pipeline.py b/pipeline.py index 6350a757b..f288d1a5b 100644 --- a/pipeline.py +++ b/pipeline.py @@ -1,273 +1,140 @@ import argparse import json -import sys import subprocess -from typing import Dict, Optional +import sys +from typing import Dict, List, Set +from scripts.ci.base_logger import logger +from scripts.ci.images_signing import ( + sign_image, + verify_signature, + mongodb_artifactory_login, +) +from scripts.dev.dev_config import load_config, DevConfig from sonar.sonar import process_image -from scripts.dev.dev_config import load_config, DevConfig +# These image names must correspond to prefixes in release.json, developer configuration and inventories 
+VALID_IMAGE_NAMES = { + "agent", + "readiness-probe", + "version-upgrade-hook", + "operator", + "e2e", +} -VALID_IMAGE_NAMES = frozenset( - [ - "agent-ubi", - "agent-ubuntu", - "readiness-probe-init", - "version-post-start-hook-init", - "operator-ubi", - "e2e", - ] -) +AGENT_DISTRO_KEY = "agent_distro" +TOOLS_DISTRO_KEY = "tools_distro" -DEFAULT_IMAGE_TYPE = "ubi" -DEFAULT_NAMESPACE = "default" +AGENT_DISTROS_PER_ARCH = { + "amd64": {AGENT_DISTRO_KEY: "rhel8_x86_64", TOOLS_DISTRO_KEY: "rhel88-x86_64"}, + "arm64": {AGENT_DISTRO_KEY: "amzn2_aarch64", TOOLS_DISTRO_KEY: "rhel88-aarch64"}, +} -def _load_release() -> Dict: +def load_release() -> Dict: with open("release.json") as f: - release = json.loads(f.read()) - return release - - -def _build_agent_args(config: DevConfig) -> Dict[str, str]: - release = _load_release() - return { - "agent_version": release["mongodb-agent"]["version"], - "release_version": release["mongodb-agent"]["version"], - "tools_version": release["mongodb-agent"]["tools_version"], - "agent_image": config.agent_image, - "agent_image_dev": config.agent_dev_image, - "registry": config.repo_url, - "s3_bucket": config.s3_bucket, - } - - -def build_agent_image_ubi(config: DevConfig) -> None: - args = _build_agent_args(config) - args["agent_image"] = config.agent_image_ubi - args["agent_image_dev"] = config.agent_dev_image_ubi - config.ensure_tag_is_run("ubi") - - sonar_build_image( - "agent-ubi-amd64", - config, - args=args, - ) - sonar_build_image( - "agent-ubi-arm64", - config, - args=args, - ) - - create_and_push_manifest(config, config.agent_dev_image_ubi) - - if config.gh_run_id is not None and config.gh_run_id != "": - create_and_push_manifest(config, config.agent_dev_image_ubi, config.gh_run_id) - - if "release" in config.include_tags: - create_and_push_manifest(config, config.agent_image_ubi, args["agent_version"]) - create_and_push_manifest( - config, config.agent_image_ubi, args["agent_version"] + "-context" - ) + return json.load(f) -def 
build_agent_image_ubuntu(config: DevConfig) -> None: - image_name = "agent-ubuntu" - args = _build_agent_args(config) - args["agent_image"] = config.agent_image_ubuntu - args["agent_image_dev"] = config.agent_dev_image_ubuntu - config.ensure_tag_is_run("ubuntu") +def build_image_args(config: DevConfig, image_name: str) -> Dict[str, str]: + release = load_release() - sonar_build_image( - image_name, - config, - args=args, - ) - - -def build_readiness_probe_image(config: DevConfig) -> None: - release = _load_release() - config.ensure_tag_is_run("readiness-probe") - config.ensure_tag_is_run("ubi") - - sonar_build_image( - "readiness-probe-init-amd64", - config, - args={ - "builder": "true", - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "registry": config.repo_url, - "release_version": release["readiness-probe"], - "readiness_probe_image": config.readiness_probe_image, - "readiness_probe_image_dev": config.readiness_probe_image_dev, - "builder_image": release["golang-builder-image"], - "s3_bucket": config.s3_bucket, - }, - ) - - sonar_build_image( - "readiness-probe-init-arm64", - config, - args={ - "builder": "true", - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "registry": config.repo_url, - "release_version": release["readiness-probe"], - "readiness_probe_image": config.readiness_probe_image, - "readiness_probe_image_dev": config.readiness_probe_image_dev, - "builder_image": release["golang-builder-image"], - "s3_bucket": config.s3_bucket, - }, - ) - - create_and_push_manifest(config, config.readiness_probe_image_dev) + # Naming in pipeline : readiness-probe, naming in dev config : readiness_probe_image + image_name_prefix = image_name.replace("-", "_") - if config.gh_run_id is not None and config.gh_run_id != "": - create_and_push_manifest( - config, config.readiness_probe_image_dev, config.gh_run_id - ) - - if "release" in config.include_tags: - create_and_push_manifest( - config, config.readiness_probe_image, 
release["readiness-probe"] - ) - create_and_push_manifest( - config, - config.readiness_probe_image, - release["readiness-probe"] + "-context", - ) - - -def build_version_post_start_hook_image(config: DevConfig) -> None: - release = _load_release() - config.ensure_tag_is_run("post-start-hook") - config.ensure_tag_is_run("ubi") - - sonar_build_image( - "version-post-start-hook-init-amd64", - config, - args={ - "builder": "true", - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "registry": config.repo_url, - "release_version": release["version-upgrade-hook"], - "version_post_start_hook_image": config.version_upgrade_hook_image, - "version_post_start_hook_image_dev": config.version_upgrade_hook_image_dev, - "builder_image": release["golang-builder-image"], - "s3_bucket": config.s3_bucket, - }, - ) - - sonar_build_image( - "version-post-start-hook-init-arm64", - config, - args={ - "builder": "true", - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "registry": config.repo_url, - "release_version": release["version-upgrade-hook"], - "version_post_start_hook_image": config.version_upgrade_hook_image, - "version_post_start_hook_image_dev": config.version_upgrade_hook_image_dev, - "builder_image": release["golang-builder-image"], - "s3_bucket": config.s3_bucket, - }, - ) + # Default config + arguments = { + "builder": "true", + # Defaults to "" if empty, e2e has no release version + "release_version": release.get(image_name, ""), + "tools_version": "", + "image": getattr(config, f"{image_name_prefix}_image"), + # Defaults to "" if empty, e2e has no dev image + "image_dev": getattr(config, f"{image_name_prefix}_image_dev", ""), + "registry": config.repo_url, + "s3_bucket": config.s3_bucket, + "builder_image": release["golang-builder-image"], + "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", + "inventory": "inventory.yaml", + "skip_tags": config.skip_tags, # Include skip_tags + "include_tags": 
config.include_tags, # Include include_tags + } - create_and_push_manifest(config, config.version_upgrade_hook_image_dev) + # Handle special cases + if image_name == "operator": + arguments["inventory"] = "inventories/operator-inventory.yaml" - if config.gh_run_id is not None and config.gh_run_id != "": - create_and_push_manifest( - config, config.version_upgrade_hook_image_dev, config.gh_run_id - ) + if image_name == "e2e": + arguments.pop("builder", None) + arguments["base_image"] = release["golang-builder-image"] + arguments["inventory"] = "inventories/e2e-inventory.yaml" - if "release" in config.include_tags: - create_and_push_manifest( - config, config.version_upgrade_hook_image, release["version-upgrade-hook"] - ) - create_and_push_manifest( - config, - config.version_upgrade_hook_image, - release["version-upgrade-hook"] + "-context", - ) + if image_name == "agent": + arguments["tools_version"] = release["agent-tools-version"] + return arguments -def build_operator_ubi_image(config: DevConfig) -> None: - release = _load_release() - config.ensure_tag_is_run("ubi") - sonar_build_image( - "operator-ubi-amd64", - config, - args={ - "registry": config.repo_url, - "builder": "true", - "builder_image": release["golang-builder-image"], - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "operator_image": config.operator_image, - "operator_image_dev": config.operator_image_dev, - "release_version": release["mongodb-kubernetes-operator"], - "s3_bucket": config.s3_bucket, - }, - inventory="inventories/operator-inventory.yaml", - ) - sonar_build_image( - "operator-ubi-arm64", - config, - args={ - "registry": config.repo_url, - "builder": "true", - "builder_image": release["golang-builder-image"], - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - "operator_image": config.operator_image, - "operator_image_dev": config.operator_image_dev, - "release_version": release["mongodb-kubernetes-operator"], - "s3_bucket": config.s3_bucket, - 
}, - inventory="inventories/operator-inventory.yaml", - ) - create_and_push_manifest(config, config.operator_image_dev) +def sign_and_verify(registry: str, tag: str) -> None: + sign_image(registry, tag) + verify_signature(registry, tag) - if config.gh_run_id is not None and config.gh_run_id != "": - create_and_push_manifest(config, config.operator_image_dev, config.gh_run_id) - if "release" in config.include_tags: - create_and_push_manifest( - config, config.operator_image, release["mongodb-kubernetes-operator"] +def build_and_push_image( + image_name: str, + config: DevConfig, + args: Dict[str, str], + architectures: Set[str], + release: bool, + sign: bool, + insecure: bool = False, +) -> None: + if sign: + mongodb_artifactory_login() + for arch in architectures: + image_tag = f"{image_name}" + args["architecture"] = arch + if image_name == "agent": + args[AGENT_DISTRO_KEY] = AGENT_DISTROS_PER_ARCH[arch][AGENT_DISTRO_KEY] + args[TOOLS_DISTRO_KEY] = AGENT_DISTROS_PER_ARCH[arch][TOOLS_DISTRO_KEY] + process_image( + image_tag, + build_args=args, + inventory=args["inventory"], + skip_tags=args["skip_tags"], + include_tags=args["include_tags"], ) - create_and_push_manifest( - config, - config.operator_image, - release["mongodb-kubernetes-operator"] + "-context", + if release: + registry = args["registry"] + "/" + args["image"] + context_tag = args["release_version"] + "-context-" + arch + release_tag = args["release_version"] + "-" + arch + if sign: + sign_and_verify(registry, context_tag) + sign_and_verify(registry, release_tag) + + if args["image_dev"]: + image_to_push = args["image_dev"] + elif image_name == "e2e": + # If no image dev (only e2e is concerned) we push the normal image + image_to_push = args["image"] + else: + raise Exception("Dev image must be specified") + + push_manifest(config, architectures, image_to_push, insecure) + + if config.gh_run_id: + push_manifest(config, architectures, image_to_push, insecure, config.gh_run_id) + + if release: + registry 
= args["registry"] + "/" + args["image"] + context_tag = args["release_version"] + "-context" + push_manifest( + config, architectures, args["image"], insecure, args["release_version"] ) - - -def build_e2e_image(config: DevConfig) -> None: - release = _load_release() - sonar_build_image( - "e2e-arm64", - config, - args={ - "registry": config.repo_url, - "base_image": release["golang-builder-image"], - "e2e_image": config.e2e_image, - }, - inventory="inventories/e2e-inventory.yaml", - ) - sonar_build_image( - "e2e-amd64", - config, - args={ - "registry": config.repo_url, - "base_image": release["golang-builder-image"], - "e2e_image": config.e2e_image, - }, - inventory="inventories/e2e-inventory.yaml", - ) - - create_and_push_manifest(config, config.e2e_image) - - if config.gh_run_id is not None and config.gh_run_id != "": - create_and_push_manifest(config, config.e2e_image, config.gh_run_id) + push_manifest(config, architectures, args["image"], insecure, context_tag) + if sign: + sign_and_verify(registry, args["release_version"]) + sign_and_verify(registry, context_tag) """ @@ -281,97 +148,145 @@ def build_e2e_image(config: DevConfig) -> None: """ -def create_and_push_manifest( - config: DevConfig, image: str, tag: str = "latest" +def push_manifest( + config: DevConfig, + architectures: Set[str], + image_name: str, + insecure: bool = False, + image_tag: str = "latest", ) -> None: - final_manifest = "{0}/{1}:{2}".format(config.repo_url, image, tag) - args = ["docker", "manifest", "rm", final_manifest] - args_str = " ".join(args) - print(f"removing existing manifest: {args_str}") - subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + logger.info(f"Pushing manifest for {image_tag}") + final_manifest = "{0}/{1}:{2}".format(config.repo_url, image_name, image_tag) + remove_args = ["docker", "manifest", "rm", final_manifest] + logger.info("Removing existing manifest") + run_cli_command(remove_args, fail_on_error=False) - args = [ + create_args = [ 
"docker", "manifest", "create", final_manifest, - "--amend", - final_manifest + "-amd64", - "--amend", - final_manifest + "-arm64", ] - args_str = " ".join(args) - print(f"creating new manifest: {args_str}") - cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if cp.returncode != 0: - raise Exception(cp.stderr) + if insecure: + create_args.append("--insecure") - args = ["docker", "manifest", "push", final_manifest] - args_str = " ".join(args) - print(f"pushing new manifest: {args_str}") - cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + for arch in architectures: + create_args.extend(["--amend", final_manifest + "-" + arch]) - if cp.returncode != 0: - raise Exception(cp.stderr) + logger.info("Creating new manifest") + run_cli_command(create_args) + push_args = ["docker", "manifest", "push", final_manifest] + logger.info("Pushing new manifest") + run_cli_command(push_args) -def sonar_build_image( - image_name: str, - config: DevConfig, - args: Optional[Dict[str, str]] = None, - inventory: str = "inventory.yaml", -) -> None: - """Calls sonar to build `image_name` with arguments defined in `args`.""" - process_image( - image_name, - build_args=args, - inventory=inventory, - include_tags=config.include_tags, - skip_tags=config.skip_tags, - ) + +# Raises exceptions by default +def run_cli_command(args: List[str], fail_on_error: bool = True) -> None: + command = " ".join(args) + logger.debug(f"Running: {command}") + try: + cp = subprocess.run( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + check=False, + ) + except Exception as e: + logger.error(f" Command raised the following exception: {e}") + if fail_on_error: + raise Exception + else: + logger.warning("Continuing...") + return + + if cp.returncode != 0: + error_msg = cp.stderr.decode().strip() + stdout = cp.stdout.decode().strip() + logger.error(f"Error running command") + logger.error(f"stdout:\n{stdout}") + 
logger.error(f"stderr:\n{error_msg}") + if fail_on_error: + raise Exception + else: + logger.warning("Continuing...") + return def _parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument("--image-name", type=str) - parser.add_argument("--release", type=lambda x: x.lower() == "true") + parser.add_argument("--release", action="store_true", default=False) + parser.add_argument( + "--arch", + choices=["amd64", "arm64"], + nargs="+", + help="for daily builds only, specify the list of architectures to build for images", + ) parser.add_argument("--tag", type=str) + parser.add_argument("--sign", action="store_true", default=False) + parser.add_argument("--insecure", action="store_true", default=False) return parser.parse_args() +""" +Takes arguments: +--image-name : The name of the image to build, must be one of VALID_IMAGE_NAMES +--release : We push the image to the registry only if this flag is set +--architecture : List of architectures to build for the image +--sign : Sign images with our private key if sign is set (only for release) + +Run with --help for more information +Example usage : `python pipeline.py --image-name agent --release --sign` + +Builds and push the docker image to the registry +Many parameters are defined in the dev configuration, default path is : ~/.community-operator-dev/config.json +""" + + def main() -> int: args = _parse_args() image_name = args.image_name if image_name not in VALID_IMAGE_NAMES: - print( - f"Image name [{image_name}] is not valid. Must be one of [{', '.join(VALID_IMAGE_NAMES)}]" + logger.error( + f"Invalid image name: {image_name}. Valid options are: {VALID_IMAGE_NAMES}" ) return 1 - config = load_config() + # Handle dev config + config: DevConfig = load_config() + config.gh_run_id = args.tag - # by default we do not want to run any release tasks. We must explicitly - # use the --release flag to run them. 
- config.ensure_skip_tag("release") + # Warn user if trying to release E2E tests + if args.release and image_name == "e2e": + logger.warning( + "Warning : releasing E2E test will fail because E2E image has no release version" + ) - config.gh_run_id = args.tag + # Skipping release tasks by default + if not args.release: + config.ensure_skip_tag("release") + if args.sign: + logger.warning("--sign flag has no effect without --release") - # specify --release to release the image - if args.release: - config.ensure_tag_is_run("release") + if args.arch: + arch_set = set(args.arch) + else: + # Default is multi-arch + arch_set = {"amd64", "arm64"} + logger.info(f"Building for architectures: {','.join(arch_set)}") - image_build_function = { - "agent-ubi": build_agent_image_ubi, - "agent-ubuntu": build_agent_image_ubuntu, - "readiness-probe-init": build_readiness_probe_image, - "version-post-start-hook-init": build_version_post_start_hook_image, - "operator-ubi": build_operator_ubi_image, - "e2e": build_e2e_image, - }[image_name] + if not args.sign: + logger.warning("--sign flag not provided, images won't be signed") - image_build_function(config) + image_args = build_image_args(config, image_name) + + build_and_push_image( + image_name, config, image_args, arch_set, args.release, args.sign, args.insecure + ) return 0 diff --git a/pkg/agent/agent_readiness.go b/pkg/agent/agent_readiness.go index 290697f3d..eefe3a49d 100644 --- a/pkg/agent/agent_readiness.go +++ b/pkg/agent/agent_readiness.go @@ -1,6 +1,7 @@ package agent import ( + "context" "fmt" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod" @@ -27,9 +28,9 @@ type PodState struct { // AllReachedGoalState returns whether the agents associated with a given StatefulSet have reached goal state. // it achieves this by reading the Pod annotations and checking to see if they have reached the expected config versions. 
-func AllReachedGoalState(sts appsv1.StatefulSet, podGetter pod.Getter, desiredMemberCount, targetConfigVersion int, log *zap.SugaredLogger) (bool, error) { +func AllReachedGoalState(ctx context.Context, sts appsv1.StatefulSet, podGetter pod.Getter, desiredMemberCount, targetConfigVersion int, log *zap.SugaredLogger) (bool, error) { // AllReachedGoalState does not use desiredArbitersCount for backwards compatibility - podStates, err := GetAllDesiredMembersAndArbitersPodState(types.NamespacedName{ + podStates, err := GetAllDesiredMembersAndArbitersPodState(ctx, types.NamespacedName{ Namespace: sts.Namespace, Name: sts.Name, }, podGetter, desiredMemberCount, 0, targetConfigVersion, log) @@ -63,7 +64,7 @@ func AllReachedGoalState(sts appsv1.StatefulSet, podGetter pod.Getter, desiredMe // GetAllDesiredMembersAndArbitersPodState returns states of all desired pods in a replica set. // Pod names to search for are calculated using desiredMemberCount and desiredArbitersCount. Each pod is then checked if it exists // or if it reached goal state vs targetConfigVersion. 
-func GetAllDesiredMembersAndArbitersPodState(namespacedName types.NamespacedName, podGetter pod.Getter, desiredMembersCount, desiredArbitersCount, targetConfigVersion int, log *zap.SugaredLogger) ([]PodState, error) { +func GetAllDesiredMembersAndArbitersPodState(ctx context.Context, namespacedName types.NamespacedName, podGetter pod.Getter, desiredMembersCount, desiredArbitersCount, targetConfigVersion int, log *zap.SugaredLogger) ([]PodState, error) { podStates := make([]PodState, desiredMembersCount+desiredArbitersCount) membersPodNames := statefulSetPodNames(namespacedName.Name, desiredMembersCount) @@ -78,7 +79,7 @@ func GetAllDesiredMembersAndArbitersPodState(namespacedName types.NamespacedName IsArbiter: i >= len(membersPodNames), } - p, err := podGetter.GetPod(podNamespacedName) + p, err := podGetter.GetPod(ctx, podNamespacedName) if err != nil { if apiErrors.IsNotFound(err) { // we can skip below iteration and check for our goal state since the pod is not available yet diff --git a/pkg/agent/agent_readiness_test.go b/pkg/agent/agent_readiness_test.go index 685413322..2f898ad9d 100644 --- a/pkg/agent/agent_readiness_test.go +++ b/pkg/agent/agent_readiness_test.go @@ -1,6 +1,7 @@ package agent import ( + "context" "os" "testing" @@ -22,17 +23,18 @@ func init() { } func TestAllReachedGoalState(t *testing.T) { + ctx := context.Background() sts, err := statefulset.NewBuilder().SetName("sts").SetNamespace("test-ns").Build() assert.NoError(t, err) t.Run("Returns true if all pods are not found", func(t *testing.T) { - ready, err := AllReachedGoalState(sts, mockPodGetter{}, 3, 3, zap.S()) + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{}, 3, 3, zap.S()) assert.NoError(t, err) assert.True(t, ready) }) t.Run("Returns true if all pods are ready", func(t *testing.T) { - ready, err := AllReachedGoalState(sts, mockPodGetter{pods: []corev1.Pod{ + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{pods: []corev1.Pod{ createPodWithAgentAnnotation("3"), 
createPodWithAgentAnnotation("3"), createPodWithAgentAnnotation("3"), @@ -42,7 +44,7 @@ func TestAllReachedGoalState(t *testing.T) { }) t.Run("Returns false if one pod is not ready", func(t *testing.T) { - ready, err := AllReachedGoalState(sts, mockPodGetter{pods: []corev1.Pod{ + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{pods: []corev1.Pod{ createPodWithAgentAnnotation("2"), createPodWithAgentAnnotation("3"), createPodWithAgentAnnotation("3"), @@ -52,7 +54,7 @@ func TestAllReachedGoalState(t *testing.T) { }) t.Run("Returns true when the pods are not found", func(t *testing.T) { - ready, err := AllReachedGoalState(sts, mockPodGetter{shouldReturnNotFoundError: true}, 3, 3, zap.S()) + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{shouldReturnNotFoundError: true}, 3, 3, zap.S()) assert.NoError(t, err) assert.True(t, ready) }) @@ -92,7 +94,7 @@ type mockPodGetter struct { shouldReturnNotFoundError bool } -func (m mockPodGetter) GetPod(client.ObjectKey) (corev1.Pod, error) { +func (m mockPodGetter) GetPod(context.Context, client.ObjectKey) (corev1.Pod, error) { if m.shouldReturnNotFoundError || m.currPodIndex >= len(m.pods) { return corev1.Pod{}, notFoundError() } diff --git a/pkg/agent/replica_set_port_manager.go b/pkg/agent/replica_set_port_manager.go index 579205b5a..e47e94181 100644 --- a/pkg/agent/replica_set_port_manager.go +++ b/pkg/agent/replica_set_port_manager.go @@ -128,7 +128,7 @@ func (r *ReplicaSetPortManager) calculateExpectedPorts() (processPortMap map[str for _, podState := range r.currentPodStates { if !podState.ReachedGoalState { r.log.Debugf("Port change required but not all pods reached goal state, abandoning port change") - return processPortMap, portChangeRequired, oldPort + return processPortMap, true, oldPort } } @@ -143,5 +143,5 @@ func (r *ReplicaSetPortManager) calculateExpectedPorts() (processPortMap map[str } } - return processPortMap, portChangeRequired, oldPort + return processPortMap, true, oldPort } diff --git 
a/pkg/authentication/authentication.go b/pkg/authentication/authentication.go index 00bcfd83c..a856bda66 100644 --- a/pkg/authentication/authentication.go +++ b/pkg/authentication/authentication.go @@ -1,7 +1,9 @@ package authentication import ( + "context" "fmt" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "k8s.io/apimachinery/pkg/types" @@ -13,22 +15,55 @@ import ( "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" ) -func Enable(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { +func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { scramEnabled := false for _, authMode := range mdb.GetAuthOptions().AuthMechanisms { switch authMode { case constants.Sha1, constants.Sha256: if !scramEnabled { - if err := scram.Enable(auth, secretGetUpdateCreateDeleter, mdb); err != nil { + if err := scram.Enable(ctx, auth, secretGetUpdateCreateDeleter, mdb); err != nil { return fmt.Errorf("could not configure scram authentication: %s", err) } scramEnabled = true } case constants.X509: - if err := x509.Enable(auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { + if err := x509.Enable(ctx, auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { return fmt.Errorf("could not configure x509 authentication: %s", err) } } } return nil } + +func AddRemovedUsers(auth *automationconfig.Auth, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) { + deletedUsers := getRemovedUsersFromSpec(mdb.Spec, lastAppliedSpec) + + auth.UsersDeleted = append(auth.UsersDeleted, deletedUsers...) 
+} + +func getRemovedUsersFromSpec(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec *mdbv1.MongoDBCommunitySpec) []automationconfig.DeletedUser { + type user struct { + db string + name string + } + m := map[user]bool{} + var deletedUsers []automationconfig.DeletedUser + + for _, mongoDBUser := range currentMDB.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = true + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + _, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { + deletedUsers = append(deletedUsers, automationconfig.DeletedUser{User: mongoDBUser.Name, Dbs: []string{mongoDBUser.DB}}) + } + } + return deletedUsers +} diff --git a/pkg/authentication/authentication_test.go b/pkg/authentication/authentication_test.go index e97b7327d..edfef363d 100644 --- a/pkg/authentication/authentication_test.go +++ b/pkg/authentication/authentication_test.go @@ -1,6 +1,8 @@ package authentication import ( + "context" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,6 +18,7 @@ import ( ) func TestEnable(t *testing.T) { + ctx := context.Background() t.Run("SCRAM only", func(t *testing.T) { auth := automationconfig.Auth{} user := mocks.BuildScramMongoDBUser("my-user") @@ -27,7 +30,7 @@ func TestEnable(t *testing.T) { Build() secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, false, auth.Disabled) @@ -49,7 +52,7 @@ func TestEnable(t *testing.T) { Build() secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err 
:= Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, false, auth.Disabled) @@ -67,7 +70,7 @@ func TestEnable(t *testing.T) { agentSecret := x509.CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, false, auth.Disabled) @@ -90,7 +93,7 @@ func TestEnable(t *testing.T) { Build() secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, false, auth.Disabled) @@ -115,7 +118,7 @@ func TestEnable(t *testing.T) { agentSecret := x509.CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret, agentSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, false, auth.Disabled) @@ -130,6 +133,99 @@ func TestEnable(t *testing.T) { } +func TestGetDeletedUsers(t *testing.T) { + lastAppliedSpec := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{ + { + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + DB: "admin", + }, + }, + } + + t.Run("no 
change same resource", func(t *testing.T) { + actual := getRemovedUsersFromSpec(lastAppliedSpec, &lastAppliedSpec) + + var expected []automationconfig.DeletedUser + assert.Equal(t, expected, actual) + }) + + t.Run("new user", func(t *testing.T) { + current := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{ + { + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + DB: "admin", + }, + { + Name: "newUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "new-password-secret-name", + }, + ConnectionStringSecretName: "new-connection-string-secret", + DB: "admin", + }, + }, + } + + var expected []automationconfig.DeletedUser + actual := getRemovedUsersFromSpec(current, &lastAppliedSpec) + + assert.Equal(t, expected, actual) + }) + + t.Run("removed one user", func(t *testing.T) { + current := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{}, + } + + expected := []automationconfig.DeletedUser{ + { + User: "testUser", + Dbs: []string{"admin"}, + }, + } + actual := getRemovedUsersFromSpec(current, &lastAppliedSpec) + + assert.Equal(t, expected, actual) + }) +} + func buildConfigurable(name string, auth []string, agent string, users ...authtypes.User) mocks.MockConfigurable { return mocks.NewMockConfigurable( authtypes.Options{ diff --git a/pkg/authentication/authtypes/authtypes.go b/pkg/authentication/authtypes/authtypes.go index 02ca76d08..12d7b0cbb 100644 --- a/pkg/authentication/authtypes/authtypes.go +++ b/pkg/authentication/authtypes/authtypes.go @@ -70,6 +70,9 @@ type 
User struct { // Note: there will be one secret with connection strings per user created. ConnectionStringSecretName string + // ConnectionStringSecretNamespace is the namespace of the secret object created by the operator which exposes the connection strings for the user. + ConnectionStringSecretNamespace string `json:"connectionStringSecretNamespace,omitempty"` + // ConnectionStringOptions contains connection string options for this user // These options will be appended at the end of the connection string and // will override any existing options from the resources. diff --git a/pkg/authentication/mocks/mocks.go b/pkg/authentication/mocks/mocks.go index c89e9d8e4..627a105be 100644 --- a/pkg/authentication/mocks/mocks.go +++ b/pkg/authentication/mocks/mocks.go @@ -1,6 +1,7 @@ package mocks import ( + "context" "fmt" corev1 "k8s.io/api/core/v1" @@ -26,21 +27,21 @@ func NewMockedSecretGetUpdateCreateDeleter(secrets ...corev1.Secret) secret.GetU return mockSecretGetUpdateCreateDeleter } -func (c MockSecretGetUpdateCreateDeleter) DeleteSecret(objectKey client.ObjectKey) error { - delete(c.secrets, objectKey) +func (c MockSecretGetUpdateCreateDeleter) DeleteSecret(_ context.Context, key client.ObjectKey) error { + delete(c.secrets, key) return nil } -func (c MockSecretGetUpdateCreateDeleter) UpdateSecret(s corev1.Secret) error { +func (c MockSecretGetUpdateCreateDeleter) UpdateSecret(_ context.Context, s corev1.Secret) error { c.secrets[types.NamespacedName{Name: s.Name, Namespace: s.Namespace}] = s return nil } -func (c MockSecretGetUpdateCreateDeleter) CreateSecret(secret corev1.Secret) error { - return c.UpdateSecret(secret) +func (c MockSecretGetUpdateCreateDeleter) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.UpdateSecret(ctx, secret) } -func (c MockSecretGetUpdateCreateDeleter) GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) { +func (c MockSecretGetUpdateCreateDeleter) GetSecret(_ context.Context, objectKey 
client.ObjectKey) (corev1.Secret, error) { if s, ok := c.secrets[objectKey]; !ok { return corev1.Secret{}, &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} } else { diff --git a/pkg/authentication/scram/scram.go b/pkg/authentication/scram/scram.go index 564596185..c21e185f1 100644 --- a/pkg/authentication/scram/scram.go +++ b/pkg/authentication/scram/scram.go @@ -1,6 +1,7 @@ package scram import ( + "context" "encoding/base64" "fmt" @@ -31,16 +32,16 @@ const ( // Enable will configure all of the required Kubernetes resources for SCRAM-SHA to be enabled. // The agent password and keyfile contents will be configured and stored in a secret. // the user credentials will be generated if not present, or existing credentials will be read. -func Enable(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { +func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { opts := mdb.GetAuthOptions() - desiredUsers, err := convertMongoDBResourceUsersToAutomationConfigUsers(secretGetUpdateCreateDeleter, mdb) + desiredUsers, err := convertMongoDBResourceUsersToAutomationConfigUsers(ctx, secretGetUpdateCreateDeleter, mdb) if err != nil { return fmt.Errorf("could not convert users to Automation Config users: %s", err) } if opts.AutoAuthMechanism == constants.Sha256 || opts.AutoAuthMechanism == constants.Sha1 { - if err := ensureAgent(auth, secretGetUpdateCreateDeleter, mdb); err != nil { + if err := ensureAgent(ctx, auth, secretGetUpdateCreateDeleter, mdb); err != nil { return err } } @@ -48,7 +49,7 @@ func Enable(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.Get return enableClientAuthentication(auth, opts, desiredUsers) } -func ensureAgent(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { 
+func ensureAgent(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { generatedPassword, err := generate.RandomFixedLengthStringOfSize(20) if err != nil { return fmt.Errorf("could not generate password: %s", err) @@ -60,13 +61,13 @@ func ensureAgent(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secre } // ensure that the agent password secret exists or read existing password. - agentPassword, err := secret.EnsureSecretWithKey(secretGetUpdateCreateDeleter, mdb.GetAgentPasswordSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentPasswordKey, generatedPassword) + agentPassword, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentPasswordSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentPasswordKey, generatedPassword) if err != nil { return err } // ensure that the agent keyfile secret exists or read existing keyfile. - agentKeyFile, err := secret.EnsureSecretWithKey(secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) + agentKeyFile, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) if err != nil { return err } @@ -76,14 +77,14 @@ func ensureAgent(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secre // ensureScramCredentials will ensure that the ScramSha1 & ScramSha256 credentials exist and are stored in the credentials // secret corresponding to user of the given MongoDB deployment. 
-func ensureScramCredentials(getUpdateCreator secret.GetUpdateCreator, user authtypes.User, mdbNamespacedName types.NamespacedName, ownerRef []metav1.OwnerReference) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { +func ensureScramCredentials(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, user authtypes.User, mdbNamespacedName types.NamespacedName, ownerRef []metav1.OwnerReference) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { - password, err := secret.ReadKey(getUpdateCreator, user.PasswordSecretKey, types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdbNamespacedName.Namespace}) + password, err := secret.ReadKey(ctx, getUpdateCreator, user.PasswordSecretKey, types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdbNamespacedName.Namespace}) if err != nil { // if the password is deleted, that's fine we can read from the stored credentials that were previously generated if secret.SecretNotExist(err) { zap.S().Debugf("password secret was not found, reading from credentials from secret/%s", user.ScramCredentialsSecretName) - return readExistingCredentials(getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) + return readExistingCredentials(ctx, getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) } return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not read secret key: %s", err) } @@ -91,7 +92,7 @@ func ensureScramCredentials(getUpdateCreator secret.GetUpdateCreator, user autht // we should only need to generate new credentials in two situations. // 1. We are creating the credentials for the first time // 2. 
We are changing the password - shouldGenerateNewCredentials, err := needToGenerateNewCredentials(getUpdateCreator, user.Username, user.ScramCredentialsSecretName, mdbNamespacedName, password) + shouldGenerateNewCredentials, err := needToGenerateNewCredentials(ctx, getUpdateCreator, user.Username, user.ScramCredentialsSecretName, mdbNamespacedName, password) if err != nil { return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not determine if new credentials need to be generated: %s", err) } @@ -99,7 +100,7 @@ func ensureScramCredentials(getUpdateCreator secret.GetUpdateCreator, user autht // there are no changes required, we can re-use the same credentials. if !shouldGenerateNewCredentials { zap.S().Debugf("Credentials have not changed, using credentials stored in: secret/%s", user.ScramCredentialsSecretName) - return readExistingCredentials(getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) + return readExistingCredentials(ctx, getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) } // the password has changed, or we are generating it for the first time @@ -110,7 +111,7 @@ func ensureScramCredentials(getUpdateCreator secret.GetUpdateCreator, user autht } // create or update our credentials secret for this user - if err := createScramCredentialsSecret(getUpdateCreator, mdbNamespacedName, ownerRef, user.ScramCredentialsSecretName, sha1Creds, sha256Creds); err != nil { + if err := createScramCredentialsSecret(ctx, getUpdateCreator, mdbNamespacedName, ownerRef, user.ScramCredentialsSecretName, sha1Creds, sha256Creds); err != nil { return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("faild to create scram credentials secret %s: %s", user.ScramCredentialsSecretName, err) } @@ -120,8 +121,8 @@ func ensureScramCredentials(getUpdateCreator secret.GetUpdateCreator, user autht // needToGenerateNewCredentials determines if it is required to generate new credentials or not. 
// this will be the case if we are either changing password, or are generating credentials for the first time. -func needToGenerateNewCredentials(secretGetter secret.Getter, username, scramCredentialsSecretName string, mdbNamespacedName types.NamespacedName, password string) (bool, error) { - s, err := secretGetter.GetSecret(types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbNamespacedName.Namespace}) +func needToGenerateNewCredentials(ctx context.Context, secretGetter secret.Getter, username, scramCredentialsSecretName string, mdbNamespacedName types.NamespacedName, password string) (bool, error) { + s, err := secretGetter.GetSecret(ctx, types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbNamespacedName.Namespace}) if err != nil { // haven't generated credentials yet, so we are changing password if secret.SecretNotExist(err) { @@ -151,7 +152,7 @@ func needToGenerateNewCredentials(secretGetter secret.Getter, username, scramCre return false, err } - existingSha1Creds, existingSha256Creds, err := readExistingCredentials(secretGetter, mdbNamespacedName, scramCredentialsSecretName) + existingSha1Creds, existingSha256Creds, err := readExistingCredentials(ctx, secretGetter, mdbNamespacedName, scramCredentialsSecretName) if err != nil { return false, err } @@ -194,7 +195,7 @@ func computeScramShaCredentials(username, password string, sha1Salt, sha256Salt // createScramCredentialsSecret will create a Secret that contains all of the fields required to read these credentials // back in the future. 
-func createScramCredentialsSecret(getUpdateCreator secret.GetUpdateCreator, mdbObjectKey types.NamespacedName, ref []metav1.OwnerReference, scramCredentialsSecretName string, sha1Creds, sha256Creds scramcredentials.ScramCreds) error { +func createScramCredentialsSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdbObjectKey types.NamespacedName, ref []metav1.OwnerReference, scramCredentialsSecretName string, sha1Creds, sha256Creds scramcredentials.ScramCreds) error { scramCredsSecret := secret.Builder(). SetName(scramCredentialsSecretName). SetNamespace(mdbObjectKey.Namespace). @@ -206,12 +207,12 @@ func createScramCredentialsSecret(getUpdateCreator secret.GetUpdateCreator, mdbO SetField(sha256ServerKeyKey, sha256Creds.ServerKey). SetOwnerReferences(ref). Build() - return secret.CreateOrUpdate(getUpdateCreator, scramCredsSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, scramCredsSecret) } // readExistingCredentials reads the existing set of credentials for both ScramSha 1 & 256 -func readExistingCredentials(secretGetter secret.Getter, mdbObjectKey types.NamespacedName, scramCredentialsSecretName string) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { - credentialsSecret, err := secretGetter.GetSecret(types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbObjectKey.Namespace}) +func readExistingCredentials(ctx context.Context, secretGetter secret.Getter, mdbObjectKey types.NamespacedName, scramCredentialsSecretName string) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { + credentialsSecret, err := secretGetter.GetSecret(ctx, types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbObjectKey.Namespace}) if err != nil { return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not get secret %s/%s: %s", mdbObjectKey.Namespace, scramCredentialsSecretName, err) } @@ -239,11 +240,11 @@ func readExistingCredentials(secretGetter secret.Getter, 
mdbObjectKey types.Name } // convertMongoDBResourceUsersToAutomationConfigUsers returns a list of users that are able to be set in the AutomationConfig -func convertMongoDBResourceUsersToAutomationConfigUsers(secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) ([]automationconfig.MongoDBUser, error) { +func convertMongoDBResourceUsersToAutomationConfigUsers(ctx context.Context, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) ([]automationconfig.MongoDBUser, error) { var usersWanted []automationconfig.MongoDBUser for _, u := range mdb.GetAuthUsers() { if u.Database != constants.ExternalDB { - acUser, err := convertMongoDBUserToAutomationConfigUser(secretGetUpdateCreateDeleter, mdb.NamespacedName(), mdb.GetOwnerReferences(), u) + acUser, err := convertMongoDBUserToAutomationConfigUser(ctx, secretGetUpdateCreateDeleter, mdb.NamespacedName(), mdb.GetOwnerReferences(), u) if err != nil { return nil, fmt.Errorf("failed to convert scram user %s to Automation Config user: %s", u.Username, err) } @@ -255,7 +256,7 @@ func convertMongoDBResourceUsersToAutomationConfigUsers(secretGetUpdateCreateDel // convertMongoDBUserToAutomationConfigUser converts a single user configured in the MongoDB resource and converts it to a user // that can be added directly to the AutomationConfig. 
-func convertMongoDBUserToAutomationConfigUser(secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdbNsName types.NamespacedName, ownerRef []metav1.OwnerReference, user authtypes.User) (automationconfig.MongoDBUser, error) { +func convertMongoDBUserToAutomationConfigUser(ctx context.Context, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdbNsName types.NamespacedName, ownerRef []metav1.OwnerReference, user authtypes.User) (automationconfig.MongoDBUser, error) { acUser := automationconfig.MongoDBUser{ Username: user.Username, Database: user.Database, @@ -266,7 +267,7 @@ func convertMongoDBUserToAutomationConfigUser(secretGetUpdateCreateDeleter secre Database: role.Database, }) } - sha1Creds, sha256Creds, err := ensureScramCredentials(secretGetUpdateCreateDeleter, user, mdbNsName, ownerRef) + sha1Creds, sha256Creds, err := ensureScramCredentials(ctx, secretGetUpdateCreateDeleter, user, mdbNsName, ownerRef) if err != nil { return automationconfig.MongoDBUser{}, fmt.Errorf("could not ensure scram credentials: %s", err) } diff --git a/pkg/authentication/scram/scram_test.go b/pkg/authentication/scram/scram_test.go index f6b9aedaf..dd43ffc9c 100644 --- a/pkg/authentication/scram/scram_test.go +++ b/pkg/authentication/scram/scram_test.go @@ -1,6 +1,7 @@ package scram import ( + "context" "os" "reflect" "testing" @@ -39,23 +40,24 @@ const ( ) func TestReadExistingCredentials(t *testing.T) { + ctx := context.Background() mdbObjectKey := types.NamespacedName{Name: "mdb-0", Namespace: "default"} user := mocks.BuildScramMongoDBUser("mdbuser-0") t.Run("credentials are successfully generated when all fields are present", func(t *testing.T) { scramCredsSecret := validScramCredentialsSecret(mdbObjectKey, user.ScramCredentialsSecretName) - scram1Creds, scram256Creds, err := readExistingCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) + scram1Creds, scram256Creds, err := 
readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) assert.NoError(t, err) assertScramCredsCredentialsValidity(t, scram1Creds, scram256Creds) }) t.Run("credentials are not generated if a field is missing", func(t *testing.T) { scramCredsSecret := invalidSecret(mdbObjectKey, user.ScramCredentialsSecretName) - _, _, err := readExistingCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) + _, _, err := readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) assert.Error(t, err) }) t.Run("credentials are not generated if the secret does not exist", func(t *testing.T) { scramCredsSecret := validScramCredentialsSecret(mdbObjectKey, user.ScramCredentialsSecretName) - _, _, err := readExistingCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, "different-username") + _, _, err := readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, "different-username") assert.Error(t, err) }) } @@ -79,14 +81,15 @@ func TestComputeScramCredentials_ComputesSameStoredAndServerKey_WithSameSalt(t * } func TestEnsureScramCredentials(t *testing.T) { + ctx := context.Background() mdb, user := buildConfigurableAndUser("mdb-0") t.Run("Fails when there is no password secret, and no credentials secret", func(t *testing.T) { - _, _, err := ensureScramCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(), user, mdb.NamespacedName(), nil) + _, _, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(), user, mdb.NamespacedName(), nil) assert.Error(t, err) }) t.Run("Existing credentials are used when password does not exist, but credentials secret has been created", func(t *testing.T) { scramCredentialsSecret := 
validScramCredentialsSecret(mdb.NamespacedName(), user.ScramCredentialsSecretName) - scram1Creds, scram256Creds, err := ensureScramCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret), user, mdb.NamespacedName(), nil) + scram1Creds, scram256Creds, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret), user, mdb.NamespacedName(), nil) assert.NoError(t, err) assertScramCredsCredentialsValidity(t, scram1Creds, scram256Creds) }) @@ -101,7 +104,7 @@ func TestEnsureScramCredentials(t *testing.T) { Build() scramCredentialsSecret := validScramCredentialsSecret(mdb.NamespacedName(), user.ScramCredentialsSecretName) - scram1Creds, scram256Creds, err := ensureScramCredentials(mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret, differentPasswordSecret), user, mdb.NamespacedName(), nil) + scram1Creds, scram256Creds, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret, differentPasswordSecret), user, mdb.NamespacedName(), nil) assert.NoError(t, err) assert.NotEqual(t, testSha1Salt, scram1Creds.Salt) assert.NotEmpty(t, scram1Creds.Salt) @@ -122,6 +125,7 @@ func TestEnsureScramCredentials(t *testing.T) { } func TestConvertMongoDBUserToAutomationConfigUser(t *testing.T) { + ctx := context.Background() mdb, user := buildConfigurableAndUser("mdb-0") t.Run("When password exists, the user is created in the automation config", func(t *testing.T) { @@ -131,7 +135,7 @@ func TestConvertMongoDBUserToAutomationConfigUser(t *testing.T) { SetField(user.PasswordSecretKey, "TDg_DESiScDrJV6"). 
Build() - acUser, err := convertMongoDBUserToAutomationConfigUser(mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret), mdb.NamespacedName(), nil, user) + acUser, err := convertMongoDBUserToAutomationConfigUser(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret), mdb.NamespacedName(), nil, user) assert.NoError(t, err) assert.Equal(t, user.Username, acUser.Username) @@ -146,18 +150,19 @@ func TestConvertMongoDBUserToAutomationConfigUser(t *testing.T) { }) t.Run("If there is no password secret, the creation fails", func(t *testing.T) { - _, err := convertMongoDBUserToAutomationConfigUser(mocks.NewMockedSecretGetUpdateCreateDeleter(), mdb.NamespacedName(), nil, user) + _, err := convertMongoDBUserToAutomationConfigUser(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(), mdb.NamespacedName(), nil, user) assert.Error(t, err) }) } func TestConfigureScram(t *testing.T) { + ctx := context.Background() t.Run("Should fail if there is no password present for the user", func(t *testing.T) { mdb, _ := buildConfigurableAndUser("mdb-0") s := mocks.NewMockedSecretGetUpdateCreateDeleter() auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := Enable(ctx, &auth, s, mdb) assert.Error(t, err) }) @@ -165,15 +170,15 @@ func TestConfigureScram(t *testing.T) { mdb := buildConfigurable("mdb-0") s := mocks.NewMockedSecretGetUpdateCreateDeleter() auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := Enable(ctx, &auth, s, mdb) assert.NoError(t, err) - passwordSecret, err := s.GetSecret(mdb.GetAgentPasswordSecretNamespacedName()) + passwordSecret, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) assert.NoError(t, err) assert.True(t, secret.HasAllKeys(passwordSecret, constants.AgentPasswordKey)) assert.NotEmpty(t, passwordSecret.Data[constants.AgentPasswordKey]) - keyfileSecret, err := s.GetSecret(mdb.GetAgentKeyfileSecretNamespacedName()) + keyfileSecret, err := s.GetSecret(ctx, mdb.GetAgentKeyfileSecretNamespacedName()) 
assert.NoError(t, err) assert.True(t, secret.HasAllKeys(keyfileSecret, constants.AgentKeyfileKey)) assert.NotEmpty(t, keyfileSecret.Data[constants.AgentKeyfileKey]) @@ -183,10 +188,10 @@ func TestConfigureScram(t *testing.T) { mdb := buildConfigurable("mdb-0") s := mocks.NewMockedSecretGetUpdateCreateDeleter() auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := Enable(ctx, &auth, s, mdb) assert.NoError(t, err) - passwordSecret, err := s.GetSecret(mdb.GetAgentPasswordSecretNamespacedName()) + passwordSecret, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) assert.NoError(t, err) actualRef := passwordSecret.GetOwnerReferences() @@ -209,10 +214,10 @@ func TestConfigureScram(t *testing.T) { s := mocks.NewMockedSecretGetUpdateCreateDeleter(agentPasswordSecret) auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := Enable(ctx, &auth, s, mdb) assert.NoError(t, err) - ps, err := s.GetSecret(mdb.GetAgentPasswordSecretNamespacedName()) + ps, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) assert.NoError(t, err) assert.True(t, secret.HasAllKeys(ps, constants.AgentPasswordKey)) assert.NotEmpty(t, ps.Data[constants.AgentPasswordKey]) @@ -231,10 +236,10 @@ func TestConfigureScram(t *testing.T) { s := mocks.NewMockedSecretGetUpdateCreateDeleter(keyfileSecret) auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := Enable(ctx, &auth, s, mdb) assert.NoError(t, err) - ks, err := s.GetSecret(mdb.GetAgentKeyfileSecretNamespacedName()) + ks, err := s.GetSecret(ctx, mdb.GetAgentKeyfileSecretNamespacedName()) assert.NoError(t, err) assert.True(t, secret.HasAllKeys(ks, constants.AgentKeyfileKey)) assert.Equal(t, "RuPeMaIe2g0SNTTa", string(ks.Data[constants.AgentKeyfileKey])) @@ -245,7 +250,7 @@ func TestConfigureScram(t *testing.T) { mdb := buildConfigurable("mdb-0") s := mocks.NewMockedSecretGetUpdateCreateDeleter() auth := automationconfig.Auth{} - err := Enable(&auth, s, mdb) + err := 
Enable(ctx, &auth, s, mdb) assert.NoError(t, err) }) } diff --git a/pkg/authentication/x509/x509.go b/pkg/authentication/x509/x509.go index 1506471ae..20297e35f 100644 --- a/pkg/authentication/x509/x509.go +++ b/pkg/authentication/x509/x509.go @@ -2,6 +2,7 @@ package x509 import ( "bytes" + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -27,13 +28,13 @@ import ( // Enable will configure all of the required Kubernetes resources for X509 to be enabled. // The agent password and keyfile contents will be configured and stored in a secret. // the user credentials will be generated if not present, or existing credentials will be read. -func Enable(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { +func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { opts := mdb.GetAuthOptions() desiredUsers := convertMongoDBResourceUsersToAutomationConfigUsers(mdb) if opts.AutoAuthMechanism == constants.X509 { - if err := ensureAgent(auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { + if err := ensureAgent(ctx, auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { return err } } @@ -41,19 +42,19 @@ func Enable(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.Get return enableClientAuthentication(auth, opts, desiredUsers) } -func ensureAgent(auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { +func ensureAgent(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { generatedContents, err := generate.KeyFileContents() if err != nil { 
return fmt.Errorf("could not generate keyfile contents: %s", err) } // ensure that the agent keyfile secret exists or read existing keyfile. - agentKeyFile, err := secret.EnsureSecretWithKey(secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) + agentKeyFile, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) if err != nil { return err } - agentCert, err := secret.ReadKey(secretGetUpdateCreateDeleter, "tls.crt", agentCertSecret) + agentCert, err := secret.ReadKey(ctx, secretGetUpdateCreateDeleter, "tls.crt", agentCertSecret) if err != nil { return err } diff --git a/pkg/authentication/x509/x509_test.go b/pkg/authentication/x509/x509_test.go index 13a4efb95..ed4f728fc 100644 --- a/pkg/authentication/x509/x509_test.go +++ b/pkg/authentication/x509/x509_test.go @@ -1,6 +1,7 @@ package x509 import ( + "context" "reflect" "testing" @@ -16,6 +17,7 @@ import ( ) func TestEnable(t *testing.T) { + ctx := context.Background() t.Run("X509 agent", func(t *testing.T) { auth := automationconfig.Auth{} mdb := buildX509Configurable("mdb", mocks.BuildX509MongoDBUser("my-user"), mocks.BuildScramMongoDBUser("my-scram-user")) @@ -28,7 +30,7 @@ func TestEnable(t *testing.T) { Build() secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret, keyfileSecret) - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) expected := automationconfig.Auth{ @@ -70,7 +72,7 @@ func TestEnable(t *testing.T) { secrets := mocks.NewMockedSecretGetUpdateCreateDeleter() - err := Enable(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) 
assert.NoError(t, err) expected := automationconfig.Auth{ @@ -100,18 +102,19 @@ func TestEnable(t *testing.T) { } func Test_ensureAgent(t *testing.T) { + ctx := context.Background() auth := automationconfig.Auth{} mdb := buildX509Configurable("mdb") secrets := mocks.NewMockedSecretGetUpdateCreateDeleter() - err := ensureAgent(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err := ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.Error(t, err) auth = automationconfig.Auth{} agentSecret := CreateAgentCertificateSecret("tls.pem", false, mdb.AgentCertificateSecretNamespacedName()) secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) - err = ensureAgent(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.Error(t, err) assert.ErrorContains(t, err, "key \"tls.crt\" not present in the Secret") @@ -119,7 +122,7 @@ func Test_ensureAgent(t *testing.T) { agentSecret = CreateAgentCertificateSecret("tls.crt", true, mdb.AgentCertificateSecretNamespacedName()) secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) - err = ensureAgent(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.Error(t, err) assert.ErrorContains(t, err, "x509: malformed certificate") @@ -127,7 +130,7 @@ func Test_ensureAgent(t *testing.T) { agentSecret = CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) - err = ensureAgent(&auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) assert.NoError(t, err) } diff --git a/pkg/automationconfig/automation_config.go b/pkg/automationconfig/automation_config.go index 
dac98a16b..855985108 100644 --- a/pkg/automationconfig/automation_config.go +++ b/pkg/automationconfig/automation_config.go @@ -3,6 +3,7 @@ package automationconfig import ( "bytes" "encoding/json" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials" "github.com/spf13/cast" "github.com/stretchr/objx" @@ -126,15 +127,16 @@ type LogRotate struct { } type Process struct { - Name string `json:"name"` - Disabled bool `json:"disabled"` - HostName string `json:"hostname"` - Args26 objx.Map `json:"args2_6"` - FeatureCompatibilityVersion string `json:"featureCompatibilityVersion"` - ProcessType ProcessType `json:"processType"` - Version string `json:"version"` - AuthSchemaVersion int `json:"authSchemaVersion"` - LogRotate AcLogRotate `json:"LogRotate,omitempty"` + Name string `json:"name"` + Disabled bool `json:"disabled"` + HostName string `json:"hostname"` + Args26 objx.Map `json:"args2_6"` + FeatureCompatibilityVersion string `json:"featureCompatibilityVersion"` + ProcessType ProcessType `json:"processType"` + Version string `json:"version"` + AuthSchemaVersion int `json:"authSchemaVersion"` + LogRotate *AcLogRotate `json:"logRotate,omitempty"` + AuditLogRotate *AcLogRotate `json:"auditLogRotate,omitempty"` } func (p *Process) SetPort(port int) *Process { @@ -179,13 +181,19 @@ func (p *Process) SetLogRotate(lr *CrdLogRotate) *Process { return p } +// SetAuditLogRotate sets the acLogRotate by converting the CrdLogRotate to an acLogRotate. +func (p *Process) SetAuditLogRotate(lr *CrdLogRotate) *Process { + p.AuditLogRotate = ConvertCrdLogRotateToAC(lr) + return p +} + // ConvertCrdLogRotateToAC converts a CrdLogRotate to an AcLogRotate representation. 
-func ConvertCrdLogRotateToAC(lr *CrdLogRotate) AcLogRotate { +func ConvertCrdLogRotateToAC(lr *CrdLogRotate) *AcLogRotate { if lr == nil { - return AcLogRotate{} + return &AcLogRotate{} } - return AcLogRotate{ + return &AcLogRotate{ LogRotate: LogRotate{ TimeThresholdHrs: lr.TimeThresholdHrs, NumUncompressed: lr.NumUncompressed, @@ -250,11 +258,20 @@ type EngineConfig struct { CacheSizeGB float32 `json:"cacheSizeGB"` } +// ReplSetForceConfig setting enables us to force reconfigure automation agent when the MongoDB deployment +// is in a broken state - for ex: doesn't have a primary. +// More info: https://www.mongodb.com/docs/ops-manager/current/reference/api/automation-config/automation-config-parameters/#replica-sets +type ReplSetForceConfig struct { + CurrentVersion int64 `json:"currentVersion"` +} + type ReplicaSet struct { - Id string `json:"_id"` - Members []ReplicaSetMember `json:"members"` - ProtocolVersion string `json:"protocolVersion"` - NumberArbiters int `json:"numberArbiters"` + Id string `json:"_id"` + Members []ReplicaSetMember `json:"members"` + ProtocolVersion string `json:"protocolVersion"` + NumberArbiters int `json:"numberArbiters"` + Force *ReplSetForceConfig `json:"force,omitempty"` + Settings map[string]interface{} `json:"settings,omitempty"` } type ReplicaSetMember struct { @@ -266,7 +283,7 @@ type ReplicaSetMember struct { // is different in AC from the CR(CR don't support float) - hence all the members are declared // separately Votes *int `json:"votes,omitempty"` - Priority float32 `json:"priority,omitempty"` + Priority *float32 `json:"priority,omitempty"` Tags map[string]string `json:"tags,omitempty"` } @@ -277,7 +294,7 @@ func newReplicaSetMember(name string, id int, horizons ReplicaSetHorizons, isArb // ensure that the number of voting members in the replica set is not more than 7 // as this is the maximum number of voting members. 
votes := 0 - priority := 0.0 + priority := float32(0.0) if isVotingMember { votes = 1 @@ -290,7 +307,7 @@ func newReplicaSetMember(name string, id int, horizons ReplicaSetHorizons, isArb ArbiterOnly: isArbiter, Horizons: horizons, Votes: &votes, - Priority: float32(priority), + Priority: &priority, } } @@ -317,6 +334,15 @@ type Auth struct { KeyFileWindows string `json:"keyfileWindows,omitempty"` // AutoPwd is a required field when going from `Disabled=false` to `Disabled=true` AutoPwd string `json:"autoPwd,omitempty"` + // UsersDeleted is an array of DeletedUser objects that define the authenticated users to be deleted from specified databases + UsersDeleted []DeletedUser `json:"usersDeleted,omitempty"` +} + +type DeletedUser struct { + // User is the username that should be deleted + User string `json:"user,omitempty"` + // Dbs is the array of database names from which the authenticated user should be deleted + Dbs []string `json:"dbs,omitempty"` } type Prometheus struct { @@ -468,7 +494,7 @@ func FromBytes(acBytes []byte) (AutomationConfig, error) { return ac, nil } -func ConfigureAgentConfiguration(systemLog *SystemLog, logRotate *CrdLogRotate, p *Process) { +func ConfigureAgentConfiguration(systemLog *SystemLog, logRotate *CrdLogRotate, auditLR *CrdLogRotate, p *Process) { if systemLog != nil { p.SetSystemLog(*systemLog) } @@ -481,6 +507,7 @@ func ConfigureAgentConfiguration(systemLog *SystemLog, logRotate *CrdLogRotate, zap.S().Warn("Configuring LogRotate with systemLog.Destination = Syslog will not work") } p.SetLogRotate(logRotate) + p.SetAuditLogRotate(auditLR) } } diff --git a/pkg/automationconfig/automation_config_builder.go b/pkg/automationconfig/automation_config_builder.go index 02411c0e1..3091734dc 100644 --- a/pkg/automationconfig/automation_config_builder.go +++ b/pkg/automationconfig/automation_config_builder.go @@ -6,8 +6,9 @@ import ( "strings" "github.com/blang/semver" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions" + + 
"k8s.io/utils/ptr" ) type Topology string @@ -39,19 +40,22 @@ type Builder struct { mongodbVersion string previousAC AutomationConfig // MongoDB installable versions - versions []MongoDbVersionConfig - backupVersions []BackupVersion - monitoringVersions []MonitoringVersion - options Options - processModifications []func(int, *Process) - modifications []Modification - auth *Auth - cafilePath string - sslConfig *TLS - tlsConfig *TLS - dataDir string - port int - memberOptions []MemberOptions + versions []MongoDbVersionConfig + backupVersions []BackupVersion + monitoringVersions []MonitoringVersion + options Options + processModifications []func(int, *Process) + modifications []Modification + auth *Auth + cafilePath string + sslConfig *TLS + tlsConfig *TLS + dataDir string + port int + memberOptions []MemberOptions + forceReconfigureToVersion *int64 + replicaSetId *string + settings map[string]interface{} } func NewBuilder() *Builder { @@ -191,6 +195,21 @@ func (b *Builder) SetAuth(auth Auth) *Builder { return b } +func (b *Builder) SetReplicaSetId(id *string) *Builder { + b.replicaSetId = id + return b +} + +func (b *Builder) SetSettings(settings map[string]interface{}) *Builder { + b.settings = settings + return b +} + +func (b *Builder) SetForceReconfigureToVersion(version int64) *Builder { + b.forceReconfigureToVersion = &version + return b +} + func (b *Builder) AddProcessModification(f func(int, *Process)) *Builder { b.processModifications = append(b.processModifications, f) return b @@ -339,7 +358,7 @@ func (b *Builder) Build() (AutomationConfig, error) { if len(b.memberOptions) > i { // override the member options if explicitly specified in the spec members[i].Votes = b.memberOptions[i].Votes - members[i].Priority = b.memberOptions[i].GetPriority() + members[i].Priority = ptr.To(b.memberOptions[i].GetPriority()) members[i].Tags = b.memberOptions[i].Tags } } @@ -354,15 +373,27 @@ func (b *Builder) Build() (AutomationConfig, error) { b.versions = 
append(b.versions, dummyConfig) } + var replSetForceConfig *ReplSetForceConfig + if b.forceReconfigureToVersion != nil { + replSetForceConfig = &ReplSetForceConfig{CurrentVersion: *b.forceReconfigureToVersion} + } + + replicaSetId := b.name + if b.replicaSetId != nil { + replicaSetId = *b.replicaSetId + } + currentAc := AutomationConfig{ Version: b.previousAC.Version, Processes: processes, ReplicaSets: []ReplicaSet{ { - Id: b.name, + Id: replicaSetId, Members: members, ProtocolVersion: "1", NumberArbiters: b.arbiters, + Force: replSetForceConfig, + Settings: b.settings, }, }, MonitoringVersions: b.monitoringVersions, diff --git a/pkg/automationconfig/automation_config_secret.go b/pkg/automationconfig/automation_config_secret.go index c27aa766e..9ca6ed469 100644 --- a/pkg/automationconfig/automation_config_secret.go +++ b/pkg/automationconfig/automation_config_secret.go @@ -1,6 +1,7 @@ package automationconfig import ( + "context" "encoding/json" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" @@ -12,8 +13,8 @@ const ConfigKey = "cluster-config.json" // ReadFromSecret returns the AutomationConfig present in the given Secret. If the Secret is not // found, it is not considered an error and an empty AutomationConfig is returned. -func ReadFromSecret(secretGetter secret.Getter, secretNsName types.NamespacedName) (AutomationConfig, error) { - acSecret, err := secretGetter.GetSecret(secretNsName) +func ReadFromSecret(ctx context.Context, secretGetter secret.Getter, secretNsName types.NamespacedName) (AutomationConfig, error) { + acSecret, err := secretGetter.GetSecret(ctx, secretNsName) if err != nil { if secret.SecretNotExist(err) { err = nil @@ -27,11 +28,11 @@ func ReadFromSecret(secretGetter secret.Getter, secretNsName types.NamespacedNam // if the desired config is the same as the current contents, no change is made. // The most recent AutomationConfig is returned. 
If no change is made, it will return the existing one, if there // is a change, the new AutomationConfig is returned. -func EnsureSecret(secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomationConfig AutomationConfig) (AutomationConfig, error) { - existingSecret, err := secretGetUpdateCreator.GetSecret(secretNsName) +func EnsureSecret(ctx context.Context, secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomationConfig AutomationConfig) (AutomationConfig, error) { + existingSecret, err := secretGetUpdateCreator.GetSecret(ctx, secretNsName) if err != nil { if secret.SecretNotExist(err) { - return createNewAutomationConfigSecret(secretGetUpdateCreator, secretNsName, owner, desiredAutomationConfig) + return createNewAutomationConfigSecret(ctx, secretGetUpdateCreator, secretNsName, owner, desiredAutomationConfig) } return AutomationConfig{}, err } @@ -62,10 +63,10 @@ func EnsureSecret(secretGetUpdateCreator secret.GetUpdateCreator, secretNsName t existingSecret.Name = secretNsName.Name existingSecret.Namespace = secretNsName.Namespace - return desiredAutomationConfig, secretGetUpdateCreator.UpdateSecret(existingSecret) + return desiredAutomationConfig, secretGetUpdateCreator.UpdateSecret(ctx, existingSecret) } -func createNewAutomationConfigSecret(secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomation AutomationConfig) (AutomationConfig, error) { +func createNewAutomationConfigSecret(ctx context.Context, secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomation AutomationConfig) (AutomationConfig, error) { acBytes, err := json.Marshal(desiredAutomation) if err != nil { return AutomationConfig{}, err @@ -78,7 +79,7 @@ func createNewAutomationConfigSecret(secretGetUpdateCreator 
secret.GetUpdateCrea SetOwnerReferences(owner). Build() - if err := secretGetUpdateCreator.CreateSecret(newSecret); err != nil { + if err := secretGetUpdateCreator.CreateSecret(ctx, newSecret); err != nil { return AutomationConfig{}, err } return desiredAutomation, nil diff --git a/pkg/automationconfig/automation_config_secret_test.go b/pkg/automationconfig/automation_config_secret_test.go index 15c20f362..ed9a4af77 100644 --- a/pkg/automationconfig/automation_config_secret_test.go +++ b/pkg/automationconfig/automation_config_secret_test.go @@ -1,6 +1,7 @@ package automationconfig import ( + "context" "encoding/json" "testing" @@ -14,6 +15,7 @@ import ( ) func TestEnsureSecret(t *testing.T) { + ctx := context.Background() secretNsName := types.NamespacedName{Name: "ac-secret", Namespace: "test-namespace"} desiredAutomationConfig, err := newAutomationConfig() assert.NoError(t, err) @@ -27,11 +29,11 @@ func TestEnsureSecret(t *testing.T) { secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} - ac, err := EnsureSecret(secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) assert.NoError(t, err) assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") - acSecret, err := secretGetUpdateCreator.GetSecret(secretNsName) + acSecret, err := secretGetUpdateCreator.GetSecret(ctx, secretNsName) assert.NoError(t, err) assert.Contains(t, acSecret.Data, ConfigKey, "The secret of the given name should have been updated with the config.") @@ -39,9 +41,10 @@ func TestEnsureSecret(t *testing.T) { }) t.Run("test LogRotate marshal and unmarshal", func(t *testing.T) { + ctx := context.Background() desiredAutomationConfig, err = NewBuilder().SetMembers(3).AddProcessModification(func(i_ int, p *Process) { - p.SetLogRotate(&CrdLogRotate{ + lr := &CrdLogRotate{ 
SizeThresholdMB: "0.001", LogRotate: LogRotate{ TimeThresholdHrs: 1, @@ -50,7 +53,9 @@ func TestEnsureSecret(t *testing.T) { IncludeAuditLogsWithMongoDBLogs: false, }, PercentOfDiskspace: "1", - }) + } + p.SetLogRotate(lr) + p.SetAuditLogRotate(lr) }).Build() assert.NoError(t, err) @@ -61,7 +66,7 @@ func TestEnsureSecret(t *testing.T) { secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} - ac, err := EnsureSecret(secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) assert.NoError(t, err) assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") @@ -69,7 +74,33 @@ func TestEnsureSecret(t *testing.T) { acFromBytes, err := FromBytes(bytes) assert.NoError(t, err) assert.Equal(t, 0.001, acFromBytes.Processes[0].LogRotate.SizeThresholdMB) + assert.Equal(t, 0.001, acFromBytes.Processes[0].AuditLogRotate.SizeThresholdMB) assert.Equal(t, float64(1), acFromBytes.Processes[0].LogRotate.PercentOfDiskspace) + assert.Equal(t, float64(1), acFromBytes.Processes[0].AuditLogRotate.PercentOfDiskspace) + }) + + t.Run("test LogRotate marshal and unmarshal if not set", func(t *testing.T) { + ctx := context.Background() + + desiredAutomationConfig, err = NewBuilder().SetMembers(3).AddProcessModification(func(i_ int, p *Process) {}).Build() + assert.NoError(t, err) + + s := secret.Builder(). + SetName(secretNsName.Name). + SetNamespace(secretNsName.Namespace). 
+ Build() + + secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} + + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + assert.NoError(t, err) + assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") + + bytes := s.Data[ConfigKey] + acFromBytes, err := FromBytes(bytes) + assert.NoError(t, err) + assert.NotEqual(t, &AcLogRotate{}, acFromBytes.Processes[0].LogRotate) + assert.Nil(t, acFromBytes.Processes[0].LogRotate) }) t.Run("When the existing Automation Config is different the Automation Config Changes", func(t *testing.T) { @@ -84,7 +115,7 @@ func TestEnsureSecret(t *testing.T) { newAc, err := newAutomationConfigBuilder().SetDomain("different-domain").Build() assert.NoError(t, err) - res, err := EnsureSecret(secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, newAc) + res, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, newAc) assert.NoError(t, err) assert.Equal(t, newAc, res) @@ -117,7 +148,7 @@ type mockSecretGetUpdateCreator struct { secret *corev1.Secret } -func (m *mockSecretGetUpdateCreator) GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) { +func (m *mockSecretGetUpdateCreator) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { if m.secret != nil { if objectKey.Name == m.secret.Name && objectKey.Namespace == m.secret.Namespace { return *m.secret, nil @@ -126,12 +157,12 @@ func (m *mockSecretGetUpdateCreator) GetSecret(objectKey client.ObjectKey) (core return corev1.Secret{}, notFoundError() } -func (m *mockSecretGetUpdateCreator) UpdateSecret(secret corev1.Secret) error { +func (m *mockSecretGetUpdateCreator) UpdateSecret(ctx context.Context, secret corev1.Secret) error { m.secret = &secret return nil } -func (m *mockSecretGetUpdateCreator) CreateSecret(secret corev1.Secret) error { +func (m *mockSecretGetUpdateCreator) 
CreateSecret(ctx context.Context, secret corev1.Secret) error { if m.secret == nil { m.secret = &secret return nil diff --git a/pkg/automationconfig/automation_config_test.go b/pkg/automationconfig/automation_config_test.go index e38fd3db6..19b3bcfe8 100644 --- a/pkg/automationconfig/automation_config_test.go +++ b/pkg/automationconfig/automation_config_test.go @@ -28,13 +28,15 @@ func defaultMongoDbVersion(version string) MongoDbVersionConfig { } func TestBuildAutomationConfig(t *testing.T) { - ac, err := NewBuilder(). + builder := NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). SetMembers(3). SetFCV("4.0"). - Build() + SetForceReconfigureToVersion(-1) + + ac, err := builder.Build() assert.NoError(t, err) assert.Len(t, ac.Processes, 3) @@ -56,6 +58,8 @@ func TestBuildAutomationConfig(t *testing.T) { rs := ac.ReplicaSets[0] assert.Equal(t, rs.Id, "my-rs", "The name provided should be configured to be the rs id") assert.Len(t, rs.Members, 3, "there should be the number of replicas provided") + require.NotNil(t, rs.Force) + assert.Equal(t, ReplSetForceConfig{CurrentVersion: -1}, *rs.Force) for i, member := range rs.Members { assert.Equal(t, 1, *member.Votes) @@ -63,6 +67,13 @@ func TestBuildAutomationConfig(t *testing.T) { assert.Equal(t, i, member.Id) assert.Equal(t, ac.Processes[i].Name, member.Host) } + + builder.SetForceReconfigureToVersion(1) + ac, err = builder.Build() + assert.NoError(t, err) + rs = ac.ReplicaSets[0] + require.NotNil(t, rs.Force) + assert.Equal(t, ReplSetForceConfig{CurrentVersion: 1}, *rs.Force) } func TestBuildAutomationConfigArbiters(t *testing.T) { @@ -466,6 +477,23 @@ func TestAreEqual(t *testing.T) { assert.NoError(t, err) assert.False(t, areEqual) }) + + t.Run("Automation Configs with nil and zero values are not equal", func(t *testing.T) { + votes := 1 + priority := "0.0" + firstBuilder := NewBuilder().SetName("name0").SetMongoDBVersion("mdbVersion0").SetOptions(Options{DownloadBase: 
"downloadBase0"}).SetDomain("domain0").SetMembers(2).SetAuth(Auth{Disabled: true}) + firstBuilder.SetMemberOptions([]MemberOptions{MemberOptions{Votes: &votes, Priority: &priority}}) + firstAc, _ := firstBuilder.Build() + firstAc.Version = 2 + secondBuilder := NewBuilder().SetName("name0").SetMongoDBVersion("mdbVersion0").SetOptions(Options{DownloadBase: "downloadBase0"}).SetDomain("domain0").SetMembers(2).SetAuth(Auth{Disabled: true}) + secondBuilder.SetMemberOptions([]MemberOptions{MemberOptions{Votes: &votes, Priority: nil}}) + secondAc, _ := secondBuilder.Build() + secondAc.Version = 2 + + areEqual, err := AreEqual(firstAc, secondAc) + assert.NoError(t, err) + assert.False(t, areEqual) + }) } func TestValidateFCV(t *testing.T) { @@ -502,3 +530,20 @@ func createAutomationConfig(name, mongodbVersion, domain string, opts Options, a ac.Version = acVersion return ac } + +func TestReplicaSetId(t *testing.T) { + id := "rs0" + ac, err := NewBuilder(). + SetName("my-rs"). + SetDomain("my-ns.svc.cluster.local"). + SetMongoDBVersion("4.2.0"). + SetMembers(3). + AddVersion(defaultMongoDbVersion("4.3.2")). + SetReplicaSetId(&id). + Build() + + assert.NoError(t, err) + assert.Len(t, ac.ReplicaSets, 1) + rs := ac.ReplicaSets[0] + assert.Equal(t, rs.Id, id, "The provided id should be used") +} diff --git a/pkg/automationconfig/zz_generated.deepcopy.go b/pkg/automationconfig/zz_generated.deepcopy.go index 0a4e123ab..723f24ba9 100644 --- a/pkg/automationconfig/zz_generated.deepcopy.go +++ b/pkg/automationconfig/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2021. 
diff --git a/pkg/kube/annotations/annotations.go b/pkg/kube/annotations/annotations.go index 9f508b990..44f5e9695 100644 --- a/pkg/kube/annotations/annotations.go +++ b/pkg/kube/annotations/annotations.go @@ -35,9 +35,9 @@ func GetAnnotation(object client.Object, key string) string { } // SetAnnotations updates the objects.Annotation with the supplied annotation and does the same with the object backed in kubernetes. -func SetAnnotations(object client.Object, annotations map[string]string, kubeClient client.Client) error { +func SetAnnotations(ctx context.Context, object client.Object, annotations map[string]string, kubeClient client.Client) error { currentObject := object.DeepCopyObject().(client.Object) - err := kubeClient.Get(context.TODO(), types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, currentObject) + err := kubeClient.Get(ctx, types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, currentObject) if err != nil { return err } @@ -68,17 +68,17 @@ func SetAnnotations(object client.Object, annotations map[string]string, kubeCli } patch := client.RawPatch(types.JSONPatchType, data) - if err = kubeClient.Patch(context.TODO(), currentObject, patch); err != nil { + if err = kubeClient.Patch(ctx, currentObject, patch); err != nil { return err } object.SetAnnotations(currentObject.GetAnnotations()) return nil } -func UpdateLastAppliedMongoDBVersion(mdb Versioned, kubeClient client.Client) error { +func UpdateLastAppliedMongoDBVersion(ctx context.Context, mdb Versioned, kubeClient client.Client) error { annotations := map[string]string{ LastAppliedMongoDBVersion: mdb.GetMongoDBVersionForAnnotation(), } - return SetAnnotations(mdb, annotations, kubeClient) + return SetAnnotations(ctx, mdb, annotations, kubeClient) } diff --git a/pkg/kube/client/client.go b/pkg/kube/client/client.go index f70048226..640e23373 100644 --- a/pkg/kube/client/client.go +++ b/pkg/kube/client/client.go @@ -27,7 +27,7 @@ type Client 
interface { k8sClient.Client KubernetesSecretClient // TODO: remove this function, add mongodb package which has GetAndUpdate function - GetAndUpdate(nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error + GetAndUpdate(ctx context.Context, nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error configmap.GetUpdateCreateDeleter service.GetUpdateCreateDeleter statefulset.GetUpdateCreateDeleter @@ -45,119 +45,119 @@ type client struct { // GetAndUpdate fetches the most recent version of the runtime.Object with the provided // nsName and applies the update function. The update function should update "obj" from // an outer scope -func (c client) GetAndUpdate(nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error { - err := c.Get(context.TODO(), nsName, obj) +func (c client) GetAndUpdate(ctx context.Context, nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error { + err := c.Get(ctx, nsName, obj) if err != nil { return err } // apply the function on the most recent version of the resource updateFunc() - return c.Update(context.TODO(), obj) + return c.Update(ctx, obj) } // GetConfigMap provides a thin wrapper and client.client to access corev1.ConfigMap types -func (c client) GetConfigMap(objectKey k8sClient.ObjectKey) (corev1.ConfigMap, error) { +func (c client) GetConfigMap(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.ConfigMap, error) { cm := corev1.ConfigMap{} - if err := c.Get(context.TODO(), objectKey, &cm); err != nil { + if err := c.Get(ctx, objectKey, &cm); err != nil { return corev1.ConfigMap{}, err } return cm, nil } // UpdateConfigMap provides a thin wrapper and client.Client to update corev1.ConfigMap types -func (c client) UpdateConfigMap(cm corev1.ConfigMap) error { - return c.Update(context.TODO(), &cm) +func (c client) UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { + return c.Update(ctx, &cm) } // CreateConfigMap provides a thin wrapper 
and client.Client to create corev1.ConfigMap types -func (c client) CreateConfigMap(cm corev1.ConfigMap) error { - return c.Create(context.TODO(), &cm) +func (c client) CreateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { + return c.Create(ctx, &cm) } // DeleteConfigMap deletes the configmap of the given object key -func (c client) DeleteConfigMap(key k8sClient.ObjectKey) error { +func (c client) DeleteConfigMap(ctx context.Context, key k8sClient.ObjectKey) error { cm := corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, }, } - return c.Delete(context.TODO(), &cm) + return c.Delete(ctx, &cm) } // GetPod provides a thin wrapper and client.client to access corev1.Pod types. -func (c client) GetPod(objectKey k8sClient.ObjectKey) (corev1.Pod, error) { +func (c client) GetPod(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Pod, error) { p := corev1.Pod{} - if err := c.Get(context.TODO(), objectKey, &p); err != nil { + if err := c.Get(ctx, objectKey, &p); err != nil { return corev1.Pod{}, err } return p, nil } // GetSecret provides a thin wrapper and client.Client to access corev1.Secret types -func (c client) GetSecret(objectKey k8sClient.ObjectKey) (corev1.Secret, error) { +func (c client) GetSecret(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Secret, error) { s := corev1.Secret{} - if err := c.Get(context.TODO(), objectKey, &s); err != nil { + if err := c.Get(ctx, objectKey, &s); err != nil { return corev1.Secret{}, err } return s, nil } // UpdateSecret provides a thin wrapper and client.Client to update corev1.Secret types -func (c client) UpdateSecret(secret corev1.Secret) error { - return c.Update(context.TODO(), &secret) +func (c client) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + return c.Update(ctx, &secret) } // CreateSecret provides a thin wrapper and client.Client to create corev1.Secret types -func (c client) CreateSecret(secret corev1.Secret) error { - 
return c.Create(context.TODO(), &secret) +func (c client) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.Create(ctx, &secret) } // DeleteSecret provides a thin wrapper and client.Client to delete corev1.Secret types -func (c client) DeleteSecret(key k8sClient.ObjectKey) error { +func (c client) DeleteSecret(ctx context.Context, key k8sClient.ObjectKey) error { s := corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: key.Name, Namespace: key.Namespace, }, } - return c.Delete(context.TODO(), &s) + return c.Delete(ctx, &s) } // GetService provides a thin wrapper and client.Client to access corev1.Service types -func (c client) GetService(objectKey k8sClient.ObjectKey) (corev1.Service, error) { +func (c client) GetService(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Service, error) { s := corev1.Service{} - if err := c.Get(context.TODO(), objectKey, &s); err != nil { + if err := c.Get(ctx, objectKey, &s); err != nil { return corev1.Service{}, err } return s, nil } // UpdateService provides a thin wrapper and client.Client to update corev1.Service types -func (c client) UpdateService(service corev1.Service) error { - return c.Update(context.TODO(), &service) +func (c client) UpdateService(ctx context.Context, service corev1.Service) error { + return c.Update(ctx, &service) } // CreateService provides a thin wrapper and client.Client to create corev1.Service types -func (c client) CreateService(service corev1.Service) error { - return c.Create(context.TODO(), &service) +func (c client) CreateService(ctx context.Context, service corev1.Service) error { + return c.Create(ctx, &service) } // DeleteService provides a thin wrapper around client.Client to delete corev1.Service types -func (c client) DeleteService(objectKey k8sClient.ObjectKey) error { +func (c client) DeleteService(ctx context.Context, objectKey k8sClient.ObjectKey) error { svc := corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: objectKey.Name, Namespace: 
objectKey.Namespace, }, } - return c.Delete(context.TODO(), &svc) + return c.Delete(ctx, &svc) } // GetStatefulSet provides a thin wrapper and client.Client to access appsv1.StatefulSet types -func (c client) GetStatefulSet(objectKey k8sClient.ObjectKey) (appsv1.StatefulSet, error) { +func (c client) GetStatefulSet(ctx context.Context, objectKey k8sClient.ObjectKey) (appsv1.StatefulSet, error) { sts := appsv1.StatefulSet{} - if err := c.Get(context.TODO(), objectKey, &sts); err != nil { + if err := c.Get(ctx, objectKey, &sts); err != nil { return appsv1.StatefulSet{}, err } return sts, nil @@ -165,24 +165,24 @@ func (c client) GetStatefulSet(objectKey k8sClient.ObjectKey) (appsv1.StatefulSe // UpdateStatefulSet provides a thin wrapper and client.Client to update appsv1.StatefulSet types // the updated StatefulSet is returned -func (c client) UpdateStatefulSet(sts appsv1.StatefulSet) (appsv1.StatefulSet, error) { +func (c client) UpdateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) (appsv1.StatefulSet, error) { stsToUpdate := &sts - err := c.Update(context.TODO(), stsToUpdate) + err := c.Update(ctx, stsToUpdate) return *stsToUpdate, err } // CreateStatefulSet provides a thin wrapper and client.Client to create appsv1.StatefulSet types -func (c client) CreateStatefulSet(sts appsv1.StatefulSet) error { - return c.Create(context.TODO(), &sts) +func (c client) CreateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) error { + return c.Create(ctx, &sts) } // DeleteStatefulSet provides a thin wrapper and client.Client to delete appsv1.StatefulSet types -func (c client) DeleteStatefulSet(objectKey k8sClient.ObjectKey) error { +func (c client) DeleteStatefulSet(ctx context.Context, objectKey k8sClient.ObjectKey) error { sts := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: objectKey.Name, Namespace: objectKey.Namespace, }, } - return c.Delete(context.TODO(), &sts) + return c.Delete(ctx, &sts) } diff --git a/pkg/kube/client/client_test.go 
b/pkg/kube/client/client_test.go index 0a8b5c726..083df075b 100644 --- a/pkg/kube/client/client_test.go +++ b/pkg/kube/client/client_test.go @@ -2,8 +2,6 @@ package client import ( "context" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" "testing" "k8s.io/apimachinery/pkg/types" @@ -15,20 +13,21 @@ import ( ) func TestChangingName_CreatesNewObject(t *testing.T) { + ctx := context.Background() cm := configmap.Builder(). SetName("some-name"). SetNamespace("some-namespace"). Build() client := NewClient(NewMockedClient()) - err := configmap.CreateOrUpdate(client, cm) + err := configmap.CreateOrUpdate(ctx, client, cm) assert.NoError(t, err) newCm := corev1.ConfigMap{} objectKey := k8sClient.ObjectKeyFromObject(&cm) assert.NoError(t, err) - err = client.Get(context.TODO(), objectKey, &newCm) + err = client.Get(ctx, objectKey, &newCm) assert.NoError(t, err) assert.Equal(t, newCm.Name, "some-name") @@ -37,79 +36,51 @@ func TestChangingName_CreatesNewObject(t *testing.T) { newCm.Name = "new-name" objectKey = k8sClient.ObjectKeyFromObject(&newCm) - _ = configmap.CreateOrUpdate(client, newCm) + _ = configmap.CreateOrUpdate(ctx, client, newCm) - _ = client.Get(context.TODO(), objectKey, &newCm) + _ = client.Get(ctx, objectKey, &newCm) assert.Equal(t, newCm.Name, "new-name") assert.Equal(t, newCm.Namespace, "some-namespace") } func TestAddingDataField_ModifiesExistingObject(t *testing.T) { + ctx := context.Background() cm := configmap.Builder(). SetName("some-name"). SetNamespace("some-namespace"). 
Build() client := NewClient(NewMockedClient()) - err := configmap.CreateOrUpdate(client, cm) + err := configmap.CreateOrUpdate(ctx, client, cm) assert.NoError(t, err) cm.Data["new-field"] = "value" - _ = configmap.CreateOrUpdate(client, cm) + _ = configmap.CreateOrUpdate(ctx, client, cm) newCm := corev1.ConfigMap{} objectKey := k8sClient.ObjectKeyFromObject(&newCm) assert.NoError(t, err) - _ = client.Get(context.TODO(), objectKey, &newCm) + _ = client.Get(ctx, objectKey, &newCm) assert.Contains(t, cm.Data, "new-field") assert.Equal(t, cm.Data["new-field"], "value") } func TestDeleteConfigMap(t *testing.T) { + ctx := context.Background() cm := configmap.Builder(). SetName("config-map"). SetNamespace("default"). Build() client := NewClient(NewMockedClient()) - err := client.CreateConfigMap(cm) + err := client.CreateConfigMap(ctx, cm) assert.NoError(t, err) - err = client.DeleteConfigMap(types.NamespacedName{Name: "config-map", Namespace: "default"}) + err = client.DeleteConfigMap(ctx, types.NamespacedName{Name: "config-map", Namespace: "default"}) assert.NoError(t, err) - _, err = client.GetConfigMap(types.NamespacedName{Name: "config-map", Namespace: "default"}) + _, err = client.GetConfigMap(ctx, types.NamespacedName{Name: "config-map", Namespace: "default"}) assert.Equal(t, err, notFoundError()) } - -// TestSetAnnotationsDoesNotChangeSuppliedObject verifies that the supplied object for annotations.SetAnnotations is not overridden due being a shallow copy. -// the function lies here, otherwise it will lead to import cycles. -func TestSetAnnotationsDoesNotChangeSuppliedObject(t *testing.T) { - c := NewClient(NewMockedClient()) - backedService := service.Builder(). - SetName("some-name"). - SetNamespace("some-namespace"). - SetAnnotations(map[string]string{"one": "annotation"}). - SetClusterIP("123"). - Build() - err := service.CreateOrUpdateService(c, backedService) - assert.NoError(t, err) - - serviceWithoutAnnotation := service.Builder(). - SetName("some-name"). 
- SetNamespace("some-namespace"). - Build() - - // make sure this method only changes the annotations locally and in kube - err = annotations.SetAnnotations(&serviceWithoutAnnotation, map[string]string{"new": "something"}, c) - assert.NoError(t, err) - assert.Len(t, serviceWithoutAnnotation.Annotations, 2) - assert.Equal(t, "", serviceWithoutAnnotation.Spec.ClusterIP) - - err = c.Get(context.TODO(), types.NamespacedName{Name: serviceWithoutAnnotation.GetName(), Namespace: serviceWithoutAnnotation.GetNamespace()}, &serviceWithoutAnnotation) - assert.NoError(t, err) - assert.Len(t, serviceWithoutAnnotation.Annotations, 2) - assert.Equal(t, "123", serviceWithoutAnnotation.Spec.ClusterIP) -} diff --git a/pkg/kube/client/mocked_client.go b/pkg/kube/client/mocked_client.go index dd1747690..f4a8e499e 100644 --- a/pkg/kube/client/mocked_client.go +++ b/pkg/kube/client/mocked_client.go @@ -4,63 +4,43 @@ import ( "context" "encoding/json" "fmt" - "reflect" - "strings" - appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" - meta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "reflect" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "strings" ) -// mockedClient dynamically creates maps to store instances of k8sClient.Object -type mockedClient struct { - backingMap map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object -} - -// notFoundError returns an error which returns true for "errors.IsNotFound" -func notFoundError() error { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} -} +var ( + _ k8sClient.Client = mockedClient{} + _ k8sClient.StatusWriter = mockedStatusWriter{} +) -func alreadyExistsError() error { - return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists}} +type patchValue struct { + 
Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` } -func NewMockedClient() k8sClient.Client { - return &mockedClient{backingMap: map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object{}} +// mockedClient dynamically creates maps to store instances of k8sClient.Object +type mockedClient struct { + backingMap map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object } -func (m *mockedClient) ensureMapFor(obj k8sClient.Object) map[k8sClient.ObjectKey]k8sClient.Object { - t := reflect.TypeOf(obj) - if _, ok := m.backingMap[t]; !ok { - m.backingMap[t] = map[k8sClient.ObjectKey]k8sClient.Object{} - } - return m.backingMap[t] +func (m mockedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + panic("not implemented") } -func (m *mockedClient) Get(_ context.Context, key k8sClient.ObjectKey, obj k8sClient.Object) error { - relevantMap := m.ensureMapFor(obj) - if val, ok := relevantMap[key]; ok { - if currSts, ok := val.(*appsv1.StatefulSet); ok { - // TODO: this currently doesn't work with additional mongodb config - // just doing it for StatefulSets for now - objCopy := currSts.DeepCopyObject() - v := reflect.ValueOf(obj).Elem() - v.Set(reflect.ValueOf(objCopy).Elem()) - } else { - v := reflect.ValueOf(obj).Elem() - v.Set(reflect.ValueOf(val).Elem()) - } - return nil - } - return notFoundError() +func (m mockedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + panic("not implemented") } -func (m *mockedClient) Create(_ context.Context, obj k8sClient.Object, _ ...k8sClient.CreateOption) error { +func (m mockedClient) Create(_ context.Context, obj k8sClient.Object, _ ...k8sClient.CreateOption) error { relevantMap := m.ensureMapFor(obj) objKey := k8sClient.ObjectKeyFromObject(obj) if _, ok := relevantMap[objKey]; ok { @@ -76,43 +56,23 @@ func (m *mockedClient) Create(_ context.Context, obj k8sClient.Object, _ ...k8sC return nil } -// makeStatefulSetReady configures the statefulset to be in 
the running state. -func makeStatefulSetReady(set *appsv1.StatefulSet) { - set.Status.UpdatedReplicas = *set.Spec.Replicas - set.Status.ReadyReplicas = *set.Spec.Replicas -} - -func (m *mockedClient) List(_ context.Context, _ k8sClient.ObjectList, _ ...k8sClient.ListOption) error { - return nil -} - -func (m *mockedClient) Delete(_ context.Context, obj k8sClient.Object, _ ...k8sClient.DeleteOption) error { - relevantMap := m.ensureMapFor(obj) - objKey := k8sClient.ObjectKeyFromObject(obj) - delete(relevantMap, objKey) - return nil -} - -func (m *mockedClient) Update(_ context.Context, obj k8sClient.Object, _ ...k8sClient.UpdateOption) error { +func (m mockedClient) Update(_ context.Context, obj k8sClient.Object, _ ...k8sClient.UpdateOption) error { relevantMap := m.ensureMapFor(obj) objKey := k8sClient.ObjectKeyFromObject(obj) + if _, ok := relevantMap[objKey]; !ok { + return errors.NewNotFound(schema.GroupResource{}, obj.GetName()) + } relevantMap[objKey] = obj return nil } -type patchValue struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value"` -} - -func (m *mockedClient) Patch(_ context.Context, obj k8sClient.Object, patch k8sClient.Patch, _ ...k8sClient.PatchOption) error { +func (m mockedClient) Patch(_ context.Context, obj k8sClient.Object, patch k8sClient.Patch, _ ...k8sClient.PatchOption) error { if patch.Type() != types.JSONPatchType { return fmt.Errorf("patch types different from JSONPatchType are not yet implemented") } relevantMap := m.ensureMapFor(obj) objKey := k8sClient.ObjectKeyFromObject(obj) - patches := []patchValue{} + var patches []patchValue data, err := patch.Data(obj) if err != nil { return err @@ -147,18 +107,94 @@ func (m *mockedClient) Patch(_ context.Context, obj k8sClient.Object, patch k8sC return nil } -func (m *mockedClient) DeleteAllOf(_ context.Context, _ k8sClient.Object, _ ...k8sClient.DeleteAllOfOption) error { +type mockedStatusWriter struct { + parent mockedClient +} + +func (m 
mockedStatusWriter) Create(ctx context.Context, obj k8sClient.Object, _ k8sClient.Object, _ ...k8sClient.SubResourceCreateOption) error { + return m.parent.Create(ctx, obj) +} + +func (m mockedStatusWriter) Update(ctx context.Context, obj k8sClient.Object, _ ...k8sClient.SubResourceUpdateOption) error { + return m.parent.Update(ctx, obj) +} + +func (m mockedStatusWriter) Patch(ctx context.Context, obj k8sClient.Object, patch k8sClient.Patch, _ ...k8sClient.SubResourcePatchOption) error { + return m.parent.Patch(ctx, obj, patch) +} + +func (m mockedClient) SubResource(string) k8sClient.SubResourceClient { + panic("implement me") +} + +// notFoundError returns an error which returns true for "errors.IsNotFound" +func notFoundError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} +} + +func alreadyExistsError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists}} +} + +func NewMockedClient() k8sClient.Client { + return &mockedClient{backingMap: map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object{}} +} + +func (m mockedClient) ensureMapFor(obj k8sClient.Object) map[k8sClient.ObjectKey]k8sClient.Object { + t := reflect.TypeOf(obj) + if _, ok := m.backingMap[t]; !ok { + m.backingMap[t] = map[k8sClient.ObjectKey]k8sClient.Object{} + } + return m.backingMap[t] +} + +func (m mockedClient) Get(_ context.Context, key k8sClient.ObjectKey, obj k8sClient.Object, _ ...k8sClient.GetOption) error { + relevantMap := m.ensureMapFor(obj) + if val, ok := relevantMap[key]; ok { + if currSts, ok := val.(*appsv1.StatefulSet); ok { + // TODO: this currently doesn't work with additional mongodb config + // just doing it for StatefulSets for now + objCopy := currSts.DeepCopyObject() + v := reflect.ValueOf(obj).Elem() + v.Set(reflect.ValueOf(objCopy).Elem()) + } else { + v := reflect.ValueOf(obj).Elem() + v.Set(reflect.ValueOf(val).Elem()) + } + return nil + } + return 
notFoundError() +} + +// makeStatefulSetReady configures the stateful to be in the running state. +func makeStatefulSetReady(set *appsv1.StatefulSet) { + set.Status.UpdatedReplicas = *set.Spec.Replicas + set.Status.ReadyReplicas = *set.Spec.Replicas +} + +func (m mockedClient) List(_ context.Context, _ k8sClient.ObjectList, _ ...k8sClient.ListOption) error { + return nil +} + +func (m mockedClient) Delete(_ context.Context, obj k8sClient.Object, _ ...k8sClient.DeleteOption) error { + relevantMap := m.ensureMapFor(obj) + objKey := k8sClient.ObjectKeyFromObject(obj) + delete(relevantMap, objKey) + return nil +} + +func (m mockedClient) DeleteAllOf(_ context.Context, _ k8sClient.Object, _ ...k8sClient.DeleteAllOfOption) error { return nil } -func (m *mockedClient) Status() k8sClient.StatusWriter { - return m +func (m mockedClient) Status() k8sClient.StatusWriter { + return mockedStatusWriter{parent: m} } -func (m *mockedClient) RESTMapper() meta.RESTMapper { +func (m mockedClient) RESTMapper() meta.RESTMapper { return nil } -func (m *mockedClient) Scheme() *runtime.Scheme { +func (m mockedClient) Scheme() *runtime.Scheme { return nil } diff --git a/pkg/kube/client/mocked_client_test.go b/pkg/kube/client/mocked_client_test.go index a27d00518..870b85380 100644 --- a/pkg/kube/client/mocked_client_test.go +++ b/pkg/kube/client/mocked_client_test.go @@ -12,6 +12,7 @@ import ( ) func TestMockedClient(t *testing.T) { + ctx := context.Background() mockedClient := NewMockedClient() cm := configmap.Builder(). @@ -21,11 +22,11 @@ func TestMockedClient(t *testing.T) { SetData(map[string]string{"key-2": "field-2"}). 
Build() - err := mockedClient.Create(context.TODO(), &cm) + err := mockedClient.Create(ctx, &cm) assert.NoError(t, err) newCm := corev1.ConfigMap{} - err = mockedClient.Get(context.TODO(), types.NamespacedName{Name: "cm-name", Namespace: "cm-namespace"}, &newCm) + err = mockedClient.Get(ctx, types.NamespacedName{Name: "cm-name", Namespace: "cm-namespace"}, &newCm) assert.NoError(t, err) assert.Equal(t, "cm-namespace", newCm.Namespace) assert.Equal(t, "cm-name", newCm.Name) @@ -37,11 +38,11 @@ func TestMockedClient(t *testing.T) { SetServiceType("service-type"). Build() - err = mockedClient.Create(context.TODO(), &svc) + err = mockedClient.Create(ctx, &svc) assert.NoError(t, err) newSvc := corev1.Service{} - err = mockedClient.Get(context.TODO(), types.NamespacedName{Name: "svc-name", Namespace: "svc-namespace"}, &newSvc) + err = mockedClient.Get(ctx, types.NamespacedName{Name: "svc-name", Namespace: "svc-namespace"}, &newSvc) assert.NoError(t, err) assert.Equal(t, "svc-namespace", newSvc.Namespace) assert.Equal(t, "svc-name", newSvc.Name) diff --git a/pkg/kube/client/mocked_manager.go b/pkg/kube/client/mocked_manager.go index 66facfa40..2f9a3d30c 100644 --- a/pkg/kube/client/mocked_manager.go +++ b/pkg/kube/client/mocked_manager.go @@ -3,6 +3,7 @@ package client import ( "context" "net/http" + "sigs.k8s.io/controller-runtime/pkg/config" "time" "github.com/go-logr/logr" @@ -12,7 +13,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/cache" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -24,10 +24,10 @@ type MockedManager struct { Client Client } -func NewManager(obj k8sClient.Object) *MockedManager { +func NewManager(ctx context.Context, obj k8sClient.Object) *MockedManager { c := NewMockedClient() if obj != nil { - _ = 
c.Create(context.TODO(), obj) + _ = c.Create(ctx, obj) } return &MockedManager{Client: NewClient(c)} } @@ -36,6 +36,10 @@ func NewManagerWithClient(c k8sClient.Client) *MockedManager { return &MockedManager{Client: NewClient(c)} } +func (m *MockedManager) GetHTTPClient() *http.Client { + panic("implement me") +} + func (m *MockedManager) Add(_ manager.Runnable) error { return nil } @@ -69,8 +73,7 @@ func (m *MockedManager) GetScheme() *runtime.Scheme { // GetAdmissionDecoder returns the runtime.Decoder based on the scheme. func (m *MockedManager) GetAdmissionDecoder() admission.Decoder { // just returning nothing - d, _ := admission.NewDecoder(runtime.NewScheme()) - return *d + return admission.NewDecoder(runtime.NewScheme()) } // GetAPIReader returns the client reader @@ -107,11 +110,11 @@ func (m *MockedManager) GetRESTMapper() meta.RESTMapper { return nil } -func (m *MockedManager) GetWebhookServer() *webhook.Server { +func (m *MockedManager) GetWebhookServer() webhook.Server { return nil } -func (m *MockedManager) AddMetricsExtraHandler(path string, handler http.Handler) error { +func (m *MockedManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { return nil } @@ -129,9 +132,9 @@ func (m *MockedManager) GetLogger() logr.Logger { return logr.Logger{} } -func (m *MockedManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { +func (m *MockedManager) GetControllerOptions() config.Controller { var duration = time.Duration(0) - return v1alpha1.ControllerConfigurationSpec{ - CacheSyncTimeout: &duration, + return config.Controller{ + CacheSyncTimeout: duration, } } diff --git a/pkg/kube/configmap/configmap.go b/pkg/kube/configmap/configmap.go index 5a6abbd03..36f38d469 100644 --- a/pkg/kube/configmap/configmap.go +++ b/pkg/kube/configmap/configmap.go @@ -1,6 +1,7 @@ package configmap import ( + "context" "fmt" "strings" @@ -11,19 +12,19 @@ import ( ) type Getter interface { - GetConfigMap(objectKey client.ObjectKey) 
(corev1.ConfigMap, error) + GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) } type Updater interface { - UpdateConfigMap(cm corev1.ConfigMap) error + UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error } type Creator interface { - CreateConfigMap(cm corev1.ConfigMap) error + CreateConfigMap(ctx context.Context, cm corev1.ConfigMap) error } type Deleter interface { - DeleteConfigMap(key client.ObjectKey) error + DeleteConfigMap(ctx context.Context, key client.ObjectKey) error } type GetUpdater interface { @@ -51,8 +52,8 @@ const ( // ReadKey accepts a ConfigMap Getter, the object of the ConfigMap to get, and the key within // the config map to read. It returns the string value, and an error if one occurred. -func ReadKey(getter Getter, key string, objectKey client.ObjectKey) (string, error) { - data, err := ReadData(getter, objectKey) +func ReadKey(ctx context.Context, getter Getter, key string, objectKey client.ObjectKey) (string, error) { + data, err := ReadData(ctx, getter, objectKey) if err != nil { return "", err } @@ -63,8 +64,8 @@ func ReadKey(getter Getter, key string, objectKey client.ObjectKey) (string, err } // ReadData extracts the contents of the Data field in a given config map -func ReadData(getter Getter, key client.ObjectKey) (map[string]string, error) { - cm, err := getter.GetConfigMap(key) +func ReadData(ctx context.Context, getter Getter, key client.ObjectKey) (map[string]string, error) { + cm, err := getter.GetConfigMap(ctx, key) if err != nil { return nil, err } @@ -72,26 +73,26 @@ func ReadData(getter Getter, key client.ObjectKey) (map[string]string, error) { } // UpdateField updates the sets "key" to the given "value" -func UpdateField(getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { - cm, err := getUpdater.GetConfigMap(objectKey) +func UpdateField(ctx context.Context, getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { + cm, err := 
getUpdater.GetConfigMap(ctx, objectKey) if err != nil { return err } cm.Data[key] = value - return getUpdater.UpdateConfigMap(cm) + return getUpdater.UpdateConfigMap(ctx, cm) } // CreateOrUpdate creates the given ConfigMap if it doesn't exist, // or updates it if it does. -func CreateOrUpdate(getUpdateCreator GetUpdateCreator, cm corev1.ConfigMap) error { - _, err := getUpdateCreator.GetConfigMap(types.NamespacedName{Name: cm.Name, Namespace: cm.Namespace}) - if err != nil { +func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, cm corev1.ConfigMap) error { + if err := getUpdateCreator.UpdateConfigMap(ctx, cm); err != nil { if apiErrors.IsNotFound(err) { - return getUpdateCreator.CreateConfigMap(cm) + return getUpdateCreator.CreateConfigMap(ctx, cm) + } else { + return err } - return err } - return getUpdateCreator.UpdateConfigMap(cm) + return nil } // filelikePropertiesToMap converts a file-like field in a ConfigMap to a map[string]string. @@ -109,8 +110,8 @@ func filelikePropertiesToMap(s string) (map[string]string, error) { } // ReadFileLikeField reads a ConfigMap with file-like properties and returns the value inside one of the fields. 
-func ReadFileLikeField(getter Getter, objectKey client.ObjectKey, externalKey string, internalKey string) (string, error) { - cmData, err := ReadData(getter, objectKey) +func ReadFileLikeField(ctx context.Context, getter Getter, objectKey client.ObjectKey, externalKey string, internalKey string) (string, error) { + cmData, err := ReadData(ctx, getter, objectKey) if err != nil { return "", err } @@ -130,8 +131,8 @@ func ReadFileLikeField(getter Getter, objectKey client.ObjectKey, externalKey st } // Exists return whether a configmap with the given namespaced name exists -func Exists(cmGetter Getter, nsName types.NamespacedName) (bool, error) { - _, err := cmGetter.GetConfigMap(nsName) +func Exists(ctx context.Context, cmGetter Getter, nsName types.NamespacedName) (bool, error) { + _, err := cmGetter.GetConfigMap(ctx, nsName) if err != nil { if apiErrors.IsNotFound(err) { diff --git a/pkg/kube/configmap/configmap_test.go b/pkg/kube/configmap/configmap_test.go index b685180ad..1d731573a 100644 --- a/pkg/kube/configmap/configmap_test.go +++ b/pkg/kube/configmap/configmap_test.go @@ -1,6 +1,7 @@ package configmap import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -15,7 +16,7 @@ type configMapGetter struct { cm corev1.ConfigMap } -func (c configMapGetter) GetConfigMap(objectKey client.ObjectKey) (corev1.ConfigMap, error) { +func (c configMapGetter) GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) { if c.cm.Name == objectKey.Name && c.cm.Namespace == objectKey.Namespace { return c.cm, nil } @@ -29,6 +30,7 @@ func newGetter(cm corev1.ConfigMap) Getter { } func TestReadKey(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). 
@@ -38,19 +40,20 @@ func TestReadKey(t *testing.T) { Build(), ) - value, err := ReadKey(getter, "key1", nsName("namespace", "name")) + value, err := ReadKey(ctx, getter, "key1", nsName("namespace", "name")) assert.Equal(t, "value1", value) assert.NoError(t, err) - value, err = ReadKey(getter, "key2", nsName("namespace", "name")) + value, err = ReadKey(ctx, getter, "key2", nsName("namespace", "name")) assert.Equal(t, "value2", value) assert.NoError(t, err) - _, err = ReadKey(getter, "key3", nsName("namespace", "name")) + _, err = ReadKey(ctx, getter, "key3", nsName("namespace", "name")) assert.Error(t, err) } func TestReadData(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). @@ -60,7 +63,7 @@ func TestReadData(t *testing.T) { Build(), ) - data, err := ReadData(getter, nsName("namespace", "name")) + data, err := ReadData(ctx, getter, nsName("namespace", "name")) assert.NoError(t, err) assert.Contains(t, data, "key1") @@ -71,6 +74,7 @@ func TestReadData(t *testing.T) { } func TestReadFileLikeField(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). @@ -79,13 +83,14 @@ func TestReadFileLikeField(t *testing.T) { Build(), ) - data, err := ReadFileLikeField(getter, nsName("namespace", "name"), "key1", "value1") + data, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key1", "value1") assert.NoError(t, err) assert.Equal(t, "1", data) } func TestReadFileLikeField_InvalidExternalKey(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). 
@@ -94,12 +99,13 @@ func TestReadFileLikeField_InvalidExternalKey(t *testing.T) { Build(), ) - _, err := ReadFileLikeField(getter, nsName("namespace", "name"), "key2", "value1") + _, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key2", "value1") assert.Error(t, err) assert.Equal(t, "key key2 is not present in ConfigMap namespace/name", err.Error()) } func TestReadFileLikeField_InvalidInternalKey(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). @@ -108,7 +114,7 @@ func TestReadFileLikeField_InvalidInternalKey(t *testing.T) { Build(), ) - _, err := ReadFileLikeField(getter, nsName("namespace", "name"), "key1", "value3") + _, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key1", "value3") assert.Error(t, err) assert.Equal(t, "key value3 is not present in the key1 field of ConfigMap namespace/name", err.Error()) } @@ -117,14 +123,14 @@ type configMapGetUpdater struct { cm corev1.ConfigMap } -func (c configMapGetUpdater) GetConfigMap(objectKey client.ObjectKey) (corev1.ConfigMap, error) { +func (c configMapGetUpdater) GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) { if c.cm.Name == objectKey.Name && c.cm.Namespace == objectKey.Namespace { return c.cm, nil } return corev1.ConfigMap{}, notFoundError() } -func (c *configMapGetUpdater) UpdateConfigMap(cm corev1.ConfigMap) error { +func (c *configMapGetUpdater) UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { c.cm = cm return nil } @@ -136,6 +142,7 @@ func newGetUpdater(cm corev1.ConfigMap) GetUpdater { } func TestUpdateField(t *testing.T) { + ctx := context.Background() getUpdater := newGetUpdater( Builder(). SetName("name"). @@ -144,11 +151,11 @@ func TestUpdateField(t *testing.T) { SetDataField("field2", "value2"). 
Build(), ) - err := UpdateField(getUpdater, nsName("namespace", "name"), "field1", "newValue") + err := UpdateField(ctx, getUpdater, nsName("namespace", "name"), "field1", "newValue") assert.NoError(t, err) - val, _ := ReadKey(getUpdater, "field1", nsName("namespace", "name")) + val, _ := ReadKey(ctx, getUpdater, "field1", nsName("namespace", "name")) assert.Equal(t, "newValue", val) - val2, _ := ReadKey(getUpdater, "field2", nsName("namespace", "name")) + val2, _ := ReadKey(ctx, getUpdater, "field2", nsName("namespace", "name")) assert.Equal(t, "value2", val2) } diff --git a/pkg/kube/container/container_test.go b/pkg/kube/container/container_test.go index 5c08b14b9..a61a0be15 100644 --- a/pkg/kube/container/container_test.go +++ b/pkg/kube/container/container_test.go @@ -146,7 +146,7 @@ func TestMergeEnvs(t *testing.T) { }, } - merged := envvar.MergeWithOverride(existing, desired) + merged := envvar.MergeWithOverride(existing, desired) // nolint:forbidigo t.Run("EnvVars should be sorted", func(t *testing.T) { assert.Equal(t, "A_env", merged[0].Name) diff --git a/pkg/kube/container/containers.go b/pkg/kube/container/containers.go index debd4b115..687befc5b 100644 --- a/pkg/kube/container/containers.go +++ b/pkg/kube/container/containers.go @@ -129,7 +129,7 @@ func WithLifecycle(lifeCycleMod lifecycle.Modification) Modification { // WithEnvs ensures all of the provided envs exist in the container func WithEnvs(envs ...corev1.EnvVar) Modification { return func(container *corev1.Container) { - container.Env = envvar.MergeWithOverride(container.Env, envs) + container.Env = envvar.MergeWithOverride(container.Env, envs) // nolint:forbidigo } } diff --git a/pkg/kube/pod/pod.go b/pkg/kube/pod/pod.go index 434f62b6a..7b991a694 100644 --- a/pkg/kube/pod/pod.go +++ b/pkg/kube/pod/pod.go @@ -1,10 +1,11 @@ package pod import ( + "context" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) type Getter interface { - GetPod(objectKey client.ObjectKey) 
(corev1.Pod, error) + GetPod(ctx context.Context, objectKey client.ObjectKey) (corev1.Pod, error) } diff --git a/pkg/kube/podtemplatespec/podspec_template.go b/pkg/kube/podtemplatespec/podspec_template.go index e79b4cf05..f908a214a 100644 --- a/pkg/kube/podtemplatespec/podspec_template.go +++ b/pkg/kube/podtemplatespec/podspec_template.go @@ -297,7 +297,7 @@ func FindContainerByName(name string, podTemplateSpec *corev1.PodTemplateSpec) * } func WithDefaultSecurityContextsModifications() (Modification, container.Modification) { - managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) + managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) // nolint:forbidigo configureContainerSecurityContext := container.NOOP() configurePodSpecSecurityContext := NOOP() if !managedSecurityContext { diff --git a/pkg/kube/secret/secret.go b/pkg/kube/secret/secret.go index a8c4774df..93f9b64ea 100644 --- a/pkg/kube/secret/secret.go +++ b/pkg/kube/secret/secret.go @@ -1,6 +1,7 @@ package secret import ( + "context" "fmt" "reflect" "strings" @@ -15,19 +16,19 @@ import ( ) type Getter interface { - GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) + GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) } type Updater interface { - UpdateSecret(secret corev1.Secret) error + UpdateSecret(ctx context.Context, secret corev1.Secret) error } type Creator interface { - CreateSecret(secret corev1.Secret) error + CreateSecret(ctx context.Context, secret corev1.Secret) error } type Deleter interface { - DeleteSecret(objectKey client.ObjectKey) error + DeleteSecret(ctx context.Context, key client.ObjectKey) error } type GetUpdater interface { @@ -48,8 +49,8 @@ type GetUpdateCreateDeleter interface { Deleter } -func ReadKey(getter Getter, key string, objectKey client.ObjectKey) (string, error) { - data, err := ReadStringData(getter, objectKey) +func ReadKey(ctx context.Context, getter Getter, key string, objectKey client.ObjectKey) 
(string, error) { + data, err := ReadStringData(ctx, getter, objectKey) if err != nil { return "", err } @@ -60,8 +61,8 @@ func ReadKey(getter Getter, key string, objectKey client.ObjectKey) (string, err } // ReadByteData reads the Data field of the secret with the given objectKey -func ReadByteData(getter Getter, objectKey client.ObjectKey) (map[string][]byte, error) { - secret, err := getter.GetSecret(objectKey) +func ReadByteData(ctx context.Context, getter Getter, objectKey client.ObjectKey) (map[string][]byte, error) { + secret, err := getter.GetSecret(ctx, objectKey) if err != nil { return nil, err } @@ -69,8 +70,8 @@ func ReadByteData(getter Getter, objectKey client.ObjectKey) (map[string][]byte, } // ReadStringData reads the StringData field of the secret with the given objectKey -func ReadStringData(getter Getter, key client.ObjectKey) (map[string]string, error) { - secret, err := getter.GetSecret(key) +func ReadStringData(ctx context.Context, getter Getter, key client.ObjectKey) (map[string]string, error) { + secret, err := getter.GetSecret(ctx, key) if err != nil { return nil, err } @@ -87,25 +88,25 @@ func dataToStringData(data map[string][]byte) map[string]string { } // UpdateField updates a single field in the secret with the provided objectKey -func UpdateField(getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { - secret, err := getUpdater.GetSecret(objectKey) +func UpdateField(ctx context.Context, getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { + secret, err := getUpdater.GetSecret(ctx, objectKey) if err != nil { return err } secret.Data[key] = []byte(value) - return getUpdater.UpdateSecret(secret) + return getUpdater.UpdateSecret(ctx, secret) } // CreateOrUpdate creates the Secret if it doesn't exist, other wise it updates it -func CreateOrUpdate(getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { - _, err := getUpdateCreator.GetSecret(types.NamespacedName{Name: secret.Name, 
Namespace: secret.Namespace}) - if err != nil { +func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { + if err := getUpdateCreator.UpdateSecret(ctx, secret); err != nil { if SecretNotExist(err) { - return getUpdateCreator.CreateSecret(secret) + return getUpdateCreator.CreateSecret(ctx, secret) + } else { + return err } - return err } - return getUpdateCreator.UpdateSecret(secret) + return nil } // HasAllKeys returns true if the provided secret contains an element for every @@ -121,8 +122,8 @@ func HasAllKeys(secret corev1.Secret, keys ...string) bool { // EnsureSecretWithKey makes sure the Secret with the given name has a key with the given value if the key is not already present. // if the key is present, it will return the existing value associated with this key. -func EnsureSecretWithKey(secretGetUpdateCreateDeleter GetUpdateCreateDeleter, nsName types.NamespacedName, ownerReferences []metav1.OwnerReference, key, value string) (string, error) { - existingSecret, err0 := secretGetUpdateCreateDeleter.GetSecret(nsName) +func EnsureSecretWithKey(ctx context.Context, secretGetUpdateCreateDeleter GetUpdateCreateDeleter, nsName types.NamespacedName, ownerReferences []metav1.OwnerReference, key, value string) (string, error) { + existingSecret, err0 := secretGetUpdateCreateDeleter.GetSecret(ctx, nsName) if err0 != nil { if SecretNotExist(err0) { s := Builder(). @@ -132,7 +133,7 @@ func EnsureSecretWithKey(secretGetUpdateCreateDeleter GetUpdateCreateDeleter, ns SetOwnerReferences(ownerReferences). 
Build() - if err1 := secretGetUpdateCreateDeleter.CreateSecret(s); err1 != nil { + if err1 := secretGetUpdateCreateDeleter.CreateSecret(ctx, s); err1 != nil { return "", err1 } return value, nil @@ -143,8 +144,8 @@ func EnsureSecretWithKey(secretGetUpdateCreateDeleter GetUpdateCreateDeleter, ns } // CopySecret copies secret object(data) from one cluster client to another, the from and to cluster-client can belong to the same or different clusters -func CopySecret(fromClient Getter, toClient GetUpdateCreator, sourceSecretNsName, destNsName types.NamespacedName) error { - s, err := fromClient.GetSecret(sourceSecretNsName) +func CopySecret(ctx context.Context, fromClient Getter, toClient GetUpdateCreator, sourceSecretNsName, destNsName types.NamespacedName) error { + s, err := fromClient.GetSecret(ctx, sourceSecretNsName) if err != nil { return err } @@ -156,12 +157,12 @@ func CopySecret(fromClient Getter, toClient GetUpdateCreator, sourceSecretNsName SetDataType(s.Type). Build() - return CreateOrUpdate(toClient, secretCopy) + return CreateOrUpdate(ctx, toClient, secretCopy) } // Exists return whether a secret with the given namespaced name exists -func Exists(secretGetter Getter, nsName types.NamespacedName) (bool, error) { - _, err := secretGetter.GetSecret(nsName) +func Exists(ctx context.Context, secretGetter Getter, nsName types.NamespacedName) (bool, error) { + _, err := secretGetter.GetSecret(ctx, nsName) if err != nil { if apiErrors.IsNotFound(err) { @@ -184,12 +185,12 @@ func HasOwnerReferences(secret corev1.Secret, ownerRefs []metav1.OwnerReference) } // CreateOrUpdateIfNeeded creates a secret if it doesn't exist, or updates it if needed. 
-func CreateOrUpdateIfNeeded(getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { +func CreateOrUpdateIfNeeded(ctx context.Context, getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { // Check if the secret exists - oldSecret, err := getUpdateCreator.GetSecret(types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}) + oldSecret, err := getUpdateCreator.GetSecret(ctx, types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}) if err != nil { if apiErrors.IsNotFound(err) { - return getUpdateCreator.CreateSecret(secret) + return getUpdateCreator.CreateSecret(ctx, secret) } return err } @@ -200,7 +201,7 @@ func CreateOrUpdateIfNeeded(getUpdateCreator GetUpdateCreator, secret corev1.Sec } // They are different so we need to update it - return getUpdateCreator.UpdateSecret(secret) + return getUpdateCreator.UpdateSecret(ctx, secret) } func SecretNotExist(err error) bool { diff --git a/pkg/kube/secret/secret_test.go b/pkg/kube/secret/secret_test.go index a157533cf..71810e32d 100644 --- a/pkg/kube/secret/secret_test.go +++ b/pkg/kube/secret/secret_test.go @@ -1,6 +1,7 @@ package secret import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -15,7 +16,7 @@ type secretGetter struct { secret corev1.Secret } -func (c secretGetter) GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) { +func (c secretGetter) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { if c.secret.Name == objectKey.Name && c.secret.Namespace == objectKey.Namespace { return c.secret, nil } @@ -29,6 +30,7 @@ func newGetter(s corev1.Secret) Getter { } func TestReadKey(t *testing.T) { + ctx := context.Background() getter := newGetter( Builder(). SetName("name"). 
@@ -38,15 +40,15 @@ func TestReadKey(t *testing.T) { Build(), ) - value, err := ReadKey(getter, "key1", nsName("namespace", "name")) + value, err := ReadKey(ctx, getter, "key1", nsName("namespace", "name")) assert.Equal(t, "value1", value) assert.NoError(t, err) - value, err = ReadKey(getter, "key2", nsName("namespace", "name")) + value, err = ReadKey(ctx, getter, "key2", nsName("namespace", "name")) assert.Equal(t, "value2", value) assert.NoError(t, err) - _, err = ReadKey(getter, "key3", nsName("namespace", "name")) + _, err = ReadKey(ctx, getter, "key3", nsName("namespace", "name")) assert.Error(t, err) } @@ -60,7 +62,8 @@ func TestReadData(t *testing.T) { Build(), ) t.Run("ReadStringData", func(t *testing.T) { - stringData, err := ReadStringData(getter, nsName("namespace", "name")) + ctx := context.Background() + stringData, err := ReadStringData(ctx, getter, nsName("namespace", "name")) assert.NoError(t, err) assert.Contains(t, stringData, "key1") @@ -71,7 +74,8 @@ func TestReadData(t *testing.T) { }) t.Run("ReadByteData", func(t *testing.T) { - data, err := ReadByteData(getter, nsName("namespace", "name")) + ctx := context.Background() + data, err := ReadByteData(ctx, getter, nsName("namespace", "name")) assert.NoError(t, err) assert.Contains(t, data, "key1") @@ -95,15 +99,15 @@ type secretGetUpdater struct { secret corev1.Secret } -func (c secretGetUpdater) GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) { +func (c secretGetUpdater) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { if c.secret.Name == objectKey.Name && c.secret.Namespace == objectKey.Namespace { return c.secret, nil } return corev1.Secret{}, notFoundError() } -func (c *secretGetUpdater) UpdateSecret(s corev1.Secret) error { - c.secret = s +func (c *secretGetUpdater) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + c.secret = secret return nil } @@ -114,6 +118,7 @@ func newGetUpdater(s corev1.Secret) GetUpdater { } func 
TestUpdateField(t *testing.T) { + ctx := context.Background() getUpdater := newGetUpdater( Builder(). SetName("name"). @@ -122,11 +127,11 @@ func TestUpdateField(t *testing.T) { SetField("field2", "value2"). Build(), ) - err := UpdateField(getUpdater, nsName("namespace", "name"), "field1", "newValue") + err := UpdateField(ctx, getUpdater, nsName("namespace", "name"), "field1", "newValue") assert.NoError(t, err) - val, _ := ReadKey(getUpdater, "field1", nsName("namespace", "name")) + val, _ := ReadKey(ctx, getUpdater, "field1", nsName("namespace", "name")) assert.Equal(t, "newValue", val) - val2, _ := ReadKey(getUpdater, "field2", nsName("namespace", "name")) + val2, _ := ReadKey(ctx, getUpdater, "field2", nsName("namespace", "name")) assert.Equal(t, "value2", val2) } @@ -135,23 +140,23 @@ type mockSecretGetUpdateCreateDeleter struct { apiCalls int } -func (c *mockSecretGetUpdateCreateDeleter) DeleteSecret(objectKey client.ObjectKey) error { - delete(c.secrets, objectKey) +func (c *mockSecretGetUpdateCreateDeleter) DeleteSecret(ctx context.Context, key client.ObjectKey) error { + delete(c.secrets, key) c.apiCalls += 1 return nil } -func (c *mockSecretGetUpdateCreateDeleter) UpdateSecret(s corev1.Secret) error { - c.secrets[types.NamespacedName{Name: s.Name, Namespace: s.Namespace}] = s +func (c *mockSecretGetUpdateCreateDeleter) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + c.secrets[types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}] = secret c.apiCalls += 1 return nil } -func (c *mockSecretGetUpdateCreateDeleter) CreateSecret(secret corev1.Secret) error { - return c.UpdateSecret(secret) +func (c *mockSecretGetUpdateCreateDeleter) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.UpdateSecret(ctx, secret) } -func (c *mockSecretGetUpdateCreateDeleter) GetSecret(objectKey client.ObjectKey) (corev1.Secret, error) { +func (c *mockSecretGetUpdateCreateDeleter) GetSecret(ctx context.Context, objectKey 
client.ObjectKey) (corev1.Secret, error) { c.apiCalls += 1 if s, ok := c.secrets[objectKey]; !ok { return corev1.Secret{}, notFoundError() @@ -161,6 +166,7 @@ func (c *mockSecretGetUpdateCreateDeleter) GetSecret(objectKey client.ObjectKey) } func TestCreateOrUpdateIfNeededCreate(t *testing.T) { + ctx := context.Background() mock := &mockSecretGetUpdateCreateDeleter{ secrets: map[client.ObjectKey]corev1.Secret{}, apiCalls: 0, @@ -169,12 +175,13 @@ func TestCreateOrUpdateIfNeededCreate(t *testing.T) { secret := getDefaultSecret() // first time it does not exist, we create it - err := CreateOrUpdateIfNeeded(mock, secret) + err := CreateOrUpdateIfNeeded(ctx, mock, secret) assert.NoError(t, err) assert.Equal(t, 2, mock.apiCalls) // 2 calls -> get + creation } func TestCreateOrUpdateIfNeededUpdate(t *testing.T) { + ctx := context.Background() mock := &mockSecretGetUpdateCreateDeleter{ secrets: map[client.ObjectKey]corev1.Secret{}, apiCalls: 0, @@ -182,7 +189,7 @@ func TestCreateOrUpdateIfNeededUpdate(t *testing.T) { secret := getDefaultSecret() { - err := mock.CreateSecret(secret) + err := mock.CreateSecret(ctx, secret) assert.NoError(t, err) mock.apiCalls = 0 } @@ -190,13 +197,14 @@ func TestCreateOrUpdateIfNeededUpdate(t *testing.T) { { secret.Data = map[string][]byte{"test": {1, 2, 3}} // secret differs -> we update - err := CreateOrUpdateIfNeeded(mock, secret) + err := CreateOrUpdateIfNeeded(ctx, mock, secret) assert.NoError(t, err) assert.Equal(t, 2, mock.apiCalls) // 2 calls -> get + update } } func TestCreateOrUpdateIfNeededEqual(t *testing.T) { + ctx := context.Background() mock := &mockSecretGetUpdateCreateDeleter{ secrets: map[client.ObjectKey]corev1.Secret{}, apiCalls: 0, @@ -204,14 +212,14 @@ func TestCreateOrUpdateIfNeededEqual(t *testing.T) { secret := getDefaultSecret() { - err := mock.CreateSecret(secret) + err := mock.CreateSecret(ctx, secret) assert.NoError(t, err) mock.apiCalls = 0 } { // the secret already exists, so we only call get - err := 
CreateOrUpdateIfNeeded(mock, secret) + err := CreateOrUpdateIfNeeded(ctx, mock, secret) assert.NoError(t, err) assert.Equal(t, 1, mock.apiCalls) // 1 call -> get } diff --git a/pkg/kube/service/service.go b/pkg/kube/service/service.go index 887a65d92..abb749acf 100644 --- a/pkg/kube/service/service.go +++ b/pkg/kube/service/service.go @@ -1,28 +1,25 @@ package service import ( - "fmt" - + "context" corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) type Getter interface { - GetService(objectKey client.ObjectKey) (corev1.Service, error) + GetService(ctx context.Context, objectKey client.ObjectKey) (corev1.Service, error) } type Updater interface { - UpdateService(service corev1.Service) error + UpdateService(ctx context.Context, service corev1.Service) error } type Creator interface { - CreateService(service corev1.Service) error + CreateService(ctx context.Context, service corev1.Service) error } type Deleter interface { - DeleteService(objectKey client.ObjectKey) error + DeleteService(ctx context.Context, objectKey client.ObjectKey) error } type GetDeleter interface { @@ -47,80 +44,3 @@ type GetUpdateCreateDeleter interface { Creator Deleter } - -func DeleteServiceIfItExists(getterDeleter GetDeleter, serviceName types.NamespacedName) error { - _, err := getterDeleter.GetService(serviceName) - if err != nil { - // If it is not found return - if apiErrors.IsNotFound(err) { - return nil - } - // Otherwise we got an error when trying to get it - return fmt.Errorf("can't get service %s: %s", serviceName, err) - } - return getterDeleter.DeleteService(serviceName) -} - -// Merge merges `source` into `dest`. Both arguments will remain unchanged -// a new service will be created and returned. 
-// The "merging" process is arbitrary and it only handle specific attributes -func Merge(dest corev1.Service, source corev1.Service) corev1.Service { - for k, v := range source.ObjectMeta.Annotations { - dest.ObjectMeta.Annotations[k] = v - } - - for k, v := range source.ObjectMeta.Labels { - dest.ObjectMeta.Labels[k] = v - } - - for k, v := range source.Spec.Selector { - dest.Spec.Selector[k] = v - } - - cachedNodePorts := map[int32]int32{} - for _, port := range dest.Spec.Ports { - cachedNodePorts[port.Port] = port.NodePort - } - - if len(source.Spec.Ports) > 0 { - portCopy := make([]corev1.ServicePort, len(source.Spec.Ports)) - copy(portCopy, source.Spec.Ports) - dest.Spec.Ports = portCopy - - for i := range dest.Spec.Ports { - // Source might not specify NodePort and we shouldn't override existing NodePort value - if dest.Spec.Ports[i].NodePort == 0 { - dest.Spec.Ports[i].NodePort = cachedNodePorts[dest.Spec.Ports[i].Port] - } - } - } - - dest.Spec.Type = source.Spec.Type - dest.Spec.LoadBalancerIP = source.Spec.LoadBalancerIP - dest.Spec.ExternalTrafficPolicy = source.Spec.ExternalTrafficPolicy - return dest -} - -// CreateOrUpdateService will create or update a service in Kubernetes. 
-func CreateOrUpdateService(getUpdateCreator GetUpdateCreator, desiredService corev1.Service) error { - namespacedName := types.NamespacedName{Namespace: desiredService.ObjectMeta.Namespace, Name: desiredService.ObjectMeta.Name} - existingService, err := getUpdateCreator.GetService(namespacedName) - - if err != nil { - if apiErrors.IsNotFound(err) { - err = getUpdateCreator.CreateService(desiredService) - if err != nil { - return err - } - } else { - return err - } - } else { - mergedService := Merge(existingService, desiredService) - err = getUpdateCreator.UpdateService(mergedService) - if err != nil { - return err - } - } - return nil -} diff --git a/pkg/kube/statefulset/statefulset.go b/pkg/kube/statefulset/statefulset.go index 28ddd1f4f..d6e7660cb 100644 --- a/pkg/kube/statefulset/statefulset.go +++ b/pkg/kube/statefulset/statefulset.go @@ -1,6 +1,7 @@ package statefulset import ( + "context" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" apiErrors "k8s.io/apimachinery/pkg/api/errors" @@ -17,19 +18,19 @@ const ( ) type Getter interface { - GetStatefulSet(objectKey client.ObjectKey) (appsv1.StatefulSet, error) + GetStatefulSet(ctx context.Context, objectKey client.ObjectKey) (appsv1.StatefulSet, error) } type Updater interface { - UpdateStatefulSet(sts appsv1.StatefulSet) (appsv1.StatefulSet, error) + UpdateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) (appsv1.StatefulSet, error) } type Creator interface { - CreateStatefulSet(sts appsv1.StatefulSet) error + CreateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) error } type Deleter interface { - DeleteStatefulSet(objectKey client.ObjectKey) error + DeleteStatefulSet(ctx context.Context, objectKey client.ObjectKey) error } type GetUpdater interface { @@ -52,26 +53,27 @@ type GetUpdateCreateDeleter interface { // CreateOrUpdate creates the given StatefulSet if it doesn't exist, // or updates it if it does. 
-func CreateOrUpdate(getUpdateCreator GetUpdateCreator, sts appsv1.StatefulSet) (appsv1.StatefulSet, error) { - _, err := getUpdateCreator.GetStatefulSet(types.NamespacedName{Name: sts.Name, Namespace: sts.Namespace}) - if err != nil { +func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, statefulSet appsv1.StatefulSet) (appsv1.StatefulSet, error) { + if sts, err := getUpdateCreator.UpdateStatefulSet(ctx, statefulSet); err != nil { if apiErrors.IsNotFound(err) { - return appsv1.StatefulSet{}, getUpdateCreator.CreateStatefulSet(sts) + return statefulSet, getUpdateCreator.CreateStatefulSet(ctx, statefulSet) + } else { + return appsv1.StatefulSet{}, err } - return appsv1.StatefulSet{}, err + } else { + return sts, nil } - return getUpdateCreator.UpdateStatefulSet(sts) } // GetAndUpdate applies the provided function to the most recent version of the object -func GetAndUpdate(getUpdater GetUpdater, nsName types.NamespacedName, updateFunc func(*appsv1.StatefulSet)) (appsv1.StatefulSet, error) { - sts, err := getUpdater.GetStatefulSet(nsName) +func GetAndUpdate(ctx context.Context, getUpdater GetUpdater, nsName types.NamespacedName, updateFunc func(*appsv1.StatefulSet)) (appsv1.StatefulSet, error) { + sts, err := getUpdater.GetStatefulSet(ctx, nsName) if err != nil { return appsv1.StatefulSet{}, err } // apply the function on the most recent version of the resource updateFunc(&sts) - return getUpdater.UpdateStatefulSet(sts) + return getUpdater.UpdateStatefulSet(ctx, sts) } // VolumeMountData contains values required for the MountVolume function @@ -333,13 +335,13 @@ func VolumeMountWithNameExists(mounts []corev1.VolumeMount, volumeName string) b // ResetUpdateStrategy resets the statefulset update strategy to RollingUpdate. // If a version change is in progress, it doesn't do anything. 
-func ResetUpdateStrategy(mdb annotations.Versioned, kubeClient GetUpdater) error { +func ResetUpdateStrategy(ctx context.Context, mdb annotations.Versioned, kubeClient GetUpdater) error { if !mdb.IsChangingVersion() { return nil } // if we changed the version, we need to reset the UpdatePolicy back to OnUpdate - _, err := GetAndUpdate(kubeClient, mdb.NamespacedName(), func(sts *appsv1.StatefulSet) { + _, err := GetAndUpdate(ctx, kubeClient, mdb.NamespacedName(), func(sts *appsv1.StatefulSet) { sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType }) return err diff --git a/pkg/readiness/config/config.go b/pkg/readiness/config/config.go index bb3dab712..7f3e64714 100644 --- a/pkg/readiness/config/config.go +++ b/pkg/readiness/config/config.go @@ -13,16 +13,19 @@ import ( ) const ( - defaultAgentHealthStatusFilePath = "/var/log/mongodb-mms-automation/agent-health-status.json" - defaultLogPath = "/var/log/mongodb-mms-automation/readiness.log" - podNamespaceEnv = "POD_NAMESPACE" - automationConfigSecretEnv = "AUTOMATION_CONFIG_MAP" //nolint - agentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH" - logPathEnv = "LOG_FILE_PATH" - hostNameEnv = "HOSTNAME" - readinessProbeLoggerBackups = "READINESS_PROBE_LOGGER_BACKUPS" - readinessProbeLoggerMaxSize = "READINESS_PROBE_LOGGER_MAX_SIZE" - readinessProbeLoggerMaxAge = "READINESS_PROBE_LOGGER_MAX_AGE" + DefaultAgentHealthStatusFilePath = "/var/log/mongodb-mms-automation/agent-health-status.json" + AgentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH" + WithAgentFileLogging = "MDB_WITH_AGENT_FILE_LOGGING" + + defaultLogPath = "/var/log/mongodb-mms-automation/readiness.log" + podNamespaceEnv = "POD_NAMESPACE" + automationConfigSecretEnv = "AUTOMATION_CONFIG_MAP" //nolint + logPathEnv = "LOG_FILE_PATH" + hostNameEnv = "HOSTNAME" + ReadinessProbeLoggerBackups = "READINESS_PROBE_LOGGER_BACKUPS" + ReadinessProbeLoggerMaxSize = "READINESS_PROBE_LOGGER_MAX_SIZE" + ReadinessProbeLoggerMaxAge = 
"READINESS_PROBE_LOGGER_MAX_AGE" + ReadinessProbeLoggerCompress = "READINESS_PROBE_LOGGER_COMPRESS" ) type Config struct { @@ -32,43 +35,30 @@ type Config struct { AutomationConfigSecretName string HealthStatusReader io.Reader LogFilePath string - Logger *lumberjack.Logger } -func BuildFromEnvVariables(clientSet kubernetes.Interface, isHeadless bool) (Config, error) { - healthStatusFilePath := getEnvOrDefault(agentHealthStatusFilePathEnv, defaultAgentHealthStatusFilePath) - logFilePath := getEnvOrDefault(logPathEnv, defaultLogPath) +func BuildFromEnvVariables(clientSet kubernetes.Interface, isHeadless bool, file *os.File) (Config, error) { + logFilePath := GetEnvOrDefault(logPathEnv, defaultLogPath) var namespace, automationConfigName, hostname string if isHeadless { var ok bool - namespace, ok = os.LookupEnv(podNamespaceEnv) + namespace, ok = os.LookupEnv(podNamespaceEnv) // nolint:forbidigo if !ok { return Config{}, fmt.Errorf("the '%s' environment variable must be set", podNamespaceEnv) } - automationConfigName, ok = os.LookupEnv(automationConfigSecretEnv) + automationConfigName, ok = os.LookupEnv(automationConfigSecretEnv) // nolint:forbidigo if !ok { return Config{}, fmt.Errorf("the '%s' environment variable must be set", automationConfigSecretEnv) } - hostname, ok = os.LookupEnv(hostNameEnv) + hostname, ok = os.LookupEnv(hostNameEnv) // nolint:forbidigo if !ok { return Config{}, fmt.Errorf("the '%s' environment variable must be set", hostNameEnv) } } - logger := &lumberjack.Logger{ - Filename: readinessProbeLogFilePath(), - MaxBackups: readIntOrDefault(readinessProbeLoggerBackups, 5), - MaxSize: readInt(readinessProbeLoggerMaxSize), - MaxAge: readInt(readinessProbeLoggerMaxAge), - } - // Note, that we shouldn't close the file here - it will be closed very soon by the 'ioutil.ReadAll' // in main.go - file, err := os.Open(healthStatusFilePath) - if err != nil { - return Config{}, err - } return Config{ ClientSet: clientSet, Namespace: namespace, @@ -76,16 
+66,26 @@ func BuildFromEnvVariables(clientSet kubernetes.Interface, isHeadless bool) (Con Hostname: hostname, HealthStatusReader: file, LogFilePath: logFilePath, - Logger: logger, }, nil } +func GetLogger() *lumberjack.Logger { + logger := &lumberjack.Logger{ + Filename: readinessProbeLogFilePath(), + MaxBackups: readIntOrDefault(ReadinessProbeLoggerBackups, 5), + MaxSize: readIntOrDefault(ReadinessProbeLoggerMaxSize, 5), + MaxAge: readInt(ReadinessProbeLoggerMaxAge), + Compress: ReadBoolWitDefault(ReadinessProbeLoggerCompress, "false"), + } + return logger +} + func readinessProbeLogFilePath() string { - return getEnvOrDefault(logPathEnv, defaultLogPath) + return GetEnvOrDefault(logPathEnv, defaultLogPath) } -func getEnvOrDefault(envVar, defaultValue string) string { - value := strings.TrimSpace(os.Getenv(envVar)) +func GetEnvOrDefault(envVar, defaultValue string) string { + value := strings.TrimSpace(os.Getenv(envVar)) // nolint:forbidigo if value == "" { return defaultValue } @@ -101,10 +101,16 @@ func readInt(envVarName string) int { // readIntOrDefault returns the int value of an envvar of the given name. // defaults to the given value if not specified. func readIntOrDefault(envVarName string, defaultValue int) int { - envVar := getEnvOrDefault(envVarName, strconv.Itoa(defaultValue)) + envVar := GetEnvOrDefault(envVarName, strconv.Itoa(defaultValue)) intValue, err := strconv.Atoi(envVar) if err != nil { return defaultValue } return intValue } + +// ReadBoolWitDefault returns the boolean value of an envvar of the given name. 
+func ReadBoolWitDefault(envVarName string, defaultValue string) bool { + envVar := GetEnvOrDefault(envVarName, defaultValue) + return strings.TrimSpace(strings.ToLower(envVar)) == "true" +} diff --git a/pkg/readiness/headless/headless.go b/pkg/readiness/headless/headless.go index 9f4fc7d6d..18c28e23f 100644 --- a/pkg/readiness/headless/headless.go +++ b/pkg/readiness/headless/headless.go @@ -1,6 +1,7 @@ package headless import ( + "context" "fmt" "io" "os" @@ -23,11 +24,11 @@ const ( // /var/run/secrets/kubernetes.io/serviceaccount/namespace file (see // https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod) // though passing the namespace as an environment variable makes the code simpler for testing and saves an IO operation -func PerformCheckHeadlessMode(health health.Status, conf config.Config) (bool, error) { +func PerformCheckHeadlessMode(ctx context.Context, health health.Status, conf config.Config) (bool, error) { var targetVersion int64 var err error - targetVersion, err = secret.ReadAutomationConfigVersionFromSecret(conf.Namespace, conf.ClientSet, conf.AutomationConfigSecretName) + targetVersion, err = secret.ReadAutomationConfigVersionFromSecret(ctx, conf.Namespace, conf.ClientSet, conf.AutomationConfigSecretName) if err != nil { // this file is expected to be present in case of AppDB, there is no point trying to access it in // community, it masks the underlying error @@ -54,7 +55,7 @@ func PerformCheckHeadlessMode(health health.Status, conf config.Config) (bool, e currentAgentVersion := readCurrentAgentInfo(health, targetVersion) - if err = pod.PatchPodAnnotation(conf.Namespace, currentAgentVersion, conf.Hostname, conf.ClientSet); err != nil { + if err = pod.PatchPodAnnotation(ctx, conf.Namespace, currentAgentVersion, conf.Hostname, conf.ClientSet); err != nil { return false, err } diff --git a/pkg/readiness/headless/headless_test.go b/pkg/readiness/headless/headless_test.go index afff7c506..d6f2f293c 
100644 --- a/pkg/readiness/headless/headless_test.go +++ b/pkg/readiness/headless/headless_test.go @@ -15,6 +15,7 @@ import ( ) func TestPerformCheckHeadlessMode(t *testing.T) { + ctx := context.Background() c := testConfig() c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 11)) @@ -24,12 +25,12 @@ func TestPerformCheckHeadlessMode(t *testing.T) { }}, } - achieved, err := PerformCheckHeadlessMode(status, c) + achieved, err := PerformCheckHeadlessMode(ctx, status, c) require.NoError(t, err) assert.False(t, achieved) - thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(context.TODO(), c.Hostname, metav1.GetOptions{}) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "10"}, thePod.Annotations) } diff --git a/pkg/readiness/pod/podannotation.go b/pkg/readiness/pod/podannotation.go index 211991822..d36bda37f 100644 --- a/pkg/readiness/pod/podannotation.go +++ b/pkg/readiness/pod/podannotation.go @@ -13,8 +13,8 @@ import ( const mongodbAgentVersionAnnotation = "agent.mongodb.com/version" -func PatchPodAnnotation(podNamespace string, lastVersionAchieved int64, memberName string, clientSet kubernetes.Interface) error { - pod, err := clientSet.CoreV1().Pods(podNamespace).Get(context.Background(), memberName, metav1.GetOptions{}) +func PatchPodAnnotation(ctx context.Context, podNamespace string, lastVersionAchieved int64, memberName string, clientSet kubernetes.Interface) error { + pod, err := clientSet.CoreV1().Pods(podNamespace).Get(ctx, memberName, metav1.GetOptions{}) if err != nil { return err } @@ -36,7 +36,7 @@ func PatchPodAnnotation(podNamespace string, lastVersionAchieved int64, memberNa }) patcher := NewKubernetesPodPatcher(clientSet) - updatedPod, err := patcher.patchPod(podNamespace, memberName, payload) + updatedPod, err := patcher.patchPod(ctx, 
podNamespace, memberName, payload) if updatedPod != nil { zap.S().Debugf("Updated Pod annotation: %v (%s)", pod.Annotations, memberName) } diff --git a/pkg/readiness/pod/podannotation_test.go b/pkg/readiness/pod/podannotation_test.go index d9ab9b0fd..b75382421 100644 --- a/pkg/readiness/pod/podannotation_test.go +++ b/pkg/readiness/pod/podannotation_test.go @@ -17,6 +17,7 @@ import ( // TestPatchPodAnnotation verifies that patching of the pod works correctly func TestPatchPodAnnotation(t *testing.T) { + ctx := context.Background() clientset := fake.NewSimpleClientset(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "my-replica-set-0", @@ -27,20 +28,21 @@ func TestPatchPodAnnotation(t *testing.T) { }, }) - pod, _ := clientset.CoreV1().Pods("test-ns").Get(context.TODO(), "my-replica-set-0", metav1.GetOptions{}) + pod, _ := clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) assert.Empty(t, pod.Annotations[mongodbAgentVersionAnnotation]) // adding the annotations - assert.NoError(t, PatchPodAnnotation("test-ns", 1, "my-replica-set-0", clientset)) - pod, _ = clientset.CoreV1().Pods("test-ns").Get(context.TODO(), "my-replica-set-0", metav1.GetOptions{}) + assert.NoError(t, PatchPodAnnotation(ctx, "test-ns", 1, "my-replica-set-0", clientset)) + pod, _ = clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "1"}, pod.Annotations) // changing the annotations - no new annotations were added - assert.NoError(t, PatchPodAnnotation("test-ns", 2, "my-replica-set-0", clientset)) - pod, _ = clientset.CoreV1().Pods("test-ns").Get(context.TODO(), "my-replica-set-0", metav1.GetOptions{}) + assert.NoError(t, PatchPodAnnotation(ctx, "test-ns", 2, "my-replica-set-0", clientset)) + pod, _ = clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "2"}, pod.Annotations) } func 
TestUpdatePodAnnotationPodNotFound(t *testing.T) { - assert.True(t, apiErrors.IsNotFound(PatchPodAnnotation("wrong-ns", 1, "my-replica-set-0", fake.NewSimpleClientset()))) + ctx := context.Background() + assert.True(t, apiErrors.IsNotFound(PatchPodAnnotation(ctx, "wrong-ns", 1, "my-replica-set-0", fake.NewSimpleClientset()))) } diff --git a/pkg/readiness/pod/podpatcher.go b/pkg/readiness/pod/podpatcher.go index 8d39d627a..5bea91f33 100644 --- a/pkg/readiness/pod/podpatcher.go +++ b/pkg/readiness/pod/podpatcher.go @@ -24,10 +24,10 @@ func NewKubernetesPodPatcher(clientSet kubernetes.Interface) Patcher { return Patcher{clientset: clientSet} } -func (p Patcher) patchPod(namespace, podName string, payload []patchValue) (*v1.Pod, error) { +func (p Patcher) patchPod(ctx context.Context, namespace, podName string, payload []patchValue) (*v1.Pod, error) { data, err := json.Marshal(payload) if err != nil { return nil, err } - return p.clientset.CoreV1().Pods(namespace).Patch(context.TODO(), podName, types.JSONPatchType, data, metav1.PatchOptions{}) + return p.clientset.CoreV1().Pods(namespace).Patch(ctx, podName, types.JSONPatchType, data, metav1.PatchOptions{}) } diff --git a/pkg/readiness/secret/automationconfig.go b/pkg/readiness/secret/automationconfig.go index dc007b2ed..b08ebded0 100644 --- a/pkg/readiness/secret/automationconfig.go +++ b/pkg/readiness/secret/automationconfig.go @@ -1,6 +1,7 @@ package secret import ( + "context" "encoding/json" "github.com/spf13/cast" @@ -11,9 +12,9 @@ const ( automationConfigKey = "cluster-config.json" ) -func ReadAutomationConfigVersionFromSecret(namespace string, clientSet kubernetes.Interface, automationConfigMap string) (int64, error) { +func ReadAutomationConfigVersionFromSecret(ctx context.Context, namespace string, clientSet kubernetes.Interface, automationConfigMap string) (int64, error) { secretReader := newKubernetesSecretReader(clientSet) - theSecret, err := secretReader.ReadSecret(namespace, automationConfigMap) + 
theSecret, err := secretReader.ReadSecret(ctx, namespace, automationConfigMap) if err != nil { return -1, err } diff --git a/pkg/readiness/secret/secretreader.go b/pkg/readiness/secret/secretreader.go index a33161fce..aecb845e0 100644 --- a/pkg/readiness/secret/secretreader.go +++ b/pkg/readiness/secret/secretreader.go @@ -16,6 +16,6 @@ func newKubernetesSecretReader(clientSet kubernetes.Interface) *reader { return &reader{clientset: clientSet} } -func (r *reader) ReadSecret(namespace, secretName string) (*corev1.Secret, error) { - return r.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) +func (r *reader) ReadSecret(ctx context.Context, namespace, secretName string) (*corev1.Secret, error) { + return r.clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) } diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go index e9dc0c9cb..21aebfc62 100644 --- a/pkg/util/status/status.go +++ b/pkg/util/status/status.go @@ -19,13 +19,13 @@ type OptionBuilder interface { } // Update takes the options provided by the given option builder, applies them all and then updates the resource -func Update(statusWriter client.StatusWriter, mdb *mdbv1.MongoDBCommunity, optionBuilder OptionBuilder) (reconcile.Result, error) { +func Update(ctx context.Context, statusWriter client.StatusWriter, mdb *mdbv1.MongoDBCommunity, optionBuilder OptionBuilder) (reconcile.Result, error) { options := optionBuilder.GetOptions() for _, opt := range options { opt.ApplyOption(mdb) } - if err := statusWriter.Update(context.TODO(), mdb); err != nil { + if err := statusWriter.Update(ctx, mdb); err != nil { return reconcile.Result{}, err } diff --git a/release.json b/release.json index d4d8e1103..078b90861 100644 --- a/release.json +++ b/release.json @@ -1,10 +1,8 @@ { - "golang-builder-image": "golang:1.21", - "mongodb-kubernetes-operator": "0.8.3", - "version-upgrade-hook": "1.0.8", - "readiness-probe": "1.0.17", - 
"mongodb-agent": { - "version": "12.0.25.7724-1", - "tools_version": "100.7.4" - } + "golang-builder-image": "golang:1.24", + "operator": "0.13.0", + "version-upgrade-hook": "1.0.10", + "readiness-probe": "1.0.23", + "agent": "108.0.6.8796-1", + "agent-tools-version": "100.11.0" } diff --git a/requirements.txt b/requirements.txt index 79ad99708..3247df769 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,20 @@ -git+https://github.com/mongodb/sonar@0c21097a55a4426c8824f74cdcfbceb11b27bb68 +git+https://github.com/mongodb/sonar@bc7bf7732851425421f3cfe2a19cf50b0460e633 github-action-templates==0.0.4 -docker==4.3.1 +docker==7.1.0 kubernetes==26.1.0 -jinja2==2.11.3 +jinja2==3.1.4 MarkupSafe==2.0.1 PyYAML==6.0.1 -black==22.3.0 +black==24.3.0 mypy==0.961 -tqdm==v4.49.0 +tqdm==v4.66.3 boto3==1.16.21 -pymongo==3.11.4 -dnspython==2.0.0 -requests==2.31.0 +pymongo==4.6.3 +dnspython==2.6.1 +requests==2.32.3 ruamel.yaml==0.17.9 semver==2.13.0 rsa>=4.7 # not directly required, pinned by Snyk to avoid a vulnerability -setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability +setuptools==78.1.1 # not directly required, pinned by Snyk to avoid a vulnerability certifi>=2022.12.7 # not directly required, pinned by Snyk to avoid a vulnerability -urllib3>=1.26.5 # not directly required, pinned by Snyk to avoid a vulnerability \ No newline at end of file +urllib3<2 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/scripts/ci/base_logger.py b/scripts/ci/base_logger.py new file mode 100644 index 000000000..571c10aa0 --- /dev/null +++ b/scripts/ci/base_logger.py @@ -0,0 +1,21 @@ +import logging +import os +import sys + +LOGLEVEL = os.environ.get("LOGLEVEL", "DEBUG").upper() +logger = logging.getLogger("pipeline") +logger.setLevel(LOGLEVEL) +logger.propagate = False + +# Output Debug and Info logs to stdout, and above to stderr +stdout_handler = logging.StreamHandler(sys.stdout) +stdout_handler.setLevel(logging.DEBUG) 
+stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO) +stderr_handler = logging.StreamHandler(sys.stderr) +stderr_handler.setLevel(logging.WARNING) + +formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") +stdout_handler.setFormatter(formatter) +stderr_handler.setFormatter(formatter) +logger.addHandler(stdout_handler) +logger.addHandler(stderr_handler) diff --git a/scripts/ci/config.json b/scripts/ci/config.json index ce2a8c6ee..0260f015f 100644 --- a/scripts/ci/config.json +++ b/scripts/ci/config.json @@ -8,10 +8,8 @@ "e2e_image": "community-operator-e2e", "version_upgrade_hook_image": "mongodb-kubernetes-operator-version-upgrade-post-start-hook", "version_upgrade_hook_image_dev": "mongodb-kubernetes-operator-version-upgrade-post-start-hook-dev", - "agent_image_ubuntu": "mongodb-agent", - "agent_image_ubuntu_dev": "mongodb-agent-dev", - "agent_image_ubi": "mongodb-agent-ubi", - "agent_image_ubi_dev": "mongodb-agent-ubi-dev", + "agent_image": "mongodb-agent-ubi", + "agent_image_dev": "mongodb-agent-ubi-dev", "readiness_probe_image": "mongodb-kubernetes-readinessprobe", "readiness_probe_image_dev": "mongodb-kubernetes-readinessprobe-dev", "s3_bucket": "s3://enterprise-operator-dockerfiles/dockerfiles" diff --git a/scripts/ci/determine_required_releases.py b/scripts/ci/determine_required_releases.py index 92f953c7e..f77b9df11 100755 --- a/scripts/ci/determine_required_releases.py +++ b/scripts/ci/determine_required_releases.py @@ -11,7 +11,7 @@ # contains a map of the quay urls to fetch data about the corresponding images. 
QUAY_URL_MAP: Dict[str, List[str]] = { - "mongodb-agent": [ + "agent": [ "https://quay.io/api/v1/repository/mongodb/mongodb-agent-ubi", "https://quay.io/api/v1/repository/mongodb/mongodb-agent", ], @@ -21,7 +21,7 @@ "version-upgrade-hook": [ "https://quay.io/api/v1/repository/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook" ], - "mongodb-kubernetes-operator": [ + "operator": [ "https://quay.io/api/v1/repository/mongodb/mongodb-kubernetes-operator" ], } @@ -48,8 +48,6 @@ def _load_image_name_to_version_map() -> Dict[str, str]: with open("release.json") as f: release = json.loads(f.read()) - # agent section is a sub object, we change the mapping so the key corresponds to the version directly. - release["mongodb-agent"] = release["mongodb-agent"]["version"] return release diff --git a/scripts/ci/images_signing.py b/scripts/ci/images_signing.py new file mode 100644 index 000000000..e2fb4a94e --- /dev/null +++ b/scripts/ci/images_signing.py @@ -0,0 +1,208 @@ +import os +import subprocess +import sys +from typing import List, Optional + +import requests + +from scripts.ci.base_logger import logger + +SIGNING_IMAGE_URI = os.environ.get( + "SIGNING_IMAGE_URI", + "artifactory.corp.mongodb.com/release-tools-container-registry-local/garasign-cosign", +) + + +def mongodb_artifactory_login() -> None: + command = [ + "docker", + "login", + "--password-stdin", + "--username", + os.environ["ARTIFACTORY_USERNAME"], + "artifactory.corp.mongodb.com/release-tools-container-registry-local/garasign-cosign", + ] + try: + subprocess.run( + command, + input=os.environ["ARTIFACTORY_PASSWORD"].encode("utf-8"), + check=True, + ) + except subprocess.CalledProcessError as e: + logger.error(f"Authentication to MongoDB Artifactory failed : {e.returncode}") + logger.error(f"Output: {e.stderr}") + + +def get_ecr_login_password(region: str) -> Optional[str]: + """ + Retrieves the login password from aws CLI, the secrets need to be stored in ~/.aws/credentials or equivalent. 
+ :param region: Registry's AWS region + :return: The password as a string + """ + try: + result = subprocess.run( + ["aws", "ecr", "get-login-password", "--region", region], + capture_output=True, + text=True, + check=True, + ) + return result.stdout.strip() + except subprocess.CalledProcessError as e: + logger.error(f"Failed to get ECR login password: {e.stderr}") + return None + + +def is_ecr_registry(image_name: str) -> bool: + return "amazonaws.com" in image_name + + +def get_image_digest(image_name: str) -> Optional[str]: + """ + Retrieves the digest of an image from its tag. Uses the skopeo container to be able to retrieve manifests tags as well. + :param image_name: The full image name with its tag. + :return: the image digest, or None in case of failure. + """ + + transport_protocol = "docker://" + # Get digest + digest_command = [ + "docker", + "run", + "--rm", + f"--volume={os.path.expanduser('~')}/.aws:/root/.aws:ro", + "quay.io/skopeo/stable:latest", + "inspect", + "--format={{.Digest}}", + ] + + # Specify ECR credentials if necessary + if is_ecr_registry(image_name): + aws_region = os.environ.get("AWS_DEFAULT_REGION", "eu-west-1") + ecr_password = get_ecr_login_password(aws_region) + digest_command.append(f"--creds=AWS:{ecr_password}") + + digest_command.append(f"{transport_protocol}{image_name}") + + try: + result = subprocess.run( + digest_command, capture_output=True, text=True, check=True + ) + digest = result.stdout.strip() + return digest + except subprocess.CalledProcessError as e: + logger.error(f"Failed to get digest for {image_name}: {e.stderr}") + sys.exit(1) + + +def build_cosign_docker_command( + additional_args: List[str], cosign_command: List[str] +) -> List[str]: + """ + Common logic to build a cosign command with the garasign cosign image provided by DevProd. 
+ :param additional_args: additional arguments passed to the docker container, e.g mounted volume or env + :param cosign_command: actual command executed with cosign such as `sign` or `verify` + :return: the full command as a List of strings + """ + home_dir = os.path.expanduser("~") + base_command = [ + "docker", + "run", + "--platform", + "linux/amd64", + "--rm", + f"--volume={home_dir}/.docker/config.json:/root/.docker/config.json:ro", + ] + return ( + base_command + additional_args + [SIGNING_IMAGE_URI, "cosign"] + cosign_command + ) + + +def sign_image(repository: str, tag: str) -> None: + image = repository + ":" + tag + logger.debug(f"Signing image {image}") + + working_directory = os.getcwd() + container_working_directory = "/usr/local/kubernetes" + + # Referring to the image via its tag is deprecated in cosign + # We fetch the digest from the registry + digest = get_image_digest(image) + if digest is None: + logger.error("Impossible to get image digest, exiting...") + sys.exit(1) + image_ref = f"{repository}@{digest}" + + # Read secrets from environment and put them in env file for container + grs_username = os.environ["GRS_USERNAME"] + grs_password = os.environ["GRS_PASSWORD"] + pkcs11_uri = os.environ["PKCS11_URI"] + env_file_lines = [ + f"GRS_CONFIG_USER1_USERNAME={grs_username}", + f"GRS_CONFIG_USER1_PASSWORD={grs_password}", + f"COSIGN_REPOSITORY={repository}", + ] + env_file_content = "\n".join(env_file_lines) + temp_file = "./env-file" + with open(temp_file, "w") as f: + f.write(env_file_content) + + additional_args = [ + f"--env-file={temp_file}", + f"--volume={working_directory}:{container_working_directory}", + f"--workdir={container_working_directory}", + ] + cosign_command = [ + "sign", + f"--key={pkcs11_uri}", + f"--sign-container-identity={image}", + f"--tlog-upload=false", + image_ref, + ] + command = build_cosign_docker_command(additional_args, cosign_command) + + try: + subprocess.run(command, check=True) + except 
subprocess.CalledProcessError as e: + # Fail the pipeline if signing fails + logger.error(f"Failed to sign image {image}: {e.stderr}") + raise + logger.debug("Signing successful") + + +def verify_signature(repository: str, tag: str) -> bool: + image = repository + ":" + tag + logger.debug(f"Verifying signature of {image}") + public_key_url = os.environ.get( + "SIGNING_PUBLIC_KEY_URL", + "https://cosign.mongodb.com/mongodb-enterprise-kubernetes-operator.pem", + ) + r = requests.get(public_key_url) + # Ensure the request was successful + if r.status_code == 200: + # Access the content of the file + kubernetes_operator_public_key = r.text + else: + logger.error(f"Failed to retrieve the public key: Status code {r.status_code}") + return False + + public_key_var_name = "OPERATOR_PUBLIC_KEY" + additional_args = [ + "--env", + f"{public_key_var_name}={kubernetes_operator_public_key}", + ] + cosign_command = [ + "verify", + "--insecure-ignore-tlog", + f"--key=env://{public_key_var_name}", + image, + ] + command = build_cosign_docker_command(additional_args, cosign_command) + + try: + subprocess.run(command, capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + # Fail the pipeline if verification fails + logger.error(f"Failed to verify signature for image {image}: {e.stderr}") + raise + logger.debug("Successful verification") + return True diff --git a/scripts/ci/update_release.py b/scripts/ci/update_release.py index 0fedfe860..96c76746f 100755 --- a/scripts/ci/update_release.py +++ b/scripts/ci/update_release.py @@ -49,7 +49,7 @@ def update_operator_deployment(operator_deployment: Dict, release: Dict) -> None 0 ] operator_container["image"] = _replace_tag( - operator_container["image"], release["mongodb-kubernetes-operator"] + operator_container["image"], release["operator"] ) operator_envs = operator_container["env"] for env in operator_envs: @@ -58,25 +58,23 @@ def update_operator_deployment(operator_deployment: Dict, release: Dict) 
-> None if env["name"] == "READINESS_PROBE_IMAGE": env["value"] = _replace_tag(env["value"], release["readiness-probe"]) if env["name"] == "AGENT_IMAGE": - env["value"] = _replace_tag( - env["value"], release["mongodb-agent"]["version"] - ) + env["value"] = _replace_tag(env["value"], release["agent"]) def update_chart_values(values: Dict, release: Dict) -> None: - values["agent"]["version"] = release["mongodb-agent"]["version"] + values["agent"]["version"] = release["agent"] values["versionUpgradeHook"]["version"] = release["version-upgrade-hook"] values["readinessProbe"]["version"] = release["readiness-probe"] - values["operator"]["version"] = release["mongodb-kubernetes-operator"] + values["operator"]["version"] = release["operator"] def update_chart(chart: Dict, release: Dict) -> None: - chart["version"] = release["mongodb-kubernetes-operator"] - chart["appVersion"] = release["mongodb-kubernetes-operator"] + chart["version"] = release["operator"] + chart["appVersion"] = release["operator"] for dependency in chart.get("dependencies", []): if dependency["name"] == "community-operator-crds": - dependency["version"] = release["mongodb-kubernetes-operator"] + dependency["version"] = release["operator"] def main() -> int: diff --git a/scripts/dev/dev_config.py b/scripts/dev/dev_config.py index cd7715a25..93476b203 100644 --- a/scripts/dev/dev_config.py +++ b/scripts/dev/dev_config.py @@ -117,32 +117,20 @@ def mongodb_image_repo_url(self) -> str: return self._config.get("mongodb_image_repo_url", "quay.io/mongodb") @property - def agent_dev_image_ubi(self) -> str: - return self._get_dev_image("agent_image_ubi_dev", "agent_image_ubi") - - @property - def agent_dev_image_ubuntu(self) -> str: - return self._get_dev_image("agent_image_ubuntu_dev", "agent_image_ubuntu") - - @property - def agent_image_ubuntu(self) -> str: - return self._config["agent_image_ubuntu"] + def agent_image(self) -> str: + return self._config["agent_image"] @property - def agent_image_ubi(self) -> 
str: - return self._config["agent_image_ubi"] + def local_operator(self) -> str: + return self._config["mdb_local_operator"] @property - def agent_dev_image(self) -> str: - if self._distro == Distro.UBI: - return self._get_dev_image("agent_image_ubi_dev", "agent_image_ubi") - return self._get_dev_image("agent_image_ubuntu_dev", "agent_image_ubuntu") + def kube_config(self) -> str: + return self._config["kubeconfig"] @property - def agent_image(self) -> str: - if self._distro == Distro.UBI: - return self.agent_dev_image_ubi - return self.agent_dev_image_ubuntu + def agent_image_dev(self) -> str: + return self._get_dev_image("agent_image_dev", "agent_image") @property def image_type(self) -> str: diff --git a/scripts/dev/e2e.py b/scripts/dev/e2e.py index 47f539e90..0a8c03df2 100644 --- a/scripts/dev/e2e.py +++ b/scripts/dev/e2e.py @@ -106,7 +106,7 @@ def create_test_pod(args: argparse.Namespace, dev_config: DevConfig) -> None: }, { "name": "AGENT_IMAGE", - "value": f"{dev_config.repo_url}/{dev_config.agent_dev_image}:{args.tag}", + "value": f"{dev_config.repo_url}/{dev_config.agent_image_dev}:{args.tag}", }, { "name": "TEST_NAMESPACE", diff --git a/scripts/dev/get_e2e_env_vars.py b/scripts/dev/get_e2e_env_vars.py index 5060b4d1d..cea1ac4e0 100755 --- a/scripts/dev/get_e2e_env_vars.py +++ b/scripts/dev/get_e2e_env_vars.py @@ -33,6 +33,8 @@ def _get_e2e_test_envs(dev_config: DevConfig) -> Dict[str, str]: "MONGODB_REPO_URL": dev_config.mongodb_image_repo_url, "HELM_CHART_PATH": os.path.abspath("./helm-charts/charts/community-operator"), "MDB_IMAGE_TYPE": dev_config.image_type, + "MDB_LOCAL_OPERATOR": dev_config.local_operator, + "KUBECONFIG": dev_config.kube_config, } diff --git a/scripts/dev/setup_kind_cluster.sh b/scripts/dev/setup_kind_cluster.sh index 7f19c0d6e..3178f2878 100755 --- a/scripts/dev/setup_kind_cluster.sh +++ b/scripts/dev/setup_kind_cluster.sh @@ -56,7 +56,7 @@ reg_name='kind-registry' reg_port='5000' running="$(docker inspect -f '{{.State.Running}}' 
"${reg_name}" 2>/dev/null || true)" if [ "${running}" != 'true' ]; then - docker run -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" registry:2 + docker run -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network kind --name "${reg_name}" registry:2 fi if [ "${recreate}" != 0 ]; then @@ -72,10 +72,26 @@ networking: serviceSubnet: "${service_network}" containerdConfigPatches: - |- - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:${reg_port}"] - endpoint = ["http://${reg_name}:${reg_port}"] + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" EOF +# Add the registry config to the nodes +# +# This is necessary because localhost resolves to loopback addresses that are +# network-namespace local. +# In other words: localhost in the container is not localhost on the host. +# +# We want a consistent name that works from both ends, so we tell containerd to +# alias localhost:${reg_port} to the registry container when pulling images +REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}" +for node in $(kind get nodes --name "${cluster_name}"); do + docker exec "${node}" mkdir -p "${REGISTRY_DIR}" + cat < /dev/null; then echo "Installing goimports" - GO111MODULE=off go get golang.org/x/tools/cmd/goimports + go install golang.org/x/tools/cmd/goimports fi # Formats each file that was changed. diff --git a/test/e2e/client.go b/test/e2e/client.go index 31aaf0132..478e3b81c 100644 --- a/test/e2e/client.go +++ b/test/e2e/client.go @@ -34,14 +34,16 @@ var OperatorNamespace string // CleanupOptions are a way to register cleanup functions on object creation using the test client. type CleanupOptions struct { - TestContext *Context + TestContext *TestContext } // ApplyToCreate is a required method for CleanupOptions passed to the Create api. func (*CleanupOptions) ApplyToCreate(*client.CreateOptions) {} -// Context tracks cleanup functions to be called at the end of a test. 
-type Context struct { +// TestContext tracks cleanup functions to be called at the end of a test. +type TestContext struct { + Ctx context.Context + // shouldPerformCleanup indicates whether or not cleanup should happen after this test shouldPerformCleanup bool @@ -57,17 +59,17 @@ type Context struct { } // NewContext creates a context. -func NewContext(t *testing.T, performCleanup bool) (*Context, error) { +func NewContext(ctx context.Context, t *testing.T, performCleanup bool) (*TestContext, error) { testId, err := generate.RandomValidDNS1123Label(10) if err != nil { return nil, err } - return &Context{t: t, ExecutionId: testId, shouldPerformCleanup: performCleanup}, nil + return &TestContext{Ctx: ctx, t: t, ExecutionId: testId, shouldPerformCleanup: performCleanup}, nil } // Teardown is called at the end of a test. -func (ctx *Context) Teardown() { +func (ctx *TestContext) Teardown() { if !ctx.shouldPerformCleanup { return } @@ -80,7 +82,7 @@ func (ctx *Context) Teardown() { } // AddCleanupFunc adds a cleanup function to the context to be called at the end of a test. -func (ctx *Context) AddCleanupFunc(fn func() error) { +func (ctx *TestContext) AddCleanupFunc(fn func() error) { ctx.cleanupFuncs = append(ctx.cleanupFuncs, fn) } @@ -148,7 +150,7 @@ func (c *E2ETestClient) Get(ctx context.Context, key types.NamespacedName, obj c return c.Client.Get(ctx, key, obj) } -func (c *E2ETestClient) Execute(pod corev1.Pod, containerName, command string) (string, error) { +func (c *E2ETestClient) Execute(ctx context.Context, pod corev1.Pod, containerName, command string) (string, error) { req := c.CoreV1Client.RESTClient(). Post(). Namespace(pod.Namespace). 
@@ -170,7 +172,7 @@ func (c *E2ETestClient) Execute(pod corev1.Pod, containerName, command string) ( if err != nil { return "", err } - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdout: buf, Stderr: errBuf, }) @@ -194,7 +196,6 @@ func RunTest(m *testing.M) (int, error) { testEnv = &envtest.Environment{ UseExistingCluster: &useExistingCluster, AttachControlPlaneOutput: true, - KubeAPIServerFlags: []string{"--authorization-mode=RBAC"}, } fmt.Println("Starting test environment") diff --git a/test/e2e/e2eutil.go b/test/e2e/e2eutil.go index 710ec9580..d29fd9abb 100644 --- a/test/e2e/e2eutil.go +++ b/test/e2e/e2eutil.go @@ -36,7 +36,7 @@ func TestAnnotations() map[string]string { } func TestDataDir() string { - return envvar.GetEnvOrDefault(testDataDirEnv, "/workspace/testdata") + return envvar.GetEnvOrDefault(testDataDirEnv, "/workspace/testdata") // nolint:forbidigo } func TlsTestDataDir() string { @@ -45,18 +45,18 @@ func TlsTestDataDir() string { // UpdateMongoDBResource applies the provided function to the most recent version of the MongoDB resource // and retries when there are conflicts -func UpdateMongoDBResource(original *mdbv1.MongoDBCommunity, updateFunc func(*mdbv1.MongoDBCommunity)) error { - err := TestClient.Get(context.TODO(), types.NamespacedName{Name: original.Name, Namespace: original.Namespace}, original) +func UpdateMongoDBResource(ctx context.Context, original *mdbv1.MongoDBCommunity, updateFunc func(*mdbv1.MongoDBCommunity)) error { + err := TestClient.Get(ctx, types.NamespacedName{Name: original.Name, Namespace: original.Namespace}, original) if err != nil { return err } updateFunc(original) - return TestClient.Update(context.TODO(), original) + return TestClient.Update(ctx, original) } -func NewTestMongoDB(ctx *Context, name string, namespace string) (mdbv1.MongoDBCommunity, mdbv1.MongoDBUser) { +func NewTestMongoDB(ctx *TestContext, name string, namespace string) 
(mdbv1.MongoDBCommunity, mdbv1.MongoDBUser) { mongodbNamespace := namespace if mongodbNamespace == "" { mongodbNamespace = OperatorNamespace @@ -70,7 +70,7 @@ func NewTestMongoDB(ctx *Context, name string, namespace string) (mdbv1.MongoDBC Spec: mdbv1.MongoDBCommunitySpec{ Members: 3, Type: "ReplicaSet", - Version: "6.0.5", + Version: "8.0.0", Arbiters: 0, Security: mdbv1.Security{ Authentication: mdbv1.Authentication{ @@ -166,13 +166,13 @@ func NewTestTLSConfig(optional bool) mdbv1.TLS { } } -func NewPrometheusConfig(namespace string) *mdbv1.Prometheus { +func NewPrometheusConfig(ctx context.Context, namespace string) *mdbv1.Prometheus { sec := secret.Builder(). SetName("prom-secret"). SetNamespace(namespace). SetField("password", "prom-password"). Build() - err := TestClient.Create(context.TODO(), &sec, &CleanupOptions{}) + err := TestClient.Create(ctx, &sec, &CleanupOptions{}) if err != nil { if !apiErrors.IsAlreadyExists(err) { panic(fmt.Sprintf("Error trying to create secret: %s", err)) @@ -187,22 +187,22 @@ func NewPrometheusConfig(namespace string) *mdbv1.Prometheus { } } -func ensureObject(ctx *Context, obj k8sClient.Object) error { +func ensureObject(ctx *TestContext, obj k8sClient.Object) error { key := k8sClient.ObjectKeyFromObject(obj) obj.SetLabels(TestLabels()) - err := TestClient.Get(context.TODO(), key, obj) + err := TestClient.Get(ctx.Ctx, key, obj) if err != nil { if !apiErrors.IsNotFound(err) { return err } - err = TestClient.Create(context.TODO(), obj, &CleanupOptions{TestContext: ctx}) + err = TestClient.Create(ctx.Ctx, obj, &CleanupOptions{TestContext: ctx}) if err != nil { return err } } else { fmt.Printf("%s %s/%s already exists!\n", reflect.TypeOf(obj), key.Namespace, key.Name) - err = TestClient.Update(context.TODO(), obj) + err = TestClient.Update(ctx.Ctx, obj) if err != nil { return err } @@ -211,7 +211,7 @@ func ensureObject(ctx *Context, obj k8sClient.Object) error { } // EnsureNamespace checks that the given namespace exists and 
creates it if not. -func EnsureNamespace(ctx *Context, namespace string) error { +func EnsureNamespace(ctx *TestContext, namespace string) error { return ensureObject(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, diff --git a/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go b/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go index 64321860d..2cc2db6d9 100644 --- a/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go +++ b/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go @@ -1,6 +1,7 @@ package feature_compatibility_version import ( + "context" "fmt" mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" @@ -11,7 +12,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -27,8 +28,9 @@ func TestMain(m *testing.M) { // format remains the same. Versions 5 and 6 are one way upgrade only. 
// See: https://www.mongodb.com/docs/manual/reference/command/setFeatureCompatibilityVersion/ func TestFeatureCompatibilityVersion(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() // This is the lowest version available for the official images const lowestMDBVersion = "4.4.16" @@ -36,30 +38,30 @@ func TestFeatureCompatibilityVersion(t *testing.T) { const featureCompatibility = "4.2" const upgradedFeatureCompatibility = "4.4" - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.Version = lowestMDBVersion mdb.Spec.FeatureCompatibilityVersion = featureCompatibility - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion is %s", featureCompatibility), tester.HasFCV(featureCompatibility, 3)) // Upgrade while keeping the Feature Compatibility intact t.Run("MongoDB is reachable while version is upgraded", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*20)() - t.Run("Test Version can be upgraded", mongodbtests.ChangeVersion(&mdb, highestMDBVersion)) - t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(&mdb)) + t.Run("Test Version can be upgraded", mongodbtests.ChangeVersion(ctx, &mdb, highestMDBVersion)) + 
t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) }) t.Run("Test Basic Connectivity after upgrade has completed", tester.ConnectivitySucceeds()) @@ -68,20 +70,20 @@ func TestFeatureCompatibilityVersion(t *testing.T) { // Downgrade while keeping the Feature Compatibility intact t.Run("MongoDB is reachable while version is downgraded", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Test Version can be downgraded", mongodbtests.ChangeVersion(&mdb, lowestMDBVersion)) - t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(&mdb)) + t.Run("Test Version can be downgraded", mongodbtests.ChangeVersion(ctx, &mdb, lowestMDBVersion)) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) }) t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion, after downgrade, is %s", featureCompatibility), tester.HasFCV(featureCompatibility, 3)) // Upgrade the Feature Compatibility keeping the MongoDB version the same t.Run("Test FeatureCompatibilityVersion can be upgraded", func(t *testing.T) { - err := e2eutil.UpdateMongoDBResource(&mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.FeatureCompatibilityVersion = upgradedFeatureCompatibility }) assert.NoError(t, err) - t.Run("Stateful Set Reaches Ready State, after Upgrading FeatureCompatibilityVersion", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("MongoDB Reaches Running Phase, after Upgrading FeatureCompatibilityVersion", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Stateful Set Reaches Ready State, after Upgrading FeatureCompatibilityVersion", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase, after Upgrading FeatureCompatibilityVersion", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) }) 
t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion, after upgrading FeatureCompatibilityVersion, is %s", upgradedFeatureCompatibility), tester.HasFCV(upgradedFeatureCompatibility, 10)) diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index 42efe275d..a7bbf30df 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -4,11 +4,13 @@ import ( "context" "encoding/json" "fmt" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "sort" "strings" "testing" "time" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" @@ -36,43 +38,43 @@ func SkipTestIfLocal(t *testing.T, msg string, f func(t *testing.T)) { // StatefulSetBecomesReady ensures that the underlying stateful set // reaches the running state. -func StatefulSetBecomesReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func StatefulSetBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { defaultOpts := []wait.Configuration{ wait.RetryInterval(time.Second * 15), wait.Timeout(time.Minute * 25), } defaultOpts = append(defaultOpts, opts...) - return statefulSetIsReady(mdb, defaultOpts...) + return statefulSetIsReady(ctx, mdb, defaultOpts...) } // ArbitersStatefulSetBecomesReady ensures that the underlying stateful set // reaches the running state. -func ArbitersStatefulSetBecomesReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func ArbitersStatefulSetBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { defaultOpts := []wait.Configuration{ wait.RetryInterval(time.Second * 15), wait.Timeout(time.Minute * 20), } defaultOpts = append(defaultOpts, opts...) 
- return arbitersStatefulSetIsReady(mdb, defaultOpts...) + return arbitersStatefulSetIsReady(ctx, mdb, defaultOpts...) } // StatefulSetBecomesUnready ensures the underlying stateful set reaches // the unready state. -func StatefulSetBecomesUnready(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func StatefulSetBecomesUnready(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { defaultOpts := []wait.Configuration{ wait.RetryInterval(time.Second * 15), wait.Timeout(time.Minute * 15), } defaultOpts = append(defaultOpts, opts...) - return statefulSetIsNotReady(mdb, defaultOpts...) + return statefulSetIsNotReady(ctx, mdb, defaultOpts...) } // StatefulSetIsReadyAfterScaleDown ensures that a replica set is scaled down correctly // note: scaling down takes considerably longer than scaling up due the readiness probe // failure threshold being high -func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func StatefulSetIsReadyAfterScaleDown(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForStatefulSetToBeReadyAfterScaleDown(t, mdb, wait.RetryInterval(time.Second*60), wait.Timeout(time.Minute*45)) + err := wait.ForStatefulSetToBeReadyAfterScaleDown(ctx, t, mdb, wait.RetryInterval(time.Second*60), wait.Timeout(time.Minute*45)) if err != nil { t.Fatal(err) } @@ -82,10 +84,10 @@ func StatefulSetIsReadyAfterScaleDown(mdb *mdbv1.MongoDBCommunity) func(t *testi // statefulSetIsReady ensures that the underlying stateful set // reaches the running state. -func statefulSetIsReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func statefulSetIsReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { return func(t *testing.T) { start := time.Now() - err := wait.ForStatefulSetToBeReady(t, mdb, opts...) 
+ err := wait.ForStatefulSetToBeReady(ctx, t, mdb, opts...) if err != nil { t.Fatal(err) } @@ -96,9 +98,9 @@ func statefulSetIsReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) // arbitersStatefulSetIsReady ensures that the underlying stateful set // reaches the running state. -func arbitersStatefulSetIsReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func arbitersStatefulSetIsReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForArbitersStatefulSetToBeReady(t, mdb, opts...) + err := wait.ForArbitersStatefulSetToBeReady(ctx, t, mdb, opts...) if err != nil { t.Fatal(err) } @@ -107,9 +109,9 @@ func arbitersStatefulSetIsReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Config } // statefulSetIsNotReady ensures that the underlying stateful set reaches the unready state. -func statefulSetIsNotReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { +func statefulSetIsNotReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForStatefulSetToBeUnready(t, mdb, opts...) + err := wait.ForStatefulSetToBeUnready(ctx, t, mdb, opts...) 
if err != nil { t.Fatal(err) } @@ -117,11 +119,11 @@ func statefulSetIsNotReady(mdb *mdbv1.MongoDBCommunity, opts ...wait.Configurati } } -func StatefulSetHasOwnerReference(mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { +func StatefulSetHasOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { return func(t *testing.T) { stsNamespacedName := types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace} sts := appsv1.StatefulSet{} - err := e2eutil.TestClient.Get(context.TODO(), stsNamespacedName, &sts) + err := e2eutil.TestClient.Get(ctx, stsNamespacedName, &sts) if err != nil { t.Fatal(err) @@ -131,20 +133,20 @@ func StatefulSetHasOwnerReference(mdb *mdbv1.MongoDBCommunity, expectedOwnerRefe } // StatefulSetIsDeleted ensures that the underlying stateful set is deleted -func StatefulSetIsDeleted(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func StatefulSetIsDeleted(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForStatefulSetToBeDeleted(mdb.Name, time.Second*10, time.Minute*1, mdb.Namespace) + err := wait.ForStatefulSetToBeDeleted(ctx, mdb.Name, time.Second*10, time.Minute*1, mdb.Namespace) if err != nil { t.Fatal(err) } } } -func ServiceHasOwnerReference(mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { +func ServiceHasOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { return func(t *testing.T) { serviceNamespacedName := types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace} srv := corev1.Service{} - err := e2eutil.TestClient.Get(context.TODO(), serviceNamespacedName, &srv) + err := e2eutil.TestClient.Get(ctx, serviceNamespacedName, &srv) if err != nil { t.Fatal(err) } @@ -152,11 +154,11 @@ func ServiceHasOwnerReference(mdb 
*mdbv1.MongoDBCommunity, expectedOwnerReferenc } } -func ServiceUsesCorrectPort(mdb *mdbv1.MongoDBCommunity, expectedPort int32) func(t *testing.T) { +func ServiceUsesCorrectPort(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedPort int32) func(t *testing.T) { return func(t *testing.T) { serviceNamespacedName := types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace} svc := corev1.Service{} - err := e2eutil.TestClient.Get(context.TODO(), serviceNamespacedName, &svc) + err := e2eutil.TestClient.Get(ctx, serviceNamespacedName, &svc) if err != nil { t.Fatal(err) } @@ -165,22 +167,22 @@ func ServiceUsesCorrectPort(mdb *mdbv1.MongoDBCommunity, expectedPort int32) fun } } -func AgentX509SecretsExists(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func AgentX509SecretsExists(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { agentCertSecret := corev1.Secret{} - err := e2eutil.TestClient.Get(context.TODO(), mdb.AgentCertificateSecretNamespacedName(), &agentCertSecret) + err := e2eutil.TestClient.Get(ctx, mdb.AgentCertificateSecretNamespacedName(), &agentCertSecret) assert.NoError(t, err) agentCertPemSecret := corev1.Secret{} - err = e2eutil.TestClient.Get(context.TODO(), mdb.AgentCertificatePemSecretNamespacedName(), &agentCertPemSecret) + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), &agentCertPemSecret) assert.NoError(t, err) } } -func AgentSecretsHaveOwnerReference(mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { +func AgentSecretsHaveOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { checkSecret := func(t *testing.T, resourceNamespacedName types.NamespacedName) { secret := corev1.Secret{} - err := e2eutil.TestClient.Get(context.TODO(), resourceNamespacedName, &secret) + err := e2eutil.TestClient.Get(ctx, resourceNamespacedName, 
&secret) assert.NoError(t, err) assertEqualOwnerReference(t, "Secret", resourceNamespacedName, secret.GetOwnerReferences(), expectedOwnerReference) @@ -194,12 +196,12 @@ func AgentSecretsHaveOwnerReference(mdb *mdbv1.MongoDBCommunity, expectedOwnerRe // ConnectionStringSecretsAreConfigured verifies that secrets storing the connection string were generated for all scram users // and that they have the expected owner reference -func ConnectionStringSecretsAreConfigured(mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { +func ConnectionStringSecretsAreConfigured(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { return func(t *testing.T) { for _, user := range mdb.GetAuthUsers() { secret := corev1.Secret{} secretNamespacedName := types.NamespacedName{Name: user.ConnectionStringSecretName, Namespace: mdb.Namespace} - err := e2eutil.TestClient.Get(context.TODO(), secretNamespacedName, &secret) + err := e2eutil.TestClient.Get(ctx, secretNamespacedName, &secret) assert.NoError(t, err) assertEqualOwnerReference(t, "Secret", secretNamespacedName, secret.GetOwnerReferences(), expectedOwnerReference) @@ -209,9 +211,9 @@ func ConnectionStringSecretsAreConfigured(mdb *mdbv1.MongoDBCommunity, expectedO // StatefulSetHasUpdateStrategy verifies that the StatefulSet holding this MongoDB // resource has the correct Update Strategy -func StatefulSetHasUpdateStrategy(mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType) func(t *testing.T) { +func StatefulSetHasUpdateStrategy(ctx context.Context, mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForStatefulSetToHaveUpdateStrategy(t, mdb, strategy, wait.RetryInterval(time.Second*15), wait.Timeout(time.Minute*8)) + err := wait.ForStatefulSetToHaveUpdateStrategy(ctx, t, mdb, strategy, wait.RetryInterval(time.Second*15), 
wait.Timeout(time.Minute*8)) if err != nil { t.Fatal(err) } @@ -220,8 +222,8 @@ func StatefulSetHasUpdateStrategy(mdb *mdbv1.MongoDBCommunity, strategy appsv1.S } // GetPersistentVolumes returns all persistent volumes on the cluster -func getPersistentVolumesList() (*corev1.PersistentVolumeList, error) { - return e2eutil.TestClient.CoreV1Client.PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) +func getPersistentVolumesList(ctx context.Context) (*corev1.PersistentVolumeList, error) { + return e2eutil.TestClient.CoreV1Client.PersistentVolumes().List(ctx, metav1.ListOptions{}) } func containsVolume(volumes []corev1.PersistentVolume, volumeName string) bool { @@ -233,9 +235,9 @@ func containsVolume(volumes []corev1.PersistentVolume, volumeName string) bool { return false } -func HasExpectedPersistentVolumes(volumes []corev1.PersistentVolume) func(t *testing.T) { +func HasExpectedPersistentVolumes(ctx context.Context, volumes []corev1.PersistentVolume) func(t *testing.T) { return func(t *testing.T) { - volumeList, err := getPersistentVolumesList() + volumeList, err := getPersistentVolumesList(ctx) actualVolumes := volumeList.Items assert.NoError(t, err) assert.Len(t, actualVolumes, len(volumes), @@ -246,31 +248,31 @@ func HasExpectedPersistentVolumes(volumes []corev1.PersistentVolume) func(t *tes } } } -func HasExpectedMetadata(mdb *mdbv1.MongoDBCommunity, expectedLabels map[string]string, expectedAnnotations map[string]string) func(t *testing.T) { +func HasExpectedMetadata(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedLabels map[string]string, expectedAnnotations map[string]string) func(t *testing.T) { return func(t *testing.T) { namespace := mdb.Namespace statefulSetList := appsv1.StatefulSetList{} - err := e2eutil.TestClient.Client.List(context.TODO(), &statefulSetList, client.InNamespace(namespace)) + err := e2eutil.TestClient.Client.List(ctx, &statefulSetList, client.InNamespace(namespace)) assert.NoError(t, err) assert.NotEmpty(t, 
statefulSetList.Items) for _, s := range statefulSetList.Items { - containsMetadata(t, &s.ObjectMeta, expectedLabels, expectedAnnotations, "statefulset "+s.Name) + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "statefulset "+s.Name) } volumeList := corev1.PersistentVolumeList{} - err = e2eutil.TestClient.Client.List(context.TODO(), &volumeList, client.InNamespace(namespace)) + err = e2eutil.TestClient.Client.List(ctx, &volumeList, client.InNamespace(namespace)) assert.NoError(t, err) assert.NotEmpty(t, volumeList.Items) for _, s := range volumeList.Items { volName := s.Name if strings.HasPrefix(volName, "data-volume-") || strings.HasPrefix(volName, "logs-volume-") { - containsMetadata(t, &s.ObjectMeta, expectedLabels, expectedAnnotations, "volume "+volName) + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "volume "+volName) } } podList := corev1.PodList{} - err = e2eutil.TestClient.Client.List(context.TODO(), &podList, client.InNamespace(namespace)) + err = e2eutil.TestClient.Client.List(ctx, &podList, client.InNamespace(namespace)) assert.NoError(t, err) assert.NotEmpty(t, podList.Items) @@ -297,12 +299,12 @@ func HasExpectedMetadata(mdb *mdbv1.MongoDBCommunity, expectedLabels map[string] continue } - containsMetadata(t, &s.ObjectMeta, expectedLabels, expectedAnnotations, "pod "+s.Name) + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "pod "+s.Name) } } } -func containsMetadata(t *testing.T, metadata *metav1.ObjectMeta, expectedLabels map[string]string, expectedAnnotations map[string]string, msg string) { +func containsMetadata(t *testing.T, metadata metav1.ObjectMeta, expectedLabels map[string]string, expectedAnnotations map[string]string, msg string) { labels := metadata.Labels for k, v := range expectedLabels { assert.Contains(t, labels, k, msg+" has label "+k) @@ -319,9 +321,9 @@ func containsMetadata(t *testing.T, metadata *metav1.ObjectMeta, expectedLabels } // 
MongoDBReachesPendingPhase ensures the MongoDB resources gets to the Pending phase -func MongoDBReachesPendingPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func MongoDBReachesPendingPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForMongoDBToReachPhase(t, mdb, mdbv1.Pending, time.Second*15, time.Minute*2) + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Pending, time.Second*15, time.Minute*2) if err != nil { t.Fatal(err) } @@ -330,9 +332,9 @@ func MongoDBReachesPendingPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) } // MongoDBReachesRunningPhase ensure the MongoDB resource reaches the Running phase -func MongoDBReachesRunningPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func MongoDBReachesRunningPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForMongoDBToReachPhase(t, mdb, mdbv1.Running, time.Second*15, time.Minute*12) + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Running, time.Second*15, time.Minute*12) if err != nil { t.Fatal(err) } @@ -341,9 +343,9 @@ func MongoDBReachesRunningPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) } // MongoDBReachesFailedPhase ensure the MongoDB resource reaches the Failed phase. 
-func MongoDBReachesFailedPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func MongoDBReachesFailedPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForMongoDBToReachPhase(t, mdb, mdbv1.Failed, time.Second*15, time.Minute*5) + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Failed, time.Second*15, time.Minute*5) if err != nil { t.Fatal(err) } @@ -351,9 +353,9 @@ func MongoDBReachesFailedPhase(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { } } -func AutomationConfigSecretExists(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func AutomationConfigSecretExists(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - s, err := wait.ForSecretToExist(mdb.AutomationConfigSecretName(), time.Second*5, time.Minute*1, mdb.Namespace) + s, err := wait.ForSecretToExist(ctx, mdb.AutomationConfigSecretName(), time.Second*5, time.Minute*1, mdb.Namespace) assert.NoError(t, err) t.Logf("Secret %s/%s was successfully created", mdb.Namespace, mdb.AutomationConfigSecretName()) @@ -363,10 +365,10 @@ func AutomationConfigSecretExists(mdb *mdbv1.MongoDBCommunity) func(t *testing.T } } -func getAutomationConfig(t *testing.T, mdb *mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { +func getAutomationConfig(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { currentSecret := corev1.Secret{} currentAc := automationconfig.AutomationConfig{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &currentSecret) + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &currentSecret) assert.NoError(t, err) err = json.Unmarshal(currentSecret.Data[automationconfig.ConfigKey], &currentAc) assert.NoError(t, err) @@ -374,27 +376,34 @@ func getAutomationConfig(t *testing.T, mdb
*mdbv1.MongoDBCommunity) automationco } // AutomationConfigVersionHasTheExpectedVersion verifies that the automation config has the expected version. -func AutomationConfigVersionHasTheExpectedVersion(mdb *mdbv1.MongoDBCommunity, expectedVersion int) func(t *testing.T) { +func AutomationConfigVersionHasTheExpectedVersion(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedVersion int) func(t *testing.T) { return func(t *testing.T) { - currentAc := getAutomationConfig(t, mdb) + currentAc := getAutomationConfig(ctx, t, mdb) assert.Equal(t, expectedVersion, currentAc.Version) } } // AutomationConfigHasLogRotationConfig verifies that the automation config contains the given logRotate config. -func AutomationConfigHasLogRotationConfig(mdb *mdbv1.MongoDBCommunity, lrc *automationconfig.CrdLogRotate) func(t *testing.T) { +func AutomationConfigHasLogRotationConfig(ctx context.Context, mdb *mdbv1.MongoDBCommunity, lrc *automationconfig.CrdLogRotate) func(t *testing.T) { return func(t *testing.T) { - currentAc := getAutomationConfig(t, mdb) + currentAc := getAutomationConfig(ctx, t, mdb) for _, p := range currentAc.Processes { assert.Equal(t, automationconfig.ConvertCrdLogRotateToAC(lrc), p.LogRotate) } } } +func AutomationConfigHasSettings(ctx context.Context, mdb *mdbv1.MongoDBCommunity, settings map[string]interface{}) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + assert.Equal(t, currentAc.ReplicaSets[0].Settings, settings) + } +} + // AutomationConfigReplicaSetsHaveExpectedArbiters verifies that the automation config has the expected version. 
-func AutomationConfigReplicaSetsHaveExpectedArbiters(mdb *mdbv1.MongoDBCommunity, expectedArbiters int) func(t *testing.T) { +func AutomationConfigReplicaSetsHaveExpectedArbiters(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedArbiters int) func(t *testing.T) { return func(t *testing.T) { - currentAc := getAutomationConfig(t, mdb) + currentAc := getAutomationConfig(ctx, t, mdb) lsRs := currentAc.ReplicaSets for _, rs := range lsRs { arbiters := 0 @@ -409,17 +418,34 @@ func AutomationConfigReplicaSetsHaveExpectedArbiters(mdb *mdbv1.MongoDBCommunity } // AutomationConfigHasTheExpectedCustomRoles verifies that the automation config has the expected custom roles. -func AutomationConfigHasTheExpectedCustomRoles(mdb *mdbv1.MongoDBCommunity, roles []automationconfig.CustomRole) func(t *testing.T) { +func AutomationConfigHasTheExpectedCustomRoles(ctx context.Context, mdb *mdbv1.MongoDBCommunity, roles []automationconfig.CustomRole) func(t *testing.T) { return func(t *testing.T) { - currentAc := getAutomationConfig(t, mdb) + currentAc := getAutomationConfig(ctx, t, mdb) assert.ElementsMatch(t, roles, currentAc.Roles) } } +func AutomationConfigHasVoteTagPriorityConfigured(ctx context.Context, mdb *mdbv1.MongoDBCommunity, memberOptions []automationconfig.MemberOptions) func(t *testing.T) { + acMemberOptions := make([]automationconfig.MemberOptions, 0) + + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + rsMembers := currentAc.ReplicaSets + sort.Slice(rsMembers[0].Members, func(i, j int) bool { + return rsMembers[0].Members[i].Id < rsMembers[0].Members[j].Id + }) + + for _, m := range rsMembers[0].Members { + acMemberOptions = append(acMemberOptions, automationconfig.MemberOptions{Votes: m.Votes, Priority: floatPtrTostringPtr(m.Priority), Tags: m.Tags}) + } + assert.ElementsMatch(t, memberOptions, acMemberOptions) + } +} + // CreateMongoDBResource creates the MongoDB resource -func CreateMongoDBResource(mdb *mdbv1.MongoDBCommunity, ctx 
*e2eutil.Context) func(*testing.T) { +func CreateMongoDBResource(mdb *mdbv1.MongoDBCommunity, textCtx *e2eutil.TestContext) func(*testing.T) { return func(t *testing.T) { - if err := e2eutil.TestClient.Create(context.TODO(), mdb, &e2eutil.CleanupOptions{TestContext: ctx}); err != nil { + if err := e2eutil.TestClient.Create(textCtx.Ctx, mdb, &e2eutil.CleanupOptions{TestContext: textCtx}); err != nil { t.Fatal(err) } t.Logf("Created MongoDB resource %s/%s", mdb.Name, mdb.Namespace) @@ -427,9 +453,9 @@ func CreateMongoDBResource(mdb *mdbv1.MongoDBCommunity, ctx *e2eutil.Context) fu } // DeleteMongoDBResource deletes the MongoDB resource -func DeleteMongoDBResource(mdb *mdbv1.MongoDBCommunity, ctx *e2eutil.Context) func(*testing.T) { +func DeleteMongoDBResource(mdb *mdbv1.MongoDBCommunity, testCtx *e2eutil.TestContext) func(*testing.T) { return func(t *testing.T) { - if err := e2eutil.TestClient.Delete(context.TODO(), mdb); err != nil { + if err := e2eutil.TestClient.Delete(testCtx.Ctx, mdb); err != nil { t.Fatal(err) } t.Logf("Deleted MongoDB resource %s/%s", mdb.Name, mdb.Namespace) @@ -437,21 +463,21 @@ func DeleteMongoDBResource(mdb *mdbv1.MongoDBCommunity, ctx *e2eutil.Context) fu } // GetConnectionStringSecret returnes the secret generated by the operator that is storing the connection string for a specific user -func GetConnectionStringSecret(mdb mdbv1.MongoDBCommunity, user authtypes.User) corev1.Secret { +func GetConnectionStringSecret(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) corev1.Secret { secret := corev1.Secret{} secretNamespacedName := types.NamespacedName{Name: user.ConnectionStringSecretName, Namespace: mdb.Namespace} - _ = e2eutil.TestClient.Get(context.TODO(), secretNamespacedName, &secret) + _ = e2eutil.TestClient.Get(ctx, secretNamespacedName, &secret) return secret } // GetConnectionStringForUser returns the mongodb standard connection string for a user -func GetConnectionStringForUser(mdb mdbv1.MongoDBCommunity, user 
authtypes.User) string { - return string(GetConnectionStringSecret(mdb, user).Data["connectionString.standard"]) +func GetConnectionStringForUser(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) string { + return string(GetConnectionStringSecret(ctx, mdb, user).Data["connectionString.standard"]) } // GetSrvConnectionStringForUser returns the mongodb service connection string for a user -func GetSrvConnectionStringForUser(mdb mdbv1.MongoDBCommunity, user authtypes.User) string { - return string(GetConnectionStringSecret(mdb, user).Data["connectionString.standardSrv"]) +func GetSrvConnectionStringForUser(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) string { + return string(GetConnectionStringSecret(ctx, mdb, user).Data["connectionString.standardSrv"]) } func getOwnerReference(mdb *mdbv1.MongoDBCommunity) metav1.OwnerReference { @@ -462,48 +488,47 @@ func getOwnerReference(mdb *mdbv1.MongoDBCommunity) metav1.OwnerReference { }) } -func BasicFunctionality(mdb *mdbv1.MongoDBCommunity, skipStatusCheck ...bool) func(*testing.T) { +func BasicFunctionality(ctx context.Context, mdb *mdbv1.MongoDBCommunity, skipStatusCheck ...bool) func(*testing.T) { return func(t *testing.T) { mdbOwnerReference := getOwnerReference(mdb) - t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(mdb)) - t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(mdb)) - t.Run("MongoDB Reaches Running Phase", MongoDBReachesRunningPhase(mdb)) - t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(mdb, mdbOwnerReference)) - t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(mdb, mdbOwnerReference)) - t.Run("Agent Secrets Have OwnerReference", AgentSecretsHaveOwnerReference(mdb, mdbOwnerReference)) - t.Run("Connection string secrets are configured", ConnectionStringSecretsAreConfigured(mdb, mdbOwnerReference)) + t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(ctx, mdb)) + 
t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(ctx, mdb)) + t.Run("MongoDB Reaches Running Phase", MongoDBReachesRunningPhase(ctx, mdb)) + t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Agent Secrets Have OwnerReference", AgentSecretsHaveOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Connection string secrets are configured", ConnectionStringSecretsAreConfigured(ctx, mdb, mdbOwnerReference)) // TODO: this is temporary, remove the need for skipStatuscheck after 0.7.4 operator release if len(skipStatusCheck) > 0 && !skipStatusCheck[0] { - t.Run("Test Status Was Updated", Status(mdb, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb.MongoURI(""), - Phase: mdbv1.Running, - Version: mdb.GetMongoDBVersion(), - CurrentMongoDBMembers: mdb.Spec.Members, - CurrentStatefulSetReplicas: mdb.Spec.Members, - })) + t.Run("Test Status Was Updated", Status(ctx, mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: mdb.Spec.Members, + CurrentStatefulSetReplicas: mdb.Spec.Members, + })) } } } -func BasicFunctionalityX509(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func BasicFunctionalityX509(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { mdbOwnerReference := getOwnerReference(mdb) - t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(mdb)) - t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(mdb)) - t.Run("MongoDB Reaches Running Phase", MongoDBReachesRunningPhase(mdb)) - t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(mdb, mdbOwnerReference)) - t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(mdb, mdbOwnerReference)) - t.Run("Connection string secrets are configured", 
ConnectionStringSecretsAreConfigured(mdb, mdbOwnerReference)) + t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(ctx, mdb)) + t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(ctx, mdb)) + t.Run("MongoDB Reaches Running Phase", MongoDBReachesRunningPhase(ctx, mdb)) + t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Connection string secrets are configured", ConnectionStringSecretsAreConfigured(ctx, mdb, mdbOwnerReference)) } } // ServiceWithNameExists checks whether a service with the name serviceName exists -func ServiceWithNameExists(serviceName string, namespace string) func(t *testing.T) { +func ServiceWithNameExists(ctx context.Context, serviceName string, namespace string) func(t *testing.T) { return func(t *testing.T) { serviceNamespacedName := types.NamespacedName{Name: serviceName, Namespace: namespace} srv := corev1.Service{} - err := e2eutil.TestClient.Get(context.TODO(), serviceNamespacedName, &srv) + err := e2eutil.TestClient.Get(ctx, serviceNamespacedName, &srv) if err != nil { t.Fatal(err) } @@ -512,10 +537,10 @@ func ServiceWithNameExists(serviceName string, namespace string) func(t *testing } // DeletePod will delete a pod that belongs to this MongoDB resource's StatefulSet -func DeletePod(mdb *mdbv1.MongoDBCommunity, podNum int) func(*testing.T) { +func DeletePod(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int) func(*testing.T) { return func(t *testing.T) { pod := podFromMongoDBCommunity(mdb, podNum) - if err := e2eutil.TestClient.Delete(context.TODO(), &pod); err != nil { + if err := e2eutil.TestClient.Delete(ctx, &pod); err != nil { t.Fatal(err) } @@ -524,7 +549,7 @@ func DeletePod(mdb *mdbv1.MongoDBCommunity, podNum int) func(*testing.T) { } // DeleteStatefulSet provides a wrapper to delete appsv1.StatefulSet types -func DeleteStatefulSet(mdb 
*mdbv1.MongoDBCommunity) func(*testing.T) { +func DeleteStatefulSet(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { sts := appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ @@ -532,7 +557,7 @@ func DeleteStatefulSet(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { Namespace: mdb.Namespace, }, } - if err := e2eutil.TestClient.Delete(context.TODO(), &sts); err != nil { + if err := e2eutil.TestClient.Delete(ctx, &sts); err != nil { t.Fatal(err) } @@ -541,9 +566,9 @@ func DeleteStatefulSet(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { } // Status compares the given status to the actual status of the MongoDB resource -func Status(mdb *mdbv1.MongoDBCommunity, expectedStatus mdbv1.MongoDBCommunityStatus) func(t *testing.T) { +func Status(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedStatus mdbv1.MongoDBCommunityStatus) func(t *testing.T) { return func(t *testing.T) { - if err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, mdb); err != nil { + if err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, mdb); err != nil { t.Fatalf("error getting MongoDB resource: %s", err) } assert.Equal(t, expectedStatus, mdb.Status) @@ -551,10 +576,10 @@ func Status(mdb *mdbv1.MongoDBCommunity, expectedStatus mdbv1.MongoDBCommunitySt } // Scale update the MongoDB with a new number of members and updates the resource. 
-func Scale(mdb *mdbv1.MongoDBCommunity, newMembers int) func(*testing.T) { +func Scale(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newMembers int) func(*testing.T) { return func(t *testing.T) { t.Logf("Scaling Mongodb %s, to %d members", mdb.Name, newMembers) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Members = newMembers }) if err != nil { @@ -564,10 +589,10 @@ func Scale(mdb *mdbv1.MongoDBCommunity, newMembers int) func(*testing.T) { } // ScaleArbiters update the MongoDB with a new number of arbiters and updates the resource. -func ScaleArbiters(mdb *mdbv1.MongoDBCommunity, newArbiters int) func(*testing.T) { +func ScaleArbiters(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newArbiters int) func(*testing.T) { return func(t *testing.T) { t.Logf("Scaling Mongodb %s, to %d members", mdb.Name, newArbiters) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Arbiters = newArbiters }) if err != nil { @@ -577,20 +602,20 @@ func ScaleArbiters(mdb *mdbv1.MongoDBCommunity, newArbiters int) func(*testing.T } // DisableTLS changes the tls.enabled attribute to false. -func DisableTLS(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { - return tls(mdb, false) +func DisableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return tls(ctx, mdb, false) } // EnableTLS changes the tls.enabled attribute to true. -func EnableTLS(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { - return tls(mdb, true) +func EnableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return tls(ctx, mdb, true) } // tls function configures the security.tls.enabled attribute. 
-func tls(mdb *mdbv1.MongoDBCommunity, enabled bool) func(*testing.T) { +func tls(ctx context.Context, mdb *mdbv1.MongoDBCommunity, enabled bool) func(*testing.T) { return func(t *testing.T) { t.Logf("Setting security.tls.enabled to %t", enabled) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Security.TLS.Enabled = enabled }) if err != nil { @@ -599,10 +624,10 @@ func tls(mdb *mdbv1.MongoDBCommunity, enabled bool) func(*testing.T) { } } -func ChangeVersion(mdb *mdbv1.MongoDBCommunity, newVersion string) func(*testing.T) { +func ChangeVersion(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newVersion string) func(*testing.T) { return func(t *testing.T) { t.Logf("Changing versions from: %s to %s", mdb.Spec.Version, newVersion) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Version = newVersion }) if err != nil { @@ -611,10 +636,10 @@ func ChangeVersion(mdb *mdbv1.MongoDBCommunity, newVersion string) func(*testing } } -func ChangePort(mdb *mdbv1.MongoDBCommunity, newPort int) func(*testing.T) { +func ChangePort(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newPort int) func(*testing.T) { return func(t *testing.T) { t.Logf("Changing port from: %d to %d", mdb.GetMongodConfiguration().GetDBPort(), newPort) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.AdditionalMongodConfig.SetDBPort(newPort) }) if err != nil { @@ -623,10 +648,10 @@ func ChangePort(mdb *mdbv1.MongoDBCommunity, newPort int) func(*testing.T) { } } -func AddConnectionStringOption(mdb *mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { +func AddConnectionStringOption(ctx context.Context, mdb 
*mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { return func(t *testing.T) { t.Logf("Adding %s:%v to connection string", key, value) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.AdditionalConnectionStringConfig.SetOption(key, value) }) if err != nil { @@ -635,9 +660,9 @@ func AddConnectionStringOption(mdb *mdbv1.MongoDBCommunity, key string, value in } } -func ResetConnectionStringOptions(mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { +func ResetConnectionStringOptions(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.AdditionalConnectionStringConfig = mdbv1.NewMapWrapper() db.Spec.Users[0].AdditionalConnectionStringConfig = mdbv1.NewMapWrapper() }) @@ -647,10 +672,10 @@ func ResetConnectionStringOptions(mdb *mdbv1.MongoDBCommunity) func(t *testing.T } } -func AddConnectionStringOptionToUser(mdb *mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { +func AddConnectionStringOptionToUser(ctx context.Context, mdb *mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { return func(t *testing.T) { t.Logf("Adding %s:%v to connection string to first user", key, value) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Users[0].AdditionalConnectionStringConfig.SetOption(key, value) }) if err != nil { @@ -659,10 +684,10 @@ func AddConnectionStringOptionToUser(mdb *mdbv1.MongoDBCommunity, key string, va } } -func StatefulSetContainerConditionIsTrue(mdb *mdbv1.MongoDBCommunity, containerName string, condition func(c corev1.Container) bool) 
func(*testing.T) { +func StatefulSetContainerConditionIsTrue(ctx context.Context, mdb *mdbv1.MongoDBCommunity, containerName string, condition func(c corev1.Container) bool) func(*testing.T) { return func(t *testing.T) { sts := appsv1.StatefulSet{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) if err != nil { t.Fatal(err) } @@ -678,10 +703,10 @@ func StatefulSetContainerConditionIsTrue(mdb *mdbv1.MongoDBCommunity, containerN } } -func StatefulSetConditionIsTrue(mdb *mdbv1.MongoDBCommunity, condition func(s appsv1.StatefulSet) bool) func(*testing.T) { +func StatefulSetConditionIsTrue(ctx context.Context, mdb *mdbv1.MongoDBCommunity, condition func(s appsv1.StatefulSet) bool) func(*testing.T) { return func(t *testing.T) { sts := appsv1.StatefulSet{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) if err != nil { t.Fatal(err) } @@ -693,33 +718,33 @@ func StatefulSetConditionIsTrue(mdb *mdbv1.MongoDBCommunity, condition func(s ap } // PodContainerBecomesNotReady waits until the container with 'containerName' in the pod #podNum becomes not ready. 
-func PodContainerBecomesNotReady(mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { +func PodContainerBecomesNotReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { return func(t *testing.T) { pod := podFromMongoDBCommunity(mdb, podNum) - assert.NoError(t, wait.ForPodReadiness(t, false, containerName, time.Minute*10, pod)) + assert.NoError(t, wait.ForPodReadiness(ctx, t, false, containerName, time.Minute*10, pod)) } } // PodContainerBecomesReady waits until the container with 'containerName' in the pod #podNum becomes ready. -func PodContainerBecomesReady(mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { +func PodContainerBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { return func(t *testing.T) { pod := podFromMongoDBCommunity(mdb, podNum) - assert.NoError(t, wait.ForPodReadiness(t, true, containerName, time.Minute*3, pod)) + assert.NoError(t, wait.ForPodReadiness(ctx, t, true, containerName, time.Minute*3, pod)) } } -func ExecInContainer(mdb *mdbv1.MongoDBCommunity, podNum int, containerName, command string) func(*testing.T) { +func ExecInContainer(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName, command string) func(*testing.T) { return func(t *testing.T) { pod := podFromMongoDBCommunity(mdb, podNum) - _, err := e2eutil.TestClient.Execute(pod, containerName, command) + _, err := e2eutil.TestClient.Execute(ctx, pod, containerName, command) assert.NoError(t, err) } } // StatefulSetMessageIsReceived waits (up to 5 minutes) to get desiredMessageStatus as a mongodb message status or returns a fatal error. 
-func StatefulSetMessageIsReceived(mdb *mdbv1.MongoDBCommunity, ctx *e2eutil.Context, desiredMessageStatus string) func(t *testing.T) { +func StatefulSetMessageIsReceived(mdb *mdbv1.MongoDBCommunity, testCtx *e2eutil.TestContext, desiredMessageStatus string) func(t *testing.T) { return func(t *testing.T) { - err := wait.ForMongoDBMessageStatus(t, mdb, time.Second*15, time.Minute*5, desiredMessageStatus) + err := wait.ForMongoDBMessageStatus(testCtx.Ctx, t, mdb, time.Second*15, time.Minute*5, desiredMessageStatus) if err != nil { t.Fatal(err) } @@ -744,3 +769,65 @@ func assertEqualOwnerReference(t *testing.T, resourceType string, resourceNamesp assert.Equal(t, expectedOwnerReference.Name, ownerReferences[0].Name) assert.Equal(t, expectedOwnerReference.UID, ownerReferences[0].UID) } + +func RemoveLastUserFromMongoDBCommunity(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users = db.Spec.Users[:len(db.Spec.Users)-1] + }) + + if err != nil { + t.Fatal(err) + } + } +} + +func EditConnectionStringSecretNameOfLastUser(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newSecretName string) func(*testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users[len(db.Spec.Users)-1].ConnectionStringSecretName = newSecretName + }) + + if err != nil { + t.Fatal(err) + } + } +} + +func ConnectionStringSecretIsCleanedUp(ctx context.Context, mdb *mdbv1.MongoDBCommunity, removedConnectionString string) func(t *testing.T) { + return func(t *testing.T) { + connectionStringSecret := corev1.Secret{} + newErr := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: removedConnectionString, Namespace: mdb.Namespace}, &connectionStringSecret) + + assert.EqualError(t, newErr, fmt.Sprintf("secrets \"%s\" not found", removedConnectionString)) + } +} + +func 
AuthUsersDeletedIsUpdated(ctx context.Context, mdb *mdbv1.MongoDBCommunity, mdbUser mdbv1.MongoDBUser) func(t *testing.T) { + return func(t *testing.T) { + deletedUser := automationconfig.DeletedUser{User: mdbUser.Name, Dbs: []string{mdbUser.DB}} + + currentAc := getAutomationConfig(ctx, t, mdb) + + assert.Contains(t, currentAc.Auth.UsersDeleted, deletedUser) + } +} + +func AddUserToMongoDBCommunity(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newUser mdbv1.MongoDBUser) func(t *testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users = append(db.Spec.Users, newUser) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func floatPtrTostringPtr(floatPtr *float32) *string { + if floatPtr != nil { + stringValue := fmt.Sprintf("%.1f", *floatPtr) + return &stringValue + } + return nil +} diff --git a/test/e2e/prometheus/prometheus_test.go b/test/e2e/prometheus/prometheus_test.go index 39e355019..809b9ca9c 100644 --- a/test/e2e/prometheus/prometheus_test.go +++ b/test/e2e/prometheus/prometheus_test.go @@ -1,6 +1,7 @@ package prometheus import ( + "context" "fmt" "os" "testing" @@ -10,7 +11,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" "github.com/stretchr/testify/assert" ) @@ -23,44 +24,45 @@ func TestMain(m *testing.M) { } func TestPrometheus(t *testing.T) { + ctx := context.Background() resourceName := "mdb0" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) 
mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - mdb.Spec.Prometheus = e2eutil.NewPrometheusConfig(mdb.Namespace) + mdb.Spec.Prometheus = e2eutil.NewPrometheusConfig(ctx, mdb.Namespace) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB with Prometheus configuration", func(t *testing.T) { - t.Run("Resource has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) - t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) + t.Run("Resource has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) t.Run("Test Prometheus endpoint is active", tester.PrometheusEndpointIsReachable("prom-user", "prom-password", false)) - t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(mdb))) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Enabling HTTPS on the Prometheus endpoint", func(t *testing.T) { - err = e2eutil.UpdateMongoDBResource(&mdb, func(mdb *v1.MongoDBCommunity) { + err = e2eutil.UpdateMongoDBResource(ctx, &mdb, func(mdb 
*v1.MongoDBCommunity) { mdb.Spec.Prometheus.TLSSecretRef.Name = "tls-certificate" }) assert.NoError(t, err) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) t.Run("Test Prometheus HTTPS endpoint is active", tester.PrometheusEndpointIsReachable("prom-user", "prom-password", true)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 2)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 2)) }) }) } diff --git a/test/e2e/replica_set/replica_set_test.go b/test/e2e/replica_set/replica_set_test.go index 436c1952a..4dfa5327f 100644 --- a/test/e2e/replica_set/replica_set_test.go +++ b/test/e2e/replica_set/replica_set_test.go @@ -1,10 +1,12 @@ package replica_set import ( + "context" "fmt" "os" "testing" + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" @@ -20,14 +22,18 @@ func TestMain(m *testing.M) { os.Exit(code) } +func intPtr(x int) *int { return &x } +func strPtr(s string) *string { return &s } + func TestReplicaSet(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } @@ -54,21 +60,50 @@ func TestReplicaSet(t *testing.T) { mdb.Spec.AgentConfiguration.LogRotate = &lcr 
mdb.Spec.AgentConfiguration.SystemLog = &systemLog - tester, err := FromResource(t, mdb) + // config member options + memberOptions := []automationconfig.MemberOptions{ + { + Votes: intPtr(1), + Tags: map[string]string{"foo1": "bar1"}, + Priority: strPtr("1.5"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo2": "bar2"}, + Priority: strPtr("1.0"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo3": "bar3"}, + Priority: strPtr("2.5"), + }, + } + mdb.Spec.MemberConfig = memberOptions + + settings := map[string]interface{}{ + "electionTimeoutMillis": float64(20), + } + mdb.Spec.AutomationConfigOverride = &v1.AutomationConfigOverride{ + ReplicaSet: v1.OverrideReplicaSet{Settings: v1.MapWrapper{Object: settings}}, + } + + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) - t.Run("AutomationConfig has the correct logRotateConfig", mongodbtests.AutomationConfigHasLogRotationConfig(&mdb, &lcr)) + t.Run("AutomationConfig has the correct logRotateConfig", mongodbtests.AutomationConfigHasLogRotationConfig(ctx, &mdb, &lcr)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - 
tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("AutomationConfig has correct member options", mongodbtests.AutomationConfigHasVoteTagPriorityConfigured(ctx, &mdb, memberOptions)) + t.Run("AutomationConfig has correct settings", mongodbtests.AutomationConfigHasSettings(ctx, &mdb, settings)) } diff --git a/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go b/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go index d8d0057c3..0906dd900 100644 --- a/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go +++ b/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go @@ -1,13 +1,14 @@ package replica_set import ( + "context" "fmt" "os" "testing" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" "github.com/stretchr/testify/assert" ) @@ -25,8 +26,9 @@ func Test(t *testing.T) { } func TestReplicaSetArbiter(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() type args struct { numberOfArbiters int @@ -67,44 +69,44 @@ func TestReplicaSetArbiter(t *testing.T) { resourceName: "mdb4", }, } - for testName, _ := range tests { + for testName := range tests { t.Run(testName, func(t *testing.T) { - 
testConfig, _ := tests[testName] - mdb, user := e2eutil.NewTestMongoDB(ctx, testConfig.resourceName, "") + testConfig := tests[testName] + mdb, user := e2eutil.NewTestMongoDB(testCtx, testConfig.resourceName, "") mdb.Spec.Arbiters = testConfig.numberOfArbiters mdb.Spec.Members = testConfig.numberOfMembers // FIXME: This behavior has been changed in 6.x timeline and now the arbiter (nor the RS) can't reach the goal state. mdb.Spec.Version = "4.4.19" - pwd, err := setup.GeneratePasswordForUser(ctx, user, "") + pwd, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) if len(testConfig.expectedErrorMessage) > 0 { - t.Run("Check status", mongodbtests.StatefulSetMessageIsReceived(&mdb, ctx, testConfig.expectedErrorMessage)) + t.Run("Check status", mongodbtests.StatefulSetMessageIsReceived(&mdb, testCtx, testConfig.expectedErrorMessage)) } else { - t.Run("Check that the stateful set becomes ready", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Check the number of arbiters", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(&mdb, testConfig.numberOfArbiters)) + t.Run("Check that the stateful set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Check the number of arbiters", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(ctx, &mdb, testConfig.numberOfArbiters)) if testConfig.numberOfArbiters != testConfig.scaleArbitersTo { - t.Run(fmt.Sprintf("Scale Arbiters to %v", testConfig.scaleArbitersTo), mongodbtests.ScaleArbiters(&mdb, testConfig.scaleArbitersTo)) - t.Run("Arbiters Stateful Set Scaled Correctly", mongodbtests.ArbitersStatefulSetBecomesReady(&mdb)) + t.Run(fmt.Sprintf("Scale Arbiters to %v", testConfig.scaleArbitersTo), mongodbtests.ScaleArbiters(ctx, &mdb, testConfig.scaleArbitersTo)) + t.Run("Arbiters Stateful 
Set Scaled Correctly", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) } - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) t.Run("Test SRV Connectivity with generated connection string secret", func(t *testing.T) { - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } scramUser := mdb.GetAuthUsers()[0] expectedCnxStr := fmt.Sprintf("mongodb+srv://%s-user:%s@%s-svc.%s.svc.cluster.local/admin?replicaSet=%s&ssl=false", mdb.Name, pwd, mdb.Name, mdb.Namespace, mdb.Name) - cnxStrSrv := mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser) + cnxStrSrv := mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser) assert.Equal(t, expectedCnxStr, cnxStrSrv) tester.ConnectivitySucceeds(mongotester.WithURI(cnxStrSrv)) }) } - t.Run("Delete MongoDB Resource", mongodbtests.DeleteMongoDBResource(&mdb, ctx)) + t.Run("Delete MongoDB Resource", mongodbtests.DeleteMongoDBResource(&mdb, testCtx)) }) } } diff --git a/test/e2e/replica_set_authentication/replica_set_authentication_test.go b/test/e2e/replica_set_authentication/replica_set_authentication_test.go index 3f20a0c93..38dbcd962 100644 --- a/test/e2e/replica_set_authentication/replica_set_authentication_test.go +++ b/test/e2e/replica_set_authentication/replica_set_authentication_test.go @@ -1,6 +1,7 @@ package replica_set_authentication import ( + "context" "fmt" "os" "testing" @@ -11,7 +12,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -23,22 +24,23 @@ func TestMain(m *testing.M) { } func TestReplicaSetAuthentication(t *testing.T) 
{ - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") - pw, err := setup.GeneratePasswordForUser(ctx, user, "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + pw, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) // Run all the possible configuration using sha256 or sha1 - t.Run("Auth test with SHA-256", testConfigAuthentication(mdb, user, pw)) - t.Run("Auth test with SHA-256 and SHA-1", testConfigAuthentication(mdb, user, pw, withSha1())) - t.Run("Auth test with SHA-256 (using label)", testConfigAuthentication(mdb, user, pw, withLabeledSha256())) - t.Run("Auth test with SHA-256 (using label) and SHA-1", testConfigAuthentication(mdb, user, pw, withSha1(), withLabeledSha256())) - t.Run("Auth test with SHA-1", testConfigAuthentication(mdb, user, pw, withSha1(), withoutSha256())) + t.Run("Auth test with SHA-256", testConfigAuthentication(ctx, mdb, user, pw)) + t.Run("Auth test with SHA-256 and SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1())) + t.Run("Auth test with SHA-256 (using label)", testConfigAuthentication(ctx, mdb, user, pw, withLabeledSha256())) + t.Run("Auth test with SHA-256 (using label) and SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1(), withLabeledSha256())) + t.Run("Auth test with SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1(), withoutSha256())) } type authOptions struct { @@ -63,7 +65,7 @@ func withSha1() func(*authOptions) { } // testConfigAuthentication run the tests using the auth options to update mdb and then checks that the resources are correctly configured -func testConfigAuthentication(mdb mdbv1.MongoDBCommunity, user mdbv1.MongoDBUser, pw 
string, allOptions ...func(*authOptions)) func(t *testing.T) { +func testConfigAuthentication(ctx context.Context, mdb mdbv1.MongoDBCommunity, user mdbv1.MongoDBUser, pw string, allOptions ...func(*authOptions)) func(t *testing.T) { return func(t *testing.T) { pickedOpts := authOptions{ @@ -92,19 +94,19 @@ func testConfigAuthentication(mdb mdbv1.MongoDBCommunity, user mdbv1.MongoDBUser } } - err := e2eutil.UpdateMongoDBResource(&mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Security.Authentication.Modes = acceptedModes }) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) if pickedOpts.sha256 { t.Run("Test Basic Connectivity with accepted auth", tester.ConnectivitySucceeds(WithScramWithAuth(user.Name, pw, "SCRAM-SHA-256"))) } else { diff --git a/test/e2e/replica_set_change_version/replica_set_change_version_test.go b/test/e2e/replica_set_change_version/replica_set_change_version_test.go index 0016ec4e0..4d022f9d7 100644 --- a/test/e2e/replica_set_change_version/replica_set_change_version_test.go +++ b/test/e2e/replica_set_change_version/replica_set_change_version_test.go @@ -1,6 +1,7 @@ package replica_set import ( + "context" "fmt" "os" "testing" @@ -10,7 +11,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" appsv1 "k8s.io/api/apps/v1" ) @@ -24,50 +25,51 @@ func TestMain(m *testing.M) { } func TestReplicaSetUpgradeVersion(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := 
setup.Setup(ctx, t) + defer testCtx.Teardown() const initialMDBVersion = "4.4.18" const upgradedMDBVersion = "5.0.12" const upgradedWithIncreasedPatchMDBVersion = "5.0.15" - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.Version = initialMDBVersion - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) // Upgrade minor version to upgradedMDBVersion t.Run("MongoDB is reachable while minor version is upgraded", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Test Minor Version can be upgraded", mongodbtests.ChangeVersion(&mdb, upgradedMDBVersion)) - t.Run("StatefulSet has OnDelete update strategy", mongodbtests.StatefulSetHasUpdateStrategy(&mdb, appsv1.OnDeleteStatefulSetStrategyType)) - t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 2)) + t.Run("Test Minor Version can be upgraded", 
mongodbtests.ChangeVersion(ctx, &mdb, upgradedMDBVersion)) + t.Run("StatefulSet has OnDelete update strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.OnDeleteStatefulSetStrategyType)) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 2)) }) - t.Run("StatefulSet has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(&mdb, appsv1.RollingUpdateStatefulSetStrategyType)) + t.Run("StatefulSet has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.RollingUpdateStatefulSetStrategyType)) // Upgrade patch version to upgradedWithIncreasedPatchMDBVersion t.Run("MongoDB is reachable while patch version is upgraded", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Test Patch Version can be upgraded", mongodbtests.ChangeVersion(&mdb, upgradedWithIncreasedPatchMDBVersion)) - t.Run("StatefulSet has OnDelete restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(&mdb, appsv1.OnDeleteStatefulSetStrategyType)) - t.Run("Stateful Set Reaches Ready State, after upgrading", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) + t.Run("Test Patch Version can be upgraded", mongodbtests.ChangeVersion(ctx, &mdb, upgradedWithIncreasedPatchMDBVersion)) + t.Run("StatefulSet has OnDelete restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.OnDeleteStatefulSetStrategyType)) + t.Run("Stateful Set Reaches Ready State, after upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) }) - t.Run("StatefulSet 
has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(&mdb, appsv1.RollingUpdateStatefulSetStrategyType)) + t.Run("StatefulSet has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.RollingUpdateStatefulSetStrategyType)) } diff --git a/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go b/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go index 878509f81..6358f9d3a 100644 --- a/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go +++ b/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go @@ -1,6 +1,7 @@ package replica_set_connection_string_options import ( + "context" "fmt" "os" "testing" @@ -9,7 +10,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -21,89 +22,90 @@ func TestMain(m *testing.M) { } func TestReplicaSetWithConnectionString(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", 
mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) /** User options only. */ t.Run("Connection String With User Options Only", func(t *testing.T) { - t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(&mdb, "readPreference", "primary")) - t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) scramUser = mdb.GetAuthUsers()[0] t.Run("Test Basic Connectivity With User Options", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity With User Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret with user options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret with user options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) }) /** General options only. 
*/ t.Run("Connection String With General Options Only", func(t *testing.T) { - t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(&mdb)) - t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(&mdb, "readPreference", "primary")) - t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(ctx, &mdb)) + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) scramUser = mdb.GetAuthUsers()[0] t.Run("Test Basic Connectivity With Resource Options", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity With Resource Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret with resource options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret with resource options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) }) /** Overwritten options. 
*/ t.Run("Connection String With Overwritten Options", func(t *testing.T) { - t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(&mdb, "readPreference", "primary")) - t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(&mdb, "readPreference", "secondary")) - t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(ctx, &mdb, "readPreference", "secondary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) scramUser = mdb.GetAuthUsers()[0] t.Run("Test Basic Connectivity With Overwritten Options", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity With Overwritten Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret with overwritten options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret with overwritten options", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) }) /** Wrong options. 
*/ t.Run("Connection String With Wrong Options", func(t *testing.T) { - t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(&mdb)) - t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(&mdb, "readPreference", "wrong")) - t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(ctx, &mdb)) + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "wrong")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) scramUser = mdb.GetAuthUsers()[0] - t.Run("Test Basic Connectivity", tester.ConnectivityRejected(WithURI(mdb.MongoURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) - t.Run("Test SRV Connectivity", tester.ConnectivityRejected(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity", tester.ConnectivityRejected(ctx, WithURI(mdb.MongoURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test SRV Connectivity", tester.ConnectivityRejected(ctx, WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivityRejected(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivityRejected(ctx, WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - tester.ConnectivityRejected(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivityRejected(ctx, WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) }) } diff --git a/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go 
b/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go index afd961ee1..2bd41ed86 100644 --- a/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go +++ b/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go @@ -15,7 +15,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -27,8 +27,9 @@ func TestMain(m *testing.M) { } func TestCrossNamespaceDeploy(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() postfix, err := generate.RandomValidDNS1123Label(5) if err != nil { @@ -36,40 +37,40 @@ func TestCrossNamespaceDeploy(t *testing.T) { } namespace := fmt.Sprintf("clusterwide-test-%s", postfix) - err = e2eutil.EnsureNamespace(ctx, namespace) + err = e2eutil.EnsureNamespace(testCtx, namespace) if err != nil { t.Fatal(err) } - if err := createDatabaseServiceAccountRoleAndRoleBinding(t, namespace); err != nil { + if err := createDatabaseServiceAccountRoleAndRoleBinding(ctx, t, namespace); err != nil { t.Fatal(err) } - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", namespace) - _, err = setup.GeneratePasswordForUser(ctx, user, namespace) + _, err = setup.GeneratePasswordForUser(testCtx, user, namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", 
mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) } // createDatabaseServiceAccountRoleAndRoleBinding creates the ServiceAccount, Role and RoleBinding required // for the database StatefulSet in the other namespace. -func createDatabaseServiceAccountRoleAndRoleBinding(t *testing.T, namespace string) error { +func createDatabaseServiceAccountRoleAndRoleBinding(ctx context.Context, t *testing.T, namespace string) error { sa := corev1.ServiceAccount{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &sa) + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &sa) if err != nil { t.Fatal(err) } @@ -77,13 +78,13 @@ func createDatabaseServiceAccountRoleAndRoleBinding(t *testing.T, namespace stri sa.Namespace = namespace sa.ObjectMeta.ResourceVersion = "" - err = e2eutil.TestClient.Create(context.TODO(), &sa, &e2eutil.CleanupOptions{}) + err = e2eutil.TestClient.Create(ctx, &sa, &e2eutil.CleanupOptions{}) if err != nil { t.Fatal(err) } role := rbacv1.Role{} - err = e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &role) + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &role) if err != nil { t.Fatal(err) } @@ -91,13 +92,13 @@ func 
createDatabaseServiceAccountRoleAndRoleBinding(t *testing.T, namespace stri role.Namespace = namespace role.ObjectMeta.ResourceVersion = "" - err = e2eutil.TestClient.Create(context.TODO(), &role, &e2eutil.CleanupOptions{}) + err = e2eutil.TestClient.Create(ctx, &role, &e2eutil.CleanupOptions{}) if err != nil { t.Fatal(err) } rolebinding := rbacv1.RoleBinding{} - err = e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &rolebinding) + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &rolebinding) if err != nil { t.Fatal(err) } @@ -105,7 +106,7 @@ func createDatabaseServiceAccountRoleAndRoleBinding(t *testing.T, namespace stri rolebinding.Namespace = namespace rolebinding.ObjectMeta.ResourceVersion = "" - err = e2eutil.TestClient.Create(context.TODO(), &rolebinding, &e2eutil.CleanupOptions{}) + err = e2eutil.TestClient.Create(ctx, &rolebinding, &e2eutil.CleanupOptions{}) if err != nil { t.Fatal(err) } diff --git a/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go b/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go index 5f5f5dc84..d92d5db1b 100644 --- a/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go +++ b/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go @@ -1,6 +1,7 @@ package replica_set_custom_annotations_test import ( + "context" "fmt" v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" @@ -22,10 +23,11 @@ func TestMain(m *testing.M) { } func TestReplicaSetCustomAnnotations(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := 
e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.ObjectMeta = metav1.ObjectMeta{ Labels: e2eutil.TestLabels(), Annotations: e2eutil.TestAnnotations(), @@ -52,26 +54,26 @@ func TestReplicaSetCustomAnnotations(t *testing.T) { } scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) - t.Run("Cluster has the expected labels and annotations", mongodbtests.HasExpectedMetadata(&mdb, 
e2eutil.TestLabels(), e2eutil.TestAnnotations())) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Cluster has the expected labels and annotations", mongodbtests.HasExpectedMetadata(ctx, &mdb, e2eutil.TestLabels(), e2eutil.TestAnnotations())) } diff --git a/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go b/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go index a814dd29f..db16c5ebe 100644 --- a/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go +++ b/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -65,7 +65,7 @@ func getPersistentVolumeLocal(name string, localPath string, label string) corev // getVolumes returns two persistentVolumes for each of the `members` pod. 
// one volume will be for the `data` claim and the other will be for the `logs` claim -func getVolumes(ctx *e2eutil.Context, volumeType string, members int) []corev1.PersistentVolume { +func getVolumes(ctx *e2eutil.TestContext, volumeType string, members int) []corev1.PersistentVolume { volumes := make([]corev1.PersistentVolume, members) for i := 0; i < members; i++ { volumes[i] = getPersistentVolumeLocal( @@ -94,7 +94,7 @@ func getPvc(pvcType string, mdb v1.MongoDBCommunity) corev1.PersistentVolumeClai Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"type": pvcType}, }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: map[corev1.ResourceName]resource.Quantity{"storage": *resource.NewScaledQuantity(int64(8), resource.Giga)}, }, StorageClassName: &defaultStorageClass, @@ -103,43 +103,44 @@ func getPvc(pvcType string, mdb v1.MongoDBCommunity) corev1.PersistentVolumeClai } func TestReplicaSetCustomPersistentVolumes(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{ getPvc("data", mdb), getPvc("logs", mdb), } - volumesToCreate := getVolumes(ctx, "data", mdb.Spec.Members) - volumesToCreate = append(volumesToCreate, getVolumes(ctx, "logs", mdb.Spec.Members)...) + volumesToCreate := getVolumes(testCtx, "data", mdb.Spec.Members) + volumesToCreate = append(volumesToCreate, getVolumes(testCtx, "logs", mdb.Spec.Members)...) 
for i := range volumesToCreate { - err := e2eutil.TestClient.Create(context.TODO(), &volumesToCreate[i], &e2eutil.CleanupOptions{TestContext: ctx}) + err := e2eutil.TestClient.Create(ctx, &volumesToCreate[i], &e2eutil.CleanupOptions{TestContext: testCtx}) assert.NoError(t, err) } scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) - t.Run("Cluster has the expected persistent volumes", mongodbtests.HasExpectedPersistentVolumes(volumesToCreate)) 
+ t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Cluster has the expected persistent volumes", mongodbtests.HasExpectedPersistentVolumes(ctx, volumesToCreate)) } diff --git a/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go b/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go index db120ba4c..54075a71d 100644 --- a/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go +++ b/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go @@ -1,6 +1,7 @@ package replica_set_custom_role import ( + "context" "fmt" "os" "testing" @@ -10,7 +11,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -22,15 +23,16 @@ func TestMain(m *testing.M) { } func TestReplicaSetCustomRole(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() someDB := "test" someCollection := "foo" anyDB := "" anyCollection := "" - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.Security.Roles = []mdbv1.CustomRole{ { Role: "testRole", @@ -69,28 +71,41 @@ func TestReplicaSetCustomRole(t *testing.T) { }}, Roles: []mdbv1.Role{}, }, + { + Role: "MongodbAutomationAgentUserRole", + DB: "admin", + Privileges: []mdbv1.Privilege{ + { + Resource: mdbv1.Resource{ + AnyResource: true, + }, + Actions: []string{"bypassDefaultMaxTimeMS"}, + }, + }, + Roles: []mdbv1.Role{}, + }, } - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, 
mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) // Verify automation config roles and roles created in admin database. roles := mdbv1.ConvertCustomRolesToAutomationConfigCustomRole(mdb.Spec.Security.Roles) - t.Run("AutomationConfig has the correct custom role", mongodbtests.AutomationConfigHasTheExpectedCustomRoles(&mdb, roles)) + t.Run("AutomationConfig has the correct custom role", mongodbtests.AutomationConfigHasTheExpectedCustomRoles(ctx, &mdb, roles)) t.Run("Custom Role was created ", tester.VerifyRoles(roles, 1)) } diff --git a/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go b/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go index b3c6602e1..ff6930252 100644 --- a/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go +++ b/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go @@ -1,48 +1,49 @@ package replica_set_enterprise_upgrade import ( + "context" "fmt" "testing" "time" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" - "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" ) -func DeployEnterpriseAndUpgradeTest(t *testing.T, versionsToBeTested []string) { - t.Setenv(construct.MongodbRepoUrl, "docker.io/mongodb") +func DeployEnterpriseAndUpgradeTest(ctx context.Context, t *testing.T, versionsToBeTested []string) { + t.Setenv(construct.MongodbRepoUrlEnv, "docker.io/mongodb") t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") - ctx := setup.Setup(t) - defer ctx.Teardown() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") mdb.Spec.Version = versionsToBeTested[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) for i := 1; i < len(versionsToBeTested); i++ { t.Run(fmt.Sprintf("Testing upgrade from %s to %s", versionsToBeTested[i-1], versionsToBeTested[i]), func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run(fmt.Sprintf("Upgrading 
to %s", versionsToBeTested[i]), mongodbtests.ChangeVersion(&mdb, versionsToBeTested[i])) - t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, i+1)) + t.Run(fmt.Sprintf("Upgrading to %s", versionsToBeTested[i]), mongodbtests.ChangeVersion(ctx, &mdb, versionsToBeTested[i])) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, i+1)) }) } } diff --git a/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go b/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go index ff7b19dc7..298829059 100644 --- a/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go +++ b/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go @@ -1,6 +1,7 @@ package replica_set import ( + "context" "fmt" "os" "testing" @@ -23,5 +24,6 @@ func TestMain(m *testing.M) { } func TestReplicaSet(t *testing.T) { - replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(t, versionsForUpgrades) + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) } diff --git a/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go b/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go index 5c3ecf509..0e0eedef5 100644 --- a/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go +++ 
b/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go @@ -1,6 +1,7 @@ package replica_set import ( + "context" "fmt" "os" "testing" @@ -22,5 +23,6 @@ func TestReplicaSet(t *testing.T) { - replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(t, versionsForUpgrades) + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) } diff --git a/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_6_7_test.go b/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_6_7_test.go new file mode 100644 index 000000000..c447ca6c6 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_6_7_test.go @@ -0,0 +1,28 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" +) + +var ( + versionsForUpgrades = []string{"6.0.5", "7.0.2"} +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_7_8_test.go b/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_7_8_test.go new file mode 100644 index 000000000..00cdf8f10 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_7_8_test.go @@ -0,0 +1,28 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" +) + +var ( + versionsForUpgrades = []string{"7.0.12", "8.0.0"} +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go b/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go index 6954e85a4..1a009c812 100644 --- a/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go +++ b/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go @@ -1,6 +1,7 @@ package replica_set_mongod_config import ( + "context" "fmt" "os" "testing" @@ -22,12 +23,13 @@ func TestMain(m *testing.M) { } func TestReplicaSet(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } @@ -53,19 +55,19 @@ func TestReplicaSet(t *testing.T) { mdb.Spec.AdditionalMongodConfig.Object = mongodConfig - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) t.Run("Test Basic Connectivity", 
tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) for i := range settings { t.Run(fmt.Sprintf("Mongod setting %s has been set", settings[i]), tester.EnsureMongodConfig(settings[i], values[i])) } t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(40333))) - t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(&mdb, 40333)) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, 40333)) } diff --git a/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go b/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go index 91d6f72de..f398e36fc 100644 --- a/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go +++ b/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go @@ -1,6 +1,7 @@ package replica_set_mongod_config import ( + "context" "fmt" "os" "testing" @@ -11,7 +12,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -23,28 +24,29 @@ func TestMain(m *testing.M) { } func TestReplicaSetMongodPortChangeWithArbiters(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") // FIXME: 
This behavior has been changed in 6.x timeline and now the arbiter (nor the RS) can't reach the goal state. mdb.Spec.Version = "4.4.19" scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } connectivityTests := func(t *testing.T) { - fmt.Printf("connectionStringForUser: %s\n", mongodbtests.GetConnectionStringForUser(mdb, scramUser)) + fmt.Printf("connectionStringForUser: %s\n", mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) // FIXME after port change in the service mongodb+srv connection stopped working! 
//t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) @@ -52,30 +54,30 @@ func TestReplicaSetMongodPortChangeWithArbiters(t *testing.T) { // tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(automationconfig.DefaultDBPort))) - t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(&mdb, int32(automationconfig.DefaultDBPort))) - t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Wait for MongoDB to finish setup cluster", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, int32(automationconfig.DefaultDBPort))) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for MongoDB to finish setup cluster", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) t.Run("Connectivity tests", connectivityTests) - t.Run("Scale to 1 Arbiter", mongodbtests.ScaleArbiters(&mdb, 1)) - t.Run("Wait for MongoDB to start scaling arbiters", mongodbtests.MongoDBReachesPendingPhase(&mdb)) - t.Run("Wait for MongoDB to finish scaling arbiters", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - 
t.Run("Automation config has expecter arbiter", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(&mdb, 1)) - t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(&mdb)) + t.Run("Scale to 1 Arbiter", mongodbtests.ScaleArbiters(ctx, &mdb, 1)) + t.Run("Wait for MongoDB to start scaling arbiters", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to finish scaling arbiters", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Automation config has expected arbiter", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(ctx, &mdb, 1)) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) t.Run("Connectivity tests", connectivityTests) - t.Run("Change port of running cluster", mongodbtests.ChangePort(&mdb, 40333)) - t.Run("Wait for MongoDB to start changing port", mongodbtests.MongoDBReachesPendingPhase(&mdb)) - t.Run("Wait for MongoDB to finish changing port", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(&mdb)) + t.Run("Change port of running cluster", mongodbtests.ChangePort(ctx, &mdb, 40333)) + t.Run("Wait for MongoDB to start changing port", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to finish changing port", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(40333))) - 
t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(&mdb, int32(40333))) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, int32(40333))) t.Run("Connectivity tests", connectivityTests) } diff --git a/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go b/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go index be8652690..d82837fb9 100644 --- a/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go +++ b/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go @@ -1,13 +1,14 @@ package replica_set_mongod_readiness import ( + "context" "fmt" "os" "testing" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -19,32 +20,33 @@ func TestMain(m *testing.M) { } func TestReplicaSet(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Ensure Agent container is marked as non-ready", func(t *testing.T) { - t.Run("Break mongod data files", mongodbtests.ExecInContainer(&mdb, 0, "mongod", "mkdir /data/tmp; mv /data/WiredTiger.wt 
/data/tmp")) + t.Run("Break mongod data files", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongod", "mkdir /data/tmp; mv /data/WiredTiger.wt /data/tmp")) // Just moving the file doesn't fail the mongod until any data is written - the easiest way is to kill the mongod // and in this case it won't restart - t.Run("Kill mongod process", mongodbtests.ExecInContainer(&mdb, 0, "mongod", "kill 1")) + t.Run("Kill mongod process", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongod", "kill 1")) // CLOUDP-89260: mongod uptime 1 minute and readiness probe failureThreshold 40 (40 * 5 -> 200 seconds) // note, that this may take much longer on evergreen than locally - t.Run("Pod agent container becomes not-ready", mongodbtests.PodContainerBecomesNotReady(&mdb, 0, "mongodb-agent")) + t.Run("Pod agent container becomes not-ready", mongodbtests.PodContainerBecomesNotReady(ctx, &mdb, 0, "mongodb-agent")) }) t.Run("Ensure Agent container gets fixed", func(t *testing.T) { // Note, that we call this command on the 'mongodb-agent' container as the 'mongod' container is down and we cannot // execute shell there. But both containers share the same /data directory so we can do it from any of them. 
- t.Run("Fix mongod data files", mongodbtests.ExecInContainer(&mdb, 0, "mongodb-agent", "mv /data/tmp/WiredTiger.wt /data/")) + t.Run("Fix mongod data files", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongodb-agent", "mv /data/tmp/WiredTiger.wt /data/")) // Eventually the agent will start mongod again - t.Run("Pod agent container becomes ready", mongodbtests.PodContainerBecomesReady(&mdb, 0, "mongodb-agent")) + t.Run("Pod agent container becomes ready", mongodbtests.PodContainerBecomesReady(ctx, &mdb, 0, "mongodb-agent")) }) } diff --git a/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go b/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go index 5f5c3bf51..b4a03cbec 100644 --- a/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go +++ b/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go @@ -16,7 +16,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -63,49 +63,50 @@ func createPythonTestPod(idx int, namespace, secretName, secretKey string) corev } func TestMountConnectionString(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") scramUser := mdb.GetAuthUsers()[0] - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create 
MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Application Pod can connect to MongoDB using the generated standard connection string.", func(t *testing.T) { testPod := createPythonTestPod(0, mdb.Namespace, fmt.Sprintf("%s-admin-%s", mdb.Name, user.Name), "connectionString.standard") - err := e2eutil.TestClient.Create(context.TODO(), &testPod, &e2eutil.CleanupOptions{ - TestContext: ctx, + err := e2eutil.TestClient.Create(ctx, &testPod, &e2eutil.CleanupOptions{ + TestContext: testCtx, }) assert.NoError(t, err) - assert.NoError(t, wait.ForPodPhase(t, time.Minute*5, testPod, corev1.PodSucceeded)) + assert.NoError(t, 
wait.ForPodPhase(ctx, t, time.Minute*5, testPod, corev1.PodSucceeded)) }) t.Run("Application Pod can connect to MongoDB using the generated secret SRV connection string", func(t *testing.T) { testPod := createPythonTestPod(1, mdb.Namespace, fmt.Sprintf("%s-admin-%s", mdb.Name, user.Name), "connectionString.standardSrv") - err := e2eutil.TestClient.Create(context.TODO(), &testPod, &e2eutil.CleanupOptions{ - TestContext: ctx, + err := e2eutil.TestClient.Create(ctx, &testPod, &e2eutil.CleanupOptions{ + TestContext: testCtx, }) assert.NoError(t, err) - assert.NoError(t, wait.ForPodPhase(t, time.Minute*5, testPod, corev1.PodSucceeded)) + assert.NoError(t, wait.ForPodPhase(ctx, t, time.Minute*5, testPod, corev1.PodSucceeded)) }) } diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go index b6c173197..a38786eb0 100644 --- a/test/e2e/replica_set_multiple/replica_set_multiple_test.go +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -1,6 +1,7 @@ package replica_set_multiple import ( + "context" "fmt" "os" "testing" @@ -11,7 +12,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -25,60 +26,60 @@ func TestMain(m *testing.M) { // TestReplicaSetMultiple creates two MongoDB resources that are handled by the Operator at the // same time. 
One of them is scaled to 5 and then back to 3 func TestReplicaSetMultiple(t *testing.T) { + ctx := context.Background() - ctx := setup.Setup(t) - defer ctx.Teardown() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb0, user0 := e2eutil.NewTestMongoDB(ctx, "mdb0", "") - mdb1, user1 := e2eutil.NewTestMongoDB(ctx, "mdb1", "") + mdb0, user0 := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb1, user1 := e2eutil.NewTestMongoDB(testCtx, "mdb1", "") - _, err := setup.GeneratePasswordForUser(ctx, user0, "") + _, err := setup.GeneratePasswordForUser(testCtx, user0, "") if err != nil { t.Fatal(err) } - _, err = setup.GeneratePasswordForUser(ctx, user1, "") + _, err = setup.GeneratePasswordForUser(testCtx, user1, "") if err != nil { t.Fatal(err) } - tester0, err := mongotester.FromResource(t, mdb0) + tester0, err := mongotester.FromResource(ctx, t, mdb0) if err != nil { t.Fatal(err) } - tester1, err := mongotester.FromResource(t, mdb1) + tester1, err := mongotester.FromResource(ctx, t, mdb1) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource mdb0", mongodbtests.CreateMongoDBResource(&mdb0, ctx)) - t.Run("Create MongoDB Resource mdb1", mongodbtests.CreateMongoDBResource(&mdb1, ctx)) + t.Run("Create MongoDB Resource mdb0", mongodbtests.CreateMongoDBResource(&mdb0, testCtx)) + t.Run("Create MongoDB Resource mdb1", mongodbtests.CreateMongoDBResource(&mdb1, testCtx)) - t.Run("mdb0: Basic tests", mongodbtests.BasicFunctionality(&mdb0)) - t.Run("mdb1: Basic tests", mongodbtests.BasicFunctionality(&mdb1)) + t.Run("mdb0: Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb0)) + t.Run("mdb1: Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb1)) t.Run("mdb0: Test Basic Connectivity", tester0.ConnectivitySucceeds()) t.Run("mdb1: Test Basic Connectivity", tester1.ConnectivitySucceeds()) - t.Run("mdb0: AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 1)) - t.Run("mdb1: AutomationConfig has the 
correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb1, 1)) + t.Run("mdb0: AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb0, 1)) + t.Run("mdb1: AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb1, 1)) t.Run("mdb0: Ensure Authentication", tester0.EnsureAuthenticationIsConfigured(3)) t.Run("mdb1: Ensure Authentication", tester1.EnsureAuthenticationIsConfigured(3)) t.Run("MongoDB is reachable while being scaled up", func(t *testing.T) { defer tester0.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb0, 5)) - t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(&mdb0)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb0.MongoURI(""), - Phase: mdbv1.Running, - CurrentMongoDBMembers: 5, - CurrentStatefulSetReplicas: 5, - })) + t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(ctx, &mdb0, 5)) + t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(ctx, &mdb0)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb0)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb0, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb0, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb0.MongoURI(""), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, + })) // TODO: Currently the scale down process takes too long to reasonably include this in the test //t.Run("Scale MongoDB Resource Down", 
mongodbtests.Scale(&mdb0, 3)) diff --git a/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go b/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go index d45f70457..726c52514 100644 --- a/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go +++ b/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go @@ -1,6 +1,7 @@ package replica_set_operator_upgrade import ( + "context" "fmt" "os" "testing" @@ -21,54 +22,58 @@ func TestMain(m *testing.M) { } func TestReplicaSetOperatorUpgrade(t *testing.T) { + ctx := context.Background() resourceName := "mdb0" testConfig := setup.LoadTestConfigFromEnv() - ctx := setup.SetupWithTestConfig(t, testConfig, true, true, resourceName) - defer ctx.Teardown() + testCtx := setup.SetupWithTestConfig(ctx, t, testConfig, true, true, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + // Prior operator versions did not support MDB7 + mdb.Spec.Version = "6.0.5" scramUser := mdb.GetAuthUsers()[0] mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) mdb.Spec.Arbiters = 1 mdb.Spec.Members = 2 - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb, true)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) + 
t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { - t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) - t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(mdb))) - t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(mdb))) + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(ctx, mdb))) t.Run("Basic Connectivity With Generated Connection String Secret Succeeds", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)), WithTls(mdb))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)), WithTls(mdb))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) t.Run("Connectivity Fails", tester.ConnectivityFails(WithoutTls())) - t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(mdb))) + t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) }) // upgrade the operator to master config := setup.LoadTestConfigFromEnv() - err = setup.DeployOperator(config, resourceName, true, false) + err = setup.DeployOperator(ctx, config, resourceName, true, false) assert.NoError(t, err) // Perform the basic tests - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb, true)) + 
t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) } // TestReplicaSetOperatorUpgradeFrom0_7_2 is intended to be run locally not in CI. // It simulates deploying cluster using community operator 0.7.2 and then upgrading it using newer version. func TestReplicaSetOperatorUpgradeFrom0_7_2(t *testing.T) { + ctx := context.Background() //nolint t.Skip("Supporting this test in CI requires installing also CRDs from release v0.7.2") resourceName := "mdb-upg" testConfig := setup.LoadTestConfigFromEnv() @@ -77,37 +82,37 @@ func TestReplicaSetOperatorUpgradeFrom0_7_2(t *testing.T) { testConfig.OperatorImage = "quay.io/mongodb/mongodb-kubernetes-operator:0.7.2" testConfig.VersionUpgradeHookImage = "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.3" testConfig.ReadinessProbeImage = "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.6" - testConfig.AgentImage = "quay.io/mongodb/mongodb-agent:11.0.5.6963-1" + testConfig.AgentImage = "quay.io/mongodb/mongodb-agent-ubi:11.0.5.6963-1" - ctx := setup.SetupWithTestConfig(t, testConfig, true, false, resourceName) - defer ctx.Teardown() + testCtx := setup.SetupWithTestConfig(ctx, t, testConfig, true, false, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, "") scramUser := mdb.GetAuthUsers()[0] mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } runTests := func(t *testing.T) { - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb, true)) - t.Run("AutomationConfig has the correct version", 
mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) - t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) t.Run("Test Basic Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Test SRV Connectivity with generated connection string secret", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) } @@ -117,7 +122,7 @@ func TestReplicaSetOperatorUpgradeFrom0_7_2(t *testing.T) { // rescale helm operator deployment to zero and run local operator then. 
testConfig = setup.LoadTestConfigFromEnv() - err = setup.DeployOperator(testConfig, resourceName, true, false) + err = setup.DeployOperator(ctx, testConfig, resourceName, true, false) assert.NoError(t, err) runTests(t) diff --git a/test/e2e/replica_set_recovery/replica_set_recovery_test.go b/test/e2e/replica_set_recovery/replica_set_recovery_test.go index cd50d152b..91c9426b7 100644 --- a/test/e2e/replica_set_recovery/replica_set_recovery_test.go +++ b/test/e2e/replica_set_recovery/replica_set_recovery_test.go @@ -1,6 +1,7 @@ package replica_set_recovery import ( + "context" "crypto/rand" "fmt" "math/big" @@ -13,7 +14,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -25,24 +26,25 @@ func TestMain(m *testing.M) { } func TestReplicaSetRecovery(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test Basic Connectivity", 
tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) t.Run("MongoDB is reachable", func(t *testing.T) { @@ -51,17 +53,16 @@ func TestReplicaSetRecovery(t *testing.T) { if err != nil { t.Fatal(err) } - t.Run("Delete Random Pod", mongodbtests.DeletePod(&mdb, int(n.Int64()))) - t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb.MongoURI(""), - Phase: mdbv1.Running, - Version: mdb.GetMongoDBVersion(), - CurrentMongoDBMembers: 3, - CurrentStatefulSetReplicas: 3, - })) + t.Run("Delete Random Pod", mongodbtests.DeletePod(ctx, &mdb, int(n.Int64()))) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 3, + CurrentStatefulSetReplicas: 3, + })) }) } diff --git a/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go b/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go new file mode 100644 index 000000000..2abeb93c3 --- /dev/null +++ b/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go @@ -0,0 +1,127 @@ +package replica_set_remove_user + +import ( + "context" + "fmt" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "os" + "testing" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func intPtr(x int) *int { return &x } +func strPtr(s string) *string { return &s } + +func TestCleanupUsers(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + // config member options + memberOptions := []automationconfig.MemberOptions{ + { + Votes: intPtr(1), + Tags: map[string]string{"foo1": "bar1"}, + Priority: strPtr("1.5"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo2": "bar2"}, + Priority: strPtr("1"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo3": "bar3"}, + Priority: strPtr("2.5"), + }, + } + mdb.Spec.MemberConfig = memberOptions + + settings := map[string]interface{}{ + "electionTimeoutMillis": float64(20), + } + mdb.Spec.AutomationConfigOverride = &mdbv1.AutomationConfigOverride{ + ReplicaSet: mdbv1.OverrideReplicaSet{Settings: mdbv1.MapWrapper{Object: settings}}, + } + + newUser := mdbv1.MongoDBUser{ + Name: fmt.Sprintf("%s-user-2", "mdb-0"), + PasswordSecretRef: mdbv1.SecretKeyReference{ + Key: fmt.Sprintf("%s-password-2", "mdb-0"), + Name: fmt.Sprintf("%s-%s-password-secret-2", "mdb-0", testCtx.ExecutionId), + }, + Roles: []mdbv1.Role{ + // roles on testing db for general connectivity + { + DB: "testing", + Name: "readWrite", + }, + { + DB: "testing", + Name: "clusterAdmin", + }, + // admin roles for 
reading FCV + { + DB: "admin", + Name: "readWrite", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + { + DB: "admin", + Name: "userAdmin", + }, + }, + ScramCredentialsSecretName: fmt.Sprintf("%s-my-scram-2", "mdb-0"), + } + + _, err = setup.GeneratePasswordForUser(testCtx, newUser, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Add new user to MongoDB Resource", mongodbtests.AddUserToMongoDBCommunity(ctx, &mdb, newUser)) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + editedUser := mdb.Spec.Users[1] + t.Run("Edit connection string secret name of the added user", mongodbtests.EditConnectionStringSecretNameOfLastUser(ctx, &mdb, "other-secret-name")) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Old connection string secret is cleaned up", mongodbtests.ConnectionStringSecretIsCleanedUp(ctx, &mdb, editedUser.GetConnectionStringSecretName(mdb.Name))) + deletedUser := mdb.Spec.Users[1] + t.Run("Remove last user from MongoDB Resource", mongodbtests.RemoveLastUserFromMongoDBCommunity(ctx, &mdb)) + t.Run("MongoDB reaches Pending phase", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Removed users are added to automation config", mongodbtests.AuthUsersDeletedIsUpdated(ctx, &mdb, deletedUser)) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Connection string secrets are cleaned up", 
mongodbtests.ConnectionStringSecretIsCleanedUp(ctx, &mdb, deletedUser.GetConnectionStringSecretName(mdb.Name))) + t.Run("Delete MongoDB Resource", mongodbtests.DeleteMongoDBResource(&mdb, testCtx)) +} diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go b/test/e2e/replica_set_scale/replica_set_scaling_test.go index 709fa99ff..0361ba9f0 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -1,6 +1,7 @@ package replica_set_scale_up import ( + "context" "fmt" "os" "testing" @@ -9,7 +10,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" ) @@ -22,41 +23,41 @@ func TestMain(m *testing.M) { } func TestReplicaSetScaleUp(t *testing.T) { + ctx := context.Background() - ctx := setup.Setup(t) - defer ctx.Teardown() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test Basic Connectivity", 
tester.ConnectivitySucceeds()) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("MongoDB is reachable", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb, 5)) - t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb.MongoURI(""), - Phase: mdbv1.Running, - Version: mdb.GetMongoDBVersion(), - CurrentMongoDBMembers: 5, - CurrentStatefulSetReplicas: 5, - })) + t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(ctx, &mdb, 5)) + t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, + })) // TODO: Currently the scale down process takes too long to reasonably include this in the test //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) diff --git a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go 
b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go index 6aa5f8c4f..fd03fdafc 100644 --- a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go +++ b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go @@ -1,6 +1,7 @@ package replica_set_scale_down import ( + "context" "fmt" "os" "testing" @@ -12,7 +13,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -24,41 +25,41 @@ func TestMain(m *testing.M) { } func TestReplicaSetScaleDown(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "replica-set-scale-down", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "replica-set-scale-down", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + t.Run("AutomationConfig has the correct version", 
mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) t.Run("MongoDB is reachable", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 1)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 3)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb.MongoURI(""), - Phase: mdbv1.Running, - Version: mdb.GetMongoDBVersion(), - CurrentMongoDBMembers: 1, - CurrentStatefulSetReplicas: 1, - })) + t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(ctx, &mdb, 1)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 1, + CurrentStatefulSetReplicas: 1, + })) }) } diff --git a/test/e2e/replica_set_tls/replica_set_tls_test.go b/test/e2e/replica_set_tls/replica_set_tls_test.go index ea86db71a..719bcdc8f 100644 --- a/test/e2e/replica_set_tls/replica_set_tls_test.go +++ b/test/e2e/replica_set_tls/replica_set_tls_test.go @@ -1,6 +1,7 @@ package replica_set_tls import ( + "context" "fmt" "os" "testing" @@ -9,7 +10,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -21,40 +22,41 @@ func TestMain(m *testing.M) { } func TestReplicaSetTLS(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) scramUser := mdb.GetAuthUsers()[0] mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { - t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) - t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(mdb))) - t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(mdb))) + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(ctx, mdb))) t.Run("Basic Connectivity With 
Generated Connection String Secret Succeeds", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb, scramUser)), WithTls(mdb))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", - tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)), WithTls(mdb))) + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) t.Run("Connectivity Fails", tester.ConnectivityFails(WithoutTls())) - t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(mdb))) + t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) }) - t.Run("TLS is disabled", mongodbtests.DisableTLS(&mdb)) - t.Run("MongoDB Reaches Failed Phase", mongodbtests.MongoDBReachesFailedPhase(&mdb)) - t.Run("TLS is enabled", mongodbtests.EnableTLS(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("TLS is disabled", mongodbtests.DisableTLS(ctx, &mdb)) + t.Run("MongoDB Reaches Failed Phase", mongodbtests.MongoDBReachesFailedPhase(ctx, &mdb)) + t.Run("TLS is enabled", mongodbtests.EnableTLS(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) } diff --git a/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go b/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go index 8207b6739..751a048c4 100644 --- a/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go +++ b/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go @@ -10,7 +10,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -22,50 +22,51 @@ func TestMain(m *testing.M) { } func TestReplicaSetTLSRecreateMdbc(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb1, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb1, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) scramUser := mdb1.GetAuthUsers()[0] mdb1.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb1, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb1)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb1, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb1)) - if err := e2eutil.TestClient.Delete(context.TODO(), &mdb1); err != nil { + if err := e2eutil.TestClient.Delete(ctx, &mdb1); err != nil { t.Fatalf("Failed to delete first test MongoDB: %s", err) } - t.Run("Stateful Set Is Deleted", mongodbtests.StatefulSetIsDeleted(&mdb1)) + t.Run("Stateful Set Is Deleted", mongodbtests.StatefulSetIsDeleted(ctx, &mdb1)) - mdb2, _ := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb2, _ := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) mdb2.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - tester1, err := FromResource(t, mdb2) + tester1, err := FromResource(ctx, t, mdb2) if err != nil { 
t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb2, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb2)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb2, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb2)) mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { - t.Run("Has TLS Mode", tester1.HasTlsMode("requireSSL", 60, WithTls(mdb2))) - t.Run("Basic Connectivity Succeeds", tester1.ConnectivitySucceeds(WithTls(mdb2))) - t.Run("SRV Connectivity Succeeds", tester1.ConnectivitySucceeds(WithURI(mdb2.MongoSRVURI("")), WithTls(mdb2))) + t.Run("Has TLS Mode", tester1.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb2))) + t.Run("Basic Connectivity Succeeds", tester1.ConnectivitySucceeds(WithTls(ctx, mdb2))) + t.Run("SRV Connectivity Succeeds", tester1.ConnectivitySucceeds(WithURI(mdb2.MongoSRVURI("")), WithTls(ctx, mdb2))) t.Run("Basic Connectivity With Generated Connection String Secret Succeeds", - tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(mdb2, scramUser)), WithTls(mdb2))) + tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb2, scramUser)), WithTls(ctx, mdb2))) t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", - tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb2, scramUser)), WithTls(mdb2))) + tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb2, scramUser)), WithTls(ctx, mdb2))) t.Run("Connectivity Fails", tester1.ConnectivityFails(WithoutTls())) - t.Run("Ensure authentication is configured", tester1.EnsureAuthenticationIsConfigured(3, WithTls(mdb2))) + t.Run("Ensure authentication is configured", tester1.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb2))) }) - t.Run("TLS is disabled", mongodbtests.DisableTLS(&mdb2)) - t.Run("MongoDB Reaches Failed Phase", 
mongodbtests.MongoDBReachesFailedPhase(&mdb2)) - t.Run("TLS is enabled", mongodbtests.EnableTLS(&mdb2)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb2)) + t.Run("TLS is disabled", mongodbtests.DisableTLS(ctx, &mdb2)) + t.Run("MongoDB Reaches Failed Phase", mongodbtests.MongoDBReachesFailedPhase(ctx, &mdb2)) + t.Run("TLS is enabled", mongodbtests.EnableTLS(ctx, &mdb2)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb2)) } diff --git a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go index 6eb51f50c..86c1b6614 100644 --- a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go +++ b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go @@ -1,6 +1,7 @@ package replica_set_tls import ( + "context" "fmt" "os" "testing" @@ -11,7 +12,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -23,44 +24,45 @@ func TestMain(m *testing.M) { } func TestReplicaSetTLSRotate(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, 
mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - clientCert, err := GetClientCert(mdb) + clientCert, err := GetClientCert(ctx, mdb) if err != nil { t.Fatal(err) } initialCertSerialNumber := clientCert.SerialNumber - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) - t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) - t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) - t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(mdb))) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) t.Run("Test TLS required", tester.ConnectivityFails(WithoutTls())) t.Run("MongoDB is reachable while certificate is rotated", func(t *testing.T) { - defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(mdb))() - t.Run("Update certificate secret", tlstests.RotateCertificate(&mdb)) + defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(ctx, mdb))() + t.Run("Update certificate secret", tlstests.RotateCertificate(ctx, &mdb)) t.Run("Wait for certificate to be rotated", tester.WaitForRotatedCertificate(mdb, initialCertSerialNumber)) - t.Run("Wait for MongoDB to reach Running Phase after rotating server cert", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Extend CA certificate validity", tlstests.ExtendCACertificate(&mdb)) - t.Run("Wait for MongoDB to start reconciling after extending CA", mongodbtests.MongoDBReachesPendingPhase(&mdb)) - t.Run("Wait for MongoDB to 
reach Running Phase after extending CA", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Wait for MongoDB to reach Running Phase after rotating server cert", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Extend CA certificate validity", tlstests.ExtendCACertificate(ctx, &mdb)) + t.Run("Wait for MongoDB to start reconciling after extending CA", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to reach Running Phase after extending CA", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) }) } diff --git a/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go b/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go index 14cda1006..0bc0448bd 100644 --- a/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go +++ b/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go @@ -1,6 +1,7 @@ package replica_set_tls_rotate_delete_sts import ( + "context" "os" "testing" @@ -23,43 +24,44 @@ func TestMain(m *testing.M) { } func TestReplicaSetTLSRotateDeleteSts(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - clientCert, err := GetClientCert(mdb) + clientCert, err := GetClientCert(ctx, mdb) if err != nil { 
t.Fatal(err) } initialCertSerialNumber := clientCert.SerialNumber - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) - t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(mdb))) - t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) - t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(mdb))) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) t.Run("Test TLS required", tester.ConnectivityFails(WithoutTls())) t.Run("MongoDB is reachable while certificate is rotated", func(t *testing.T) { - t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(&mdb)) - t.Run("Update certificate secret", tlstests.RotateCertificate(&mdb)) + t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(ctx, &mdb)) + t.Run("Update certificate secret", tlstests.RotateCertificate(ctx, &mdb)) t.Run("Wait for certificate to be rotated", tester.WaitForRotatedCertificate(mdb, initialCertSerialNumber)) - t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Wait for MongoDB to reach Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for MongoDB to reach Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) }) } diff --git 
a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go index 8ac787079..eb85477f3 100644 --- a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go +++ b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go @@ -1,6 +1,7 @@ package replica_set_tls import ( + "context" "fmt" "os" "testing" @@ -12,7 +13,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -24,50 +25,51 @@ func TestMain(m *testing.M) { } func TestReplicaSetTLSUpgrade(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" - ctx, testConfig := setup.SetupWithTLS(t, resourceName) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) - _, err := setup.GeneratePasswordForUser(ctx, user, testConfig.Namespace) + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) if err != nil { t.Fatal(err) } - tester, err := FromResource(t, mdb) + tester, err := FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithoutTls())) t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) // Enable TLS as optional 
t.Run("MongoDB is reachable while TLS is being enabled", func(t *testing.T) { defer tester.StartBackgroundConnectivityTest(t, time.Second*15, WithoutTls())() - t.Run("Upgrade to TLS", tlstests.EnableTLS(&mdb, true)) - t.Run("Stateful Set Leaves Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesUnready(&mdb)) - t.Run("Stateful Set Reaches Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesReady(&mdb)) + t.Run("Upgrade to TLS", tlstests.EnableTLS(ctx, &mdb, true)) + t.Run("Stateful Set Leaves Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesUnready(ctx, &mdb)) + t.Run("Stateful Set Reaches Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) t.Run("Wait for TLS to be enabled", tester.HasTlsMode("preferSSL", 60, WithoutTls())) }) // Ensure MongoDB is reachable both with and without TLS t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithoutTls())) - t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) - t.Run("Internal cluster keyfile authentication is enabled", tester.HasKeyfileAuth(3, WithTls(mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Internal cluster keyfile authentication is enabled", tester.HasKeyfileAuth(3, WithTls(ctx, mdb))) // Make TLS required t.Run("MongoDB is reachable over TLS while making TLS required", func(t *testing.T) { - defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(mdb))() - t.Run("Make TLS required", tlstests.EnableTLS(&mdb, false)) - t.Run("Stateful Set Reaches Ready State, after setting TLS to requireSSL", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("Wait for TLS to be required", tester.HasTlsMode("requireSSL", 120, WithTls(mdb))) + defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(ctx, mdb))() + t.Run("Make TLS required", tlstests.EnableTLS(ctx, &mdb, false)) + 
t.Run("Stateful Set Reaches Ready State, after setting TLS to requireSSL", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for TLS to be required", tester.HasTlsMode("requireSSL", 120, WithTls(ctx, mdb))) }) // Ensure MongoDB is reachable only over TLS - t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) t.Run("Test TLS Required For Connectivity", tester.ConnectivityFails(WithoutTls())) } diff --git a/test/e2e/replica_set_x509/replica_set_x509_test.go b/test/e2e/replica_set_x509/replica_set_x509_test.go index ec45e34b8..a7ed3503c 100644 --- a/test/e2e/replica_set_x509/replica_set_x509_test.go +++ b/test/e2e/replica_set_x509/replica_set_x509_test.go @@ -28,19 +28,20 @@ func TestMain(m *testing.M) { } func TestReplicaSetX509(t *testing.T) { + ctx := context.Background() resourceName := "mdb-tls" helmArgs := []setup.HelmArg{ {Name: "resource.tls.useX509", Value: "true"}, {Name: "resource.tls.sampleX509User", Value: "true"}, } - ctx, testConfig := setup.SetupWithTLS(t, resourceName, helmArgs...) - defer ctx.Teardown() + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName, helmArgs...) 
+ defer testCtx.Teardown() - mdb, _ := e2eutil.NewTestMongoDB(ctx, resourceName, testConfig.Namespace) + mdb, _ := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) mdb.Spec.Security.Authentication.Modes = []v1.AuthMode{"X509"} mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) - tester, err := FromX509Resource(t, mdb) + tester, err := FromX509Resource(ctx, t, mdb) if err != nil { t.Fatal(err) } @@ -51,55 +52,55 @@ func TestReplicaSetX509(t *testing.T) { } users := mdb.GetAuthUsers() - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(&mdb)) - t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) - cert, root, dir := createCerts(t, &mdb) + cert, root, dir := createCerts(ctx, t, &mdb) defer os.RemoveAll(dir) - t.Run("Connectivity Fails without certs", tester.ConnectivityFails(WithURI(mongodbtests.GetConnectionStringForUser(mdb, users[0])), WithTls(mdb))) - t.Run("Connectivity Fails with invalid certs", tester.ConnectivityFails(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(mdb, users[0]), root, cert)))) + t.Run("Connectivity Fails without certs", tester.ConnectivityFails(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0])), WithTls(ctx, mdb))) + t.Run("Connectivity Fails with invalid certs", tester.ConnectivityFails(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) }) t.Run("Connection with valid certificate", func(t *testing.T) { t.Run("Update MongoDB Resource", func(t *testing.T) { - err := 
e2eutil.UpdateMongoDBResource(&mdb, func(m *v1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { m.Spec.Users = []v1.MongoDBUser{getValidUser()} }) assert.NoError(t, err) }) - cert, root, dir := createCerts(t, &mdb) + cert, root, dir := createCerts(ctx, t, &mdb) defer os.RemoveAll(dir) users := mdb.GetAuthUsers() - t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(&mdb)) - t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(&mdb)) - t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(mdb, users[0]), root, cert)))) + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) }) t.Run("Rotate agent certificate", func(t *testing.T) { - agentCert, err := GetAgentCert(mdb) + agentCert, err := GetAgentCert(ctx, mdb) if err != nil { t.Fatal(err) } initialCertSerialNumber := agentCert.SerialNumber initialAgentPem := &corev1.Secret{} - err = e2eutil.TestClient.Get(context.TODO(), mdb.AgentCertificatePemSecretNamespacedName(), initialAgentPem) + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), initialAgentPem) assert.NoError(t, err) - cert, root, dir := createCerts(t, &mdb) + cert, root, dir := createCerts(ctx, t, &mdb) defer os.RemoveAll(dir) users := mdb.GetAuthUsers() - t.Run("Update certificate secret", tlstests.RotateAgentCertificate(&mdb)) - t.Run("Wait for MongoDB to reach Running Phase after rotating agent cert", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + t.Run("Update certificate secret", tlstests.RotateAgentCertificate(ctx, &mdb)) 
+ t.Run("Wait for MongoDB to reach Running Phase after rotating agent cert", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) - agentCert, err = GetAgentCert(mdb) + agentCert, err = GetAgentCert(ctx, mdb) if err != nil { t.Fatal(err) } @@ -108,48 +109,48 @@ func TestReplicaSetX509(t *testing.T) { assert.NotEqual(t, finalCertSerialNumber, initialCertSerialNumber) finalAgentPem := &corev1.Secret{} - err = e2eutil.TestClient.Get(context.TODO(), mdb.AgentCertificatePemSecretNamespacedName(), finalAgentPem) + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), finalAgentPem) assert.NoError(t, err) assert.NotEqual(t, finalAgentPem.Data, initialAgentPem.Data) - t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(mdb, users[0]), root, cert)))) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) }) t.Run("Transition to also allow SCRAM", func(t *testing.T) { t.Run("Update MongoDB Resource", func(t *testing.T) { - err := e2eutil.UpdateMongoDBResource(&mdb, func(m *v1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { m.Spec.Security.Authentication.Modes = []v1.AuthMode{"X509", "SCRAM"} m.Spec.Security.Authentication.AgentMode = "X509" }) assert.NoError(t, err) }) - cert, root, dir := createCerts(t, &mdb) + cert, root, dir := createCerts(ctx, t, &mdb) defer os.RemoveAll(dir) users := mdb.GetAuthUsers() - t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(&mdb)) - t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(&mdb)) - t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(mdb, users[0]), root, 
cert)))) + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) }) t.Run("Transition to SCRAM agent", func(t *testing.T) { t.Run("Update MongoDB Resource", func(t *testing.T) { - err := e2eutil.UpdateMongoDBResource(&mdb, func(m *v1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { m.Spec.Security.Authentication.AgentMode = "SCRAM" }) assert.NoError(t, err) }) - cert, root, dir := createCerts(t, &mdb) + cert, root, dir := createCerts(ctx, t, &mdb) defer os.RemoveAll(dir) users := mdb.GetAuthUsers() - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) - t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(mdb, users[0]), root, cert)))) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) }) } @@ -196,13 +197,13 @@ func getInvalidUser() v1.MongoDBUser { } } -func createCerts(t *testing.T, mdb *v1.MongoDBCommunity) (string, string, string) { +func createCerts(ctx context.Context, t *testing.T, mdb *v1.MongoDBCommunity) (string, string, string) { dir, _ := os.MkdirTemp("", "certdir") t.Logf("Creating client certificate pem file") cert, _ := os.CreateTemp(dir, "pem") clientCertSecret := corev1.Secret{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{ + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{ Namespace: mdb.Namespace, Name: "my-x509-user-cert", }, 
&clientCertSecret) diff --git a/test/e2e/setup/setup.go b/test/e2e/setup/setup.go index 6c42e9888..8bf9595bd 100644 --- a/test/e2e/setup/setup.go +++ b/test/e2e/setup/setup.go @@ -41,23 +41,23 @@ const ( Pem tlsSecretType = "PEM" ) -func Setup(t *testing.T) *e2eutil.Context { - ctx, err := e2eutil.NewContext(t, envvar.ReadBool(performCleanupEnv)) +func Setup(ctx context.Context, t *testing.T) *e2eutil.TestContext { + testCtx, err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo if err != nil { t.Fatal(err) } config := LoadTestConfigFromEnv() - if err := DeployOperator(config, "mdb", false, false); err != nil { + if err := DeployOperator(ctx, config, "mdb", false, false); err != nil { t.Fatal(err) } - return ctx + return testCtx } -func SetupWithTLS(t *testing.T, resourceName string, additionalHelmArgs ...HelmArg) (*e2eutil.Context, TestConfig) { - ctx, err := e2eutil.NewContext(t, envvar.ReadBool(performCleanupEnv)) +func SetupWithTLS(ctx context.Context, t *testing.T, resourceName string, additionalHelmArgs ...HelmArg) (*e2eutil.TestContext, TestConfig) { + textCtx, err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo if err != nil { t.Fatal(err) @@ -68,15 +68,15 @@ func SetupWithTLS(t *testing.T, resourceName string, additionalHelmArgs ...HelmA t.Fatal(err) } - if err := DeployOperator(config, resourceName, true, false, additionalHelmArgs...); err != nil { + if err := DeployOperator(ctx, config, resourceName, true, false, additionalHelmArgs...); err != nil { t.Fatal(err) } - return ctx, config + return textCtx, config } -func SetupWithTestConfig(t *testing.T, testConfig TestConfig, withTLS, defaultOperator bool, resourceName string) *e2eutil.Context { - ctx, err := e2eutil.NewContext(t, envvar.ReadBool(performCleanupEnv)) +func SetupWithTestConfig(ctx context.Context, t *testing.T, testConfig TestConfig, withTLS, defaultOperator bool, resourceName string) *e2eutil.TestContext { + testCtx, 
err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo if err != nil { t.Fatal(err) @@ -88,15 +88,15 @@ func SetupWithTestConfig(t *testing.T, testConfig TestConfig, withTLS, defaultOp } } - if err := DeployOperator(testConfig, resourceName, withTLS, defaultOperator); err != nil { + if err := DeployOperator(ctx, testConfig, resourceName, withTLS, defaultOperator); err != nil { t.Fatal(err) } - return ctx + return testCtx } // GeneratePasswordForUser will create a secret with a password for the given user -func GeneratePasswordForUser(ctx *e2eutil.Context, mdbu mdbv1.MongoDBUser, namespace string) (string, error) { +func GeneratePasswordForUser(testCtx *e2eutil.TestContext, mdbu mdbv1.MongoDBUser, namespace string) (string, error) { passwordKey := mdbu.PasswordSecretRef.Key if passwordKey == "" { passwordKey = "password" @@ -119,7 +119,7 @@ func GeneratePasswordForUser(ctx *e2eutil.Context, mdbu mdbv1.MongoDBUser, names SetLabels(e2eutil.TestLabels()). Build() - return password, e2eutil.TestClient.Create(context.TODO(), &passwordSecret, &e2eutil.CleanupOptions{TestContext: ctx}) + return password, e2eutil.TestClient.Create(testCtx.Ctx, &passwordSecret, &e2eutil.CleanupOptions{TestContext: testCtx}) } // extractRegistryNameAndVersion splits a full image string and returns the individual components. @@ -186,7 +186,7 @@ func getHelmArgs(testConfig TestConfig, watchNamespace string, resourceName stri } // DeployOperator installs all resources required by the operator using helm. 
-func DeployOperator(config TestConfig, resourceName string, withTLS bool, defaultOperator bool, additionalHelmArgs ...HelmArg) error { +func DeployOperator(ctx context.Context, config TestConfig, resourceName string, withTLS bool, defaultOperator bool, additionalHelmArgs ...HelmArg) error { e2eutil.OperatorNamespace = config.Namespace fmt.Printf("Setting operator namespace to %s\n", e2eutil.OperatorNamespace) watchNamespace := config.Namespace @@ -218,7 +218,7 @@ func DeployOperator(config TestConfig, resourceName string, withTLS bool, defaul return err } - dep, err := waite2e.ForDeploymentToExist("mongodb-kubernetes-operator", time.Second*10, time.Minute*1, e2eutil.OperatorNamespace) + dep, err := waite2e.ForDeploymentToExist(ctx, "mongodb-kubernetes-operator", time.Second*10, time.Minute*1, e2eutil.OperatorNamespace) if err != nil { return err } @@ -232,12 +232,12 @@ func DeployOperator(config TestConfig, resourceName string, withTLS bool, defaul cont.Resources.Requests["cpu"] = quantityCPU } - err = e2eutil.TestClient.Update(context.TODO(), &dep) + err = e2eutil.TestClient.Update(ctx, &dep) if err != nil { return err } - if err := wait.PollImmediate(time.Second, 60*time.Second, hasDeploymentRequiredReplicas(&dep)); err != nil { + if err := wait.PollUntilContextTimeout(ctx, time.Second*2, 120*time.Second, true, hasDeploymentRequiredReplicas(&dep)); err != nil { return errors.New("error building operator deployment: the deployment does not have the required replicas") } fmt.Println("Successfully installed the operator deployment") @@ -265,9 +265,9 @@ func deployCertManager(config TestConfig) error { // hasDeploymentRequiredReplicas returns a condition function that indicates whether the given deployment // currently has the required amount of replicas in the ready state as specified in spec.replicas -func hasDeploymentRequiredReplicas(dep *appsv1.Deployment) wait.ConditionFunc { - return func() (bool, error) { - err := e2eutil.TestClient.Get(context.TODO(), +func 
hasDeploymentRequiredReplicas(dep *appsv1.Deployment) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: dep.Name, Namespace: e2eutil.OperatorNamespace}, dep) @@ -280,6 +280,7 @@ func hasDeploymentRequiredReplicas(dep *appsv1.Deployment) wait.ConditionFunc { if dep.Status.ReadyReplicas == *dep.Spec.Replicas { return true, nil } + fmt.Printf("Deployment not ready! ReadyReplicas: %d, Spec.Replicas: %d\n", dep.Status.ReadyReplicas, *dep.Spec.Replicas) return false, nil } } diff --git a/test/e2e/setup/test_config.go b/test/e2e/setup/test_config.go index 19e7c5d92..1fc247021 100644 --- a/test/e2e/setup/test_config.go +++ b/test/e2e/setup/test_config.go @@ -34,18 +34,19 @@ type TestConfig struct { func LoadTestConfigFromEnv() TestConfig { return TestConfig{ - Namespace: envvar.GetEnvOrDefault(testNamespaceEnvName, "mongodb"), - CertManagerNamespace: envvar.GetEnvOrDefault(testCertManagerNamespaceEnvName, "cert-manager"), - CertManagerVersion: envvar.GetEnvOrDefault(testCertManagerVersionEnvName, "v1.5.3"), - OperatorImage: envvar.GetEnvOrDefault(operatorImageEnvName, "quay.io/mongodb/community-operator-dev:latest"), - MongoDBImage: envvar.GetEnvOrDefault(construct.MongodbImageEnv, "mongodb-community-server"), - MongoDBRepoUrl: envvar.GetEnvOrDefault(construct.MongodbRepoUrl, "quay.io/mongodb"), - VersionUpgradeHookImage: envvar.GetEnvOrDefault(construct.VersionUpgradeHookImageEnv, "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2"), - AgentImage: envvar.GetEnvOrDefault(construct.AgentImageEnv, "quay.io/mongodb/mongodb-agent:10.29.0.6830-1"), // TODO: better way to decide default agent image. 
- ClusterWide: envvar.ReadBool(clusterWideEnvName), - PerformCleanup: envvar.ReadBool(performCleanupEnvName), - ReadinessProbeImage: envvar.GetEnvOrDefault(construct.ReadinessProbeImageEnv, "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3"), - HelmChartPath: envvar.GetEnvOrDefault(helmChartPathEnvName, "/workspace/helm-charts/charts/community-operator"), - LocalOperator: envvar.ReadBool(LocalOperatorEnvName), + Namespace: envvar.GetEnvOrDefault(testNamespaceEnvName, "mongodb"), // nolint:forbidigo + CertManagerNamespace: envvar.GetEnvOrDefault(testCertManagerNamespaceEnvName, "cert-manager"), // nolint:forbidigo + CertManagerVersion: envvar.GetEnvOrDefault(testCertManagerVersionEnvName, "v1.5.3"), // nolint:forbidigo + OperatorImage: envvar.GetEnvOrDefault(operatorImageEnvName, "quay.io/mongodb/community-operator-dev:latest"), // nolint:forbidigo + MongoDBImage: envvar.GetEnvOrDefault(construct.MongodbImageEnv, "mongodb-community-server"), // nolint:forbidigo + MongoDBRepoUrl: envvar.GetEnvOrDefault(construct.MongodbRepoUrlEnv, "quay.io/mongodb"), // nolint:forbidigo + VersionUpgradeHookImage: envvar.GetEnvOrDefault(construct.VersionUpgradeHookImageEnv, "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2"), // nolint:forbidigo + // TODO: better way to decide default agent image. 
+ AgentImage: envvar.GetEnvOrDefault(construct.AgentImageEnv, "quay.io/mongodb/mongodb-agent-ubi:10.29.0.6830-1"), // nolint:forbidigo + ClusterWide: envvar.ReadBool(clusterWideEnvName), // nolint:forbidigo + PerformCleanup: envvar.ReadBool(performCleanupEnvName), // nolint:forbidigo + ReadinessProbeImage: envvar.GetEnvOrDefault(construct.ReadinessProbeImageEnv, "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3"), // nolint:forbidigo + HelmChartPath: envvar.GetEnvOrDefault(helmChartPathEnvName, "/workspace/helm-charts/charts/community-operator"), // nolint:forbidigo + LocalOperator: envvar.ReadBool(LocalOperatorEnvName), // nolint:forbidigo } } diff --git a/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go b/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go index 3a4f48e21..d622cc68d 100644 --- a/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go +++ b/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go @@ -1,6 +1,7 @@ package statefulset_arbitrary_config_update import ( + "context" "fmt" "os" "reflect" @@ -10,7 +11,7 @@ import ( e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" ) @@ -24,12 +25,13 @@ func TestMain(m *testing.M) { } func TestStatefulSetArbitraryConfig(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } @@ -54,20 
+56,20 @@ func TestStatefulSetArbitraryConfig(t *testing.T) { customServiceName := "database" mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName = customServiceName - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) - t.Run("Test setting Service Name", mongodbtests.ServiceWithNameExists(customServiceName, mdb.Namespace)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test setting Service Name", mongodbtests.ServiceWithNameExists(ctx, customServiceName, mdb.Namespace)) t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) - t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(&mdb, "mongodb-agent", func(container corev1.Container) bool { + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(ctx, &mdb, "mongodb-agent", func(container corev1.Container) bool { return container.ReadinessProbe.TimeoutSeconds == 100 })) - t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(&mdb, func(sts appsv1.StatefulSet) bool { + t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(ctx, &mdb, func(sts appsv1.StatefulSet) bool { return reflect.DeepEqual(overrideTolerations, sts.Spec.Template.Spec.Tolerations) })) } diff --git a/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go 
b/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go index 11b97d806..051189946 100644 --- a/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go +++ b/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go @@ -1,6 +1,7 @@ package statefulset_arbitrary_config import ( + "context" "fmt" "os" "reflect" @@ -11,7 +12,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -26,25 +27,26 @@ func TestMain(m *testing.M) { } func TestStatefulSetArbitraryConfig(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - tester, err := mongotester.FromResource(t, mdb) + tester, err := mongotester.FromResource(ctx, t, mdb) if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test basic connectivity", tester.ConnectivitySucceeds()) - t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 1)) + 
t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) overrideTolerations := []corev1.Toleration{ { @@ -64,16 +66,16 @@ func TestStatefulSetArbitraryConfig(t *testing.T) { overrideSpec.SpecWrapper.Spec.Template.Spec.Containers[1].ReadinessProbe = &corev1.Probe{TimeoutSeconds: 100} overrideSpec.SpecWrapper.Spec.Template.Spec.Tolerations = overrideTolerations - err = e2eutil.UpdateMongoDBResource(&mdb, func(mdb *mdbv1.MongoDBCommunity) { mdb.Spec.StatefulSetConfiguration = overrideSpec }) + err = e2eutil.UpdateMongoDBResource(ctx, &mdb, func(mdb *mdbv1.MongoDBCommunity) { mdb.Spec.StatefulSetConfiguration = overrideSpec }) assert.NoError(t, err) - t.Run("Basic tests after update", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Basic tests after update", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Test basic connectivity after update", tester.ConnectivitySucceeds()) - t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(&mdb, "mongodb-agent", func(container corev1.Container) bool { + t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(ctx, &mdb, "mongodb-agent", func(container corev1.Container) bool { return container.ReadinessProbe.TimeoutSeconds == 100 })) - t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(&mdb, func(sts appsv1.StatefulSet) bool { + t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(ctx, &mdb, func(sts appsv1.StatefulSet) bool { return reflect.DeepEqual(overrideTolerations, sts.Spec.Template.Spec.Tolerations) })) } diff --git a/test/e2e/statefulset_delete/statefulset_delete_test.go b/test/e2e/statefulset_delete/statefulset_delete_test.go index 2a2aedac0..3117109e6 100644 --- a/test/e2e/statefulset_delete/statefulset_delete_test.go +++ b/test/e2e/statefulset_delete/statefulset_delete_test.go @@ -1,6 +1,7 @@ package 
statefulset_delete import ( + "context" "fmt" "os" "testing" @@ -8,7 +9,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - setup "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" ) func TestMain(m *testing.M) { @@ -20,29 +21,29 @@ func TestMain(m *testing.M) { } func TestStatefulSetDelete(t *testing.T) { - ctx := setup.Setup(t) - defer ctx.Teardown() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - mdb, user := e2eutil.NewTestMongoDB(ctx, "mdb0", "") + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") - _, err := setup.GeneratePasswordForUser(ctx, user, "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") if err != nil { t.Fatal(err) } - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Basic tests", mongodbtests.BasicFunctionality(&mdb)) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) t.Run("Operator recreates StatefulSet", func(t *testing.T) { - t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(&mdb)) - t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBCommunityStatus{ - MongoURI: mdb.MongoURI(""), - Phase: mdbv1.Running, - Version: mdb.GetMongoDBVersion(), - CurrentMongoDBMembers: mdb.DesiredReplicas(), - })) + t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(ctx, &mdb)) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", 
mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: mdb.DesiredReplicas(), + })) }) } diff --git a/test/e2e/tlstests/tlstests.go b/test/e2e/tlstests/tlstests.go index ac6156a17..6d327ec0d 100644 --- a/test/e2e/tlstests/tlstests.go +++ b/test/e2e/tlstests/tlstests.go @@ -18,9 +18,9 @@ import ( ) // EnableTLS will upgrade an existing TLS cluster to use TLS. -func EnableTLS(mdb *mdbv1.MongoDBCommunity, optional bool) func(*testing.T) { +func EnableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity, optional bool) func(*testing.T) { return func(t *testing.T) { - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDBCommunity) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Security.TLS = e2eutil.NewTestTLSConfig(optional) }) if err != nil { @@ -29,7 +29,7 @@ func EnableTLS(mdb *mdbv1.MongoDBCommunity, optional bool) func(*testing.T) { } } -func ExtendCACertificate(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { +func ExtendCACertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { certGVR := schema.GroupVersionResource{ Group: "cert-manager.io", @@ -56,44 +56,44 @@ func ExtendCACertificate(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { } payload, err := json.Marshal(patch) assert.NoError(t, err) - _, err = caCertificateClient.Patch(context.TODO(), "tls-selfsigned-ca", types.JSONPatchType, payload, metav1.PatchOptions{}) + _, err = caCertificateClient.Patch(ctx, "tls-selfsigned-ca", types.JSONPatchType, payload, metav1.PatchOptions{}) assert.NoError(t, err) } } -func RotateCertificate(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { +func RotateCertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { 
certKeySecretName := mdb.TLSSecretNamespacedName() - rotateCertManagerSecret(certKeySecretName, t) + rotateCertManagerSecret(ctx, certKeySecretName, t) } } -func RotateAgentCertificate(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { +func RotateAgentCertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { agentCertSecretName := mdb.AgentCertificateSecretNamespacedName() - rotateCertManagerSecret(agentCertSecretName, t) + rotateCertManagerSecret(ctx, agentCertSecretName, t) } } -func RotateCACertificate(mdb *mdbv1.MongoDBCommunity) func(*testing.T) { +func RotateCACertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { caCertSecretName := mdb.TLSCaCertificateSecretNamespacedName() - rotateCertManagerSecret(caCertSecretName, t) + rotateCertManagerSecret(ctx, caCertSecretName, t) } } -func rotateCertManagerSecret(secretName types.NamespacedName, t *testing.T) { +func rotateCertManagerSecret(ctx context.Context, secretName types.NamespacedName, t *testing.T) { currentSecret := corev1.Secret{} - err := e2eutil.TestClient.Get(context.TODO(), secretName, ¤tSecret) + err := e2eutil.TestClient.Get(ctx, secretName, ¤tSecret) assert.NoError(t, err) // delete current cert secret, cert-manager should generate a new one - err = e2eutil.TestClient.Delete(context.TODO(), ¤tSecret) + err = e2eutil.TestClient.Delete(ctx, ¤tSecret) assert.NoError(t, err) newSecret := corev1.Secret{} - err = wait.Poll(5*time.Second, 1*time.Minute, func() (done bool, err error) { - if err := e2eutil.TestClient.Get(context.TODO(), secretName, &newSecret); err != nil { + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, false, func(ctx context.Context) (done bool, err error) { + if err := e2eutil.TestClient.Get(ctx, secretName, &newSecret); err != nil { return false, nil } return true, nil diff --git a/test/e2e/util/mongotester/mongotester.go 
b/test/e2e/util/mongotester/mongotester.go index f87d0d500..58ad54181 100644 --- a/test/e2e/util/mongotester/mongotester.go +++ b/test/e2e/util/mongotester/mongotester.go @@ -29,13 +29,15 @@ import ( ) type Tester struct { + ctx context.Context mongoClient *mongo.Client clientOpts []*options.ClientOptions resource *mdbv1.MongoDBCommunity } -func newTester(mdb *mdbv1.MongoDBCommunity, opts ...*options.ClientOptions) *Tester { +func newTester(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...*options.ClientOptions) *Tester { t := &Tester{ + ctx: ctx, resource: mdb, } t.clientOpts = append(t.clientOpts, opts...) @@ -51,7 +53,7 @@ type OptionApplier interface { // FromResource returns a Tester instance from a MongoDB resource. It infers SCRAM username/password // and the hosts from the resource. -func FromResource(t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { +func FromResource(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { var clientOpts []*options.ClientOptions clientOpts = WithHosts(mdb.Hosts("")).ApplyOption(clientOpts...) @@ -62,7 +64,7 @@ func FromResource(t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplie if len(users) == 1 { user := users[0] passwordSecret := corev1.Secret{} - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: user.PasswordSecretRef.Name, Namespace: mdb.Namespace}, &passwordSecret) + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: user.PasswordSecretRef.Name, Namespace: mdb.Namespace}, &passwordSecret) if err != nil { return nil, err } @@ -75,10 +77,10 @@ func FromResource(t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplie clientOpts = opt.ApplyOption(clientOpts...) 
} - return newTester(&mdb, clientOpts...), nil + return newTester(ctx, &mdb, clientOpts...), nil } -func FromX509Resource(t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { +func FromX509Resource(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { var clientOpts []*options.ClientOptions clientOpts = WithHosts(mdb.Hosts("")).ApplyOption(clientOpts...) @@ -95,7 +97,7 @@ func FromX509Resource(t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionAp clientOpts = opt.ApplyOption(clientOpts...) } - return newTester(&mdb, clientOpts...), nil + return newTester(ctx, &mdb, clientOpts...), nil } // ConnectivitySucceeds performs a basic check that ensures that it is possible @@ -110,7 +112,7 @@ func (m *Tester) ConnectivityFails(opts ...OptionApplier) func(t *testing.T) { return m.connectivityCheck(false, opts...) } -func (m *Tester) ConnectivityRejected(opts ...OptionApplier) func(t *testing.T) { +func (m *Tester) ConnectivityRejected(ctx context.Context, opts ...OptionApplier) func(t *testing.T) { clientOpts := make([]*options.ClientOptions, 0) for _, optApplier := range opts { clientOpts = optApplier.ApplyOption(clientOpts...) @@ -122,7 +124,7 @@ func (m *Tester) ConnectivityRejected(opts ...OptionApplier) func(t *testing.T) t.Skip() } - if err := m.ensureClient(clientOpts...); err == nil { + if err := m.ensureClient(ctx, clientOpts...); err == nil { t.Fatalf("No error, but it should have failed") } } @@ -171,7 +173,7 @@ func (m *Tester) VerifyRoles(expectedRoles []automationconfig.CustomRole, tries return m.hasAdminCommandResult(func(t *testing.T) bool { var result CustomRolesResult err := m.mongoClient.Database("admin"). 
- RunCommand(context.TODO(), + RunCommand(m.ctx, bson.D{ {Key: "rolesInfo", Value: 1}, {Key: "showPrivileges", Value: true}, @@ -195,7 +197,7 @@ func (m *Tester) hasAdminCommandResult(verify verifyAdminResultFunc, tries int, } return func(t *testing.T) { - if err := m.ensureClient(clientOpts...); err != nil { + if err := m.ensureClient(m.ctx, clientOpts...); err != nil { t.Fatal(err) } @@ -216,7 +218,7 @@ func (m *Tester) hasAdminParameter(key string, expectedValue interface{}, tries return m.hasAdminCommandResult(func(t *testing.T) bool { var result map[string]interface{} err := m.mongoClient.Database("admin"). - RunCommand(context.TODO(), bson.D{{Key: "getParameter", Value: 1}, {Key: key, Value: 1}}). + RunCommand(m.ctx, bson.D{{Key: "getParameter", Value: 1}, {Key: key, Value: 1}}). Decode(&result) if err != nil { t.Logf("Unable to get admin setting %s with error : %s", key, err) @@ -244,16 +246,16 @@ func (m *Tester) connectivityCheck(shouldSucceed bool, opts ...OptionApplier) fu t.Skip() } - ctx, cancel := context.WithTimeout(context.Background(), connectivityOpts.ContextTimeout) + ctx, cancel := context.WithTimeout(m.ctx, connectivityOpts.ContextTimeout) defer cancel() - if err := m.ensureClient(clientOpts...); err != nil { + if err := m.ensureClient(ctx, clientOpts...); err != nil { t.Fatal(err) } attempts := 0 // There can be a short time before the user can auth as the user - err := wait.Poll(connectivityOpts.IntervalTime, connectivityOpts.TimeoutTime, func() (done bool, err error) { + err := wait.PollUntilContextTimeout(ctx, connectivityOpts.IntervalTime, connectivityOpts.TimeoutTime, false, func(ctx context.Context) (done bool, err error) { attempts++ collection := m.mongoClient.Database(connectivityOpts.Database).Collection(connectivityOpts.Collection) _, err = collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) @@ -280,7 +282,7 @@ func (m *Tester) connectivityCheck(shouldSucceed bool, opts ...OptionApplier) fu func (m *Tester) 
WaitForRotatedCertificate(mdb mdbv1.MongoDBCommunity, initialCertSerialNumber *big.Int) func(*testing.T) { return func(t *testing.T) { - tls, err := getClientTLSConfig(mdb) + tls, err := getClientTLSConfig(m.ctx, mdb) assert.NoError(t, err) // Reject all server certificates that don't have the expected serial number @@ -292,13 +294,13 @@ func (m *Tester) WaitForRotatedCertificate(mdb mdbv1.MongoDBCommunity, initialCe return nil } - if err := m.ensureClient(&options.ClientOptions{TLSConfig: tls}); err != nil { + if err := m.ensureClient(m.ctx, &options.ClientOptions{TLSConfig: tls}); err != nil { t.Fatal(err) } // Ping the cluster until it succeeds. The ping will only succeed with the right certificate. - err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) { - if err := m.mongoClient.Ping(context.TODO(), nil); err != nil { + err = wait.PollUntilContextTimeout(m.ctx, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { + if err := m.mongoClient.Ping(m.ctx, nil); err != nil { return false, nil } return true, nil @@ -307,14 +309,24 @@ func (m *Tester) WaitForRotatedCertificate(mdb mdbv1.MongoDBCommunity, initialCe } } +// EnsureMongodConfig is mostly used for checking port changes. Port changes take some until they finish. +// We cannot fully rely on the statefulset or resource being ready/running since it will change its state multiple +// times during a port change. That means a resource might leave, go into and leave running multiple times until +// it truly finished its port change. 
func (m *Tester) EnsureMongodConfig(selector string, expected interface{}) func(*testing.T) { return func(t *testing.T) { - opts, err := m.getCommandLineOptions() + connectivityOpts := defaults() + err := wait.PollUntilContextTimeout(m.ctx, connectivityOpts.IntervalTime, connectivityOpts.TimeoutTime, false, func(ctx context.Context) (done bool, err error) { + opts, err := m.getCommandLineOptions() + assert.NoError(t, err) + + parsed := objx.New(bsonToMap(opts)).Get("parsed").ObjxMap() + + return expected == parsed.Get(selector).Data(), nil + }) + assert.NoError(t, err) - // The options are stored under the key "parsed" - parsed := objx.New(bsonToMap(opts)).Get("parsed").ObjxMap() - assert.Equal(t, expected, parsed.Get(selector).Data()) } } @@ -324,7 +336,7 @@ func (m *Tester) getCommandLineOptions() (bson.M, error) { var result bson.M err := m.mongoClient. Database("admin"). - RunCommand(context.TODO(), bson.D{primitive.E{Key: "getCmdLineOpts", Value: 1}}). + RunCommand(m.ctx, bson.D{primitive.E{Key: "getCmdLineOpts", Value: 1}}). Decode(&result) return result, err @@ -347,7 +359,7 @@ func bsonToMap(m bson.M) map[string]interface{} { // StartBackgroundConnectivityTest starts periodically checking connectivity to the MongoDB deployment // with the defined interval. A cancel function is returned, which can be called to stop testing connectivity. 
func (m *Tester) StartBackgroundConnectivityTest(t *testing.T, interval time.Duration, opts ...OptionApplier) func() { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(m.ctx) t.Logf("Starting background connectivity test") // start a go routine which will periodically check basic MongoDB connectivity @@ -365,17 +377,17 @@ func (m *Tester) StartBackgroundConnectivityTest(t *testing.T, interval time.Dur return func() { cancel() if t != nil { - t.Log("Context cancelled, no longer checking connectivity") + t.Log("TestContext cancelled, no longer checking connectivity") } } } // ensureClient establishes a mongo client connection applying any addition // client options on top of what were provided at construction. -func (t *Tester) ensureClient(opts ...*options.ClientOptions) error { +func (t *Tester) ensureClient(ctx context.Context, opts ...*options.ClientOptions) error { allOpts := t.clientOpts allOpts = append(allOpts, opts...) - mongoClient, err := mongo.Connect(context.TODO(), allOpts...) + mongoClient, err := mongo.Connect(ctx, allOpts...) 
if err != nil { return err } @@ -396,7 +408,7 @@ func (m *Tester) PrometheusEndpointIsReachable(username, password string, useTls client := &http.Client{Transport: customTransport} return func(t *testing.T) { - _ = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { + _ = wait.PollUntilContextTimeout(m.ctx, 5*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { var idx int // Verify that the Prometheus port is enabled and responding with 200 @@ -497,8 +509,8 @@ func WithHosts(hosts []string) OptionApplier { } // WithTls configures the client to use tls -func WithTls(mdb mdbv1.MongoDBCommunity) OptionApplier { - tlsConfig, err := getClientTLSConfig(mdb) +func WithTls(ctx context.Context, mdb mdbv1.MongoDBCommunity) OptionApplier { + tlsConfig, err := getClientTLSConfig(ctx, mdb) if err != nil { panic(fmt.Errorf("could not retrieve TLS config: %s", err)) } @@ -540,10 +552,10 @@ func WithReplicaSet(rsname string) OptionApplier { } // getClientTLSConfig reads in the tls fixtures -func getClientTLSConfig(mdb mdbv1.MongoDBCommunity) (*tls.Config, error) { +func getClientTLSConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*tls.Config, error) { caSecret := corev1.Secret{} caSecretName := types.NamespacedName{Name: mdb.Spec.Security.TLS.CaCertificateSecret.Name, Namespace: mdb.Namespace} - if err := e2eutil.TestClient.Get(context.TODO(), caSecretName, &caSecret); err != nil { + if err := e2eutil.TestClient.Get(ctx, caSecretName, &caSecret); err != nil { return nil, err } caPEM := caSecret.Data["ca.crt"] @@ -556,10 +568,10 @@ func getClientTLSConfig(mdb mdbv1.MongoDBCommunity) (*tls.Config, error) { } // GetAgentCert reads the agent key certificate -func GetAgentCert(mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { +func GetAgentCert(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { certSecret := corev1.Secret{} certSecretName := mdb.AgentCertificateSecretNamespacedName() - if err := 
e2eutil.TestClient.Get(context.TODO(), certSecretName, &certSecret); err != nil { + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { return nil, err } block, _ := pem.Decode(certSecret.Data["tls.crt"]) @@ -570,10 +582,10 @@ func GetAgentCert(mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { } // GetClientCert reads the client key certificate -func GetClientCert(mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { +func GetClientCert(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { certSecret := corev1.Secret{} certSecretName := types.NamespacedName{Name: mdb.Spec.Security.TLS.CertificateKeySecret.Name, Namespace: mdb.Namespace} - if err := e2eutil.TestClient.Get(context.TODO(), certSecretName, &certSecret); err != nil { + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { return nil, err } block, _ := pem.Decode(certSecret.Data["tls.crt"]) @@ -583,10 +595,10 @@ func GetClientCert(mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { return x509.ParseCertificate(block.Bytes) } -func GetUserCert(mdb mdbv1.MongoDBCommunity, userCertSecret string) (string, error) { +func GetUserCert(ctx context.Context, mdb mdbv1.MongoDBCommunity, userCertSecret string) (string, error) { certSecret := corev1.Secret{} certSecretName := types.NamespacedName{Name: userCertSecret, Namespace: mdb.Namespace} - if err := e2eutil.TestClient.Get(context.TODO(), certSecretName, &certSecret); err != nil { + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { return "", err } crt, _ := pem.Decode(certSecret.Data["tls.crt"]) diff --git a/test/e2e/util/wait/wait.go b/test/e2e/util/wait/wait.go index 19ffc2b57..54798860e 100644 --- a/test/e2e/util/wait/wait.go +++ b/test/e2e/util/wait/wait.go @@ -26,39 +26,39 @@ const ( // ForConfigMapToExist waits until a ConfigMap of the given name exists // using the provided retryInterval and timeout -func 
ForConfigMapToExist(cmName string, retryInterval, timeout time.Duration) (corev1.ConfigMap, error) { +func ForConfigMapToExist(ctx context.Context, cmName string, retryInterval, timeout time.Duration) (corev1.ConfigMap, error) { cm := corev1.ConfigMap{} - return cm, waitForRuntimeObjectToExist(cmName, retryInterval, timeout, &cm, e2eutil.OperatorNamespace) + return cm, waitForRuntimeObjectToExist(ctx, cmName, retryInterval, timeout, &cm, e2eutil.OperatorNamespace) } // ForSecretToExist waits until a Secret of the given name exists // using the provided retryInterval and timeout -func ForSecretToExist(cmName string, retryInterval, timeout time.Duration, namespace string) (corev1.Secret, error) { +func ForSecretToExist(ctx context.Context, cmName string, retryInterval, timeout time.Duration, namespace string) (corev1.Secret, error) { s := corev1.Secret{} - return s, waitForRuntimeObjectToExist(cmName, retryInterval, timeout, &s, namespace) + return s, waitForRuntimeObjectToExist(ctx, cmName, retryInterval, timeout, &s, namespace) } // ForMongoDBToReachPhase waits until the given MongoDB resource reaches the expected phase -func ForMongoDBToReachPhase(t *testing.T, mdb *mdbv1.MongoDBCommunity, phase mdbv1.Phase, retryInterval, timeout time.Duration) error { - return waitForMongoDBCondition(mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { +func ForMongoDBToReachPhase(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, phase mdbv1.Phase, retryInterval, timeout time.Duration) error { + return waitForMongoDBCondition(ctx, mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { t.Logf("current phase: %s, waiting for phase: %s", db.Status.Phase, phase) return db.Status.Phase == phase }) } // ForMongoDBMessageStatus waits until the given MongoDB resource gets the expected message status -func ForMongoDBMessageStatus(t *testing.T, mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, message string) error { - return 
waitForMongoDBCondition(mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { +func ForMongoDBMessageStatus(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, message string) error { + return waitForMongoDBCondition(ctx, mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { t.Logf("current message: %s, waiting for message: %s", db.Status.Message, message) return db.Status.Message == message }) } // waitForMongoDBCondition polls and waits for a given condition to be true -func waitForMongoDBCondition(mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, condition func(mdbv1.MongoDBCommunity) bool) error { +func waitForMongoDBCondition(ctx context.Context, mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, condition func(mdbv1.MongoDBCommunity) bool) error { mdbNew := mdbv1.MongoDBCommunity{} - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - err = e2eutil.TestClient.Get(context.TODO(), mdb.NamespacedName(), &mdbNew) + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, mdb.NamespacedName(), &mdbNew) if err != nil { return false, err } @@ -69,72 +69,72 @@ func waitForMongoDBCondition(mdb *mdbv1.MongoDBCommunity, retryInterval, timeout // ForDeploymentToExist waits until a Deployment of the given name exists // using the provided retryInterval and timeout -func ForDeploymentToExist(deployName string, retryInterval, timeout time.Duration, namespace string) (appsv1.Deployment, error) { +func ForDeploymentToExist(ctx context.Context, deployName string, retryInterval, timeout time.Duration, namespace string) (appsv1.Deployment, error) { deploy := appsv1.Deployment{} - return deploy, waitForRuntimeObjectToExist(deployName, retryInterval, timeout, &deploy, namespace) + return deploy, waitForRuntimeObjectToExist(ctx, deployName, 
retryInterval, timeout, &deploy, namespace) } // ForStatefulSetToExist waits until a StatefulSet of the given name exists // using the provided retryInterval and timeout -func ForStatefulSetToExist(stsName string, retryInterval, timeout time.Duration, namespace string) (appsv1.StatefulSet, error) { +func ForStatefulSetToExist(ctx context.Context, stsName string, retryInterval, timeout time.Duration, namespace string) (appsv1.StatefulSet, error) { sts := appsv1.StatefulSet{} - return sts, waitForRuntimeObjectToExist(stsName, retryInterval, timeout, &sts, namespace) + return sts, waitForRuntimeObjectToExist(ctx, stsName, retryInterval, timeout, &sts, namespace) } // ForStatefulSetToBeDeleted waits until a StatefulSet of the given name is deleted // using the provided retryInterval and timeout -func ForStatefulSetToBeDeleted(stsName string, retryInterval, timeout time.Duration, namespace string) error { +func ForStatefulSetToBeDeleted(ctx context.Context, stsName string, retryInterval, timeout time.Duration, namespace string) error { sts := appsv1.StatefulSet{} - return waitForRuntimeObjectToBeDeleted(stsName, retryInterval, timeout, &sts, namespace) + return waitForRuntimeObjectToBeDeleted(ctx, stsName, retryInterval, timeout, &sts, namespace) } // ForStatefulSetToHaveUpdateStrategy waits until all replicas of the StatefulSet with the given name // have reached the ready status -func ForStatefulSetToHaveUpdateStrategy(t *testing.T, mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType, opts ...Configuration) error { +func ForStatefulSetToHaveUpdateStrategy(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType, opts ...Configuration) error { options := newOptions(opts...) 
- return waitForStatefulSetCondition(t, mdb, options, func(sts appsv1.StatefulSet) bool { + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { return sts.Spec.UpdateStrategy.Type == strategy }) } // ForStatefulSetToBeReady waits until all replicas of the StatefulSet with the given name // have reached the ready status -func ForStatefulSetToBeReady(t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { +func ForStatefulSetToBeReady(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { options := newOptions(opts...) - return waitForStatefulSetCondition(t, mdb, options, func(sts appsv1.StatefulSet) bool { + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { return statefulset.IsReady(sts, mdb.Spec.Members) }) } // ForStatefulSetToBeUnready waits until all replicas of the StatefulSet with the given name // is not ready. -func ForStatefulSetToBeUnready(t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { +func ForStatefulSetToBeUnready(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { options := newOptions(opts...) - return waitForStatefulSetCondition(t, mdb, options, func(sts appsv1.StatefulSet) bool { + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { return !statefulset.IsReady(sts, mdb.Spec.Members) }) } // ForArbitersStatefulSetToBeReady waits until all replicas of the StatefulSet with the given name // have reached the ready status. -func ForArbitersStatefulSetToBeReady(t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { +func ForArbitersStatefulSetToBeReady(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { options := newOptions(opts...) 
- return waitForStatefulSetConditionWithSpecificSts(t, mdb, ArbitersStatefulSet, options, func(sts appsv1.StatefulSet) bool { + return waitForStatefulSetConditionWithSpecificSts(ctx, t, mdb, ArbitersStatefulSet, options, func(sts appsv1.StatefulSet) bool { return statefulset.IsReady(sts, mdb.Spec.Arbiters) }) } // ForStatefulSetToBeReadyAfterScaleDown waits for just the ready replicas to be correct // and does not account for the updated replicas -func ForStatefulSetToBeReadyAfterScaleDown(t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { +func ForStatefulSetToBeReadyAfterScaleDown(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { options := newOptions(opts...) - return waitForStatefulSetCondition(t, mdb, options, func(sts appsv1.StatefulSet) bool { + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { return int32(mdb.Spec.Members) == sts.Status.ReadyReplicas }) } -func waitForStatefulSetConditionWithSpecificSts(t *testing.T, mdb *mdbv1.MongoDBCommunity, statefulSetType StatefulSetType, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { - _, err := ForStatefulSetToExist(mdb.Name, waitOpts.RetryInterval, waitOpts.Timeout, mdb.Namespace) +func waitForStatefulSetConditionWithSpecificSts(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, statefulSetType StatefulSetType, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { + _, err := ForStatefulSetToExist(ctx, mdb.Name, waitOpts.RetryInterval, waitOpts.Timeout, mdb.Namespace) if err != nil { return fmt.Errorf("error waiting for stateful set to be created: %s", err) } @@ -144,8 +144,8 @@ func waitForStatefulSetConditionWithSpecificSts(t *testing.T, mdb *mdbv1.MongoDB if statefulSetType == ArbitersStatefulSet { name = mdb.ArbiterNamespacedName() } - return wait.Poll(waitOpts.RetryInterval, waitOpts.Timeout, func() (done bool, err error) { - err = 
e2eutil.TestClient.Get(context.TODO(), name, &sts) + return wait.PollUntilContextTimeout(ctx, waitOpts.RetryInterval, waitOpts.Timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, name, &sts) if err != nil { return false, err } @@ -156,19 +156,14 @@ func waitForStatefulSetConditionWithSpecificSts(t *testing.T, mdb *mdbv1.MongoDB }) } -func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDBCommunity, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { +func waitForStatefulSetCondition(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { // uses members statefulset - return waitForStatefulSetConditionWithSpecificSts(t, mdb, MembersStatefulSet, waitOpts, condition) + return waitForStatefulSetConditionWithSpecificSts(ctx, t, mdb, MembersStatefulSet, waitOpts, condition) } -func waitForStatefulSetConditionArbiters(t *testing.T, mdb *mdbv1.MongoDBCommunity, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { - // uses members statefulset - return waitForStatefulSetConditionWithSpecificSts(t, mdb, ArbitersStatefulSet, waitOpts, condition) -} - -func ForPodReadiness(t *testing.T, isReady bool, containerName string, timeout time.Duration, pod corev1.Pod) error { - return wait.Poll(time.Second*3, timeout, func() (done bool, err error) { - err = e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) +func ForPodReadiness(ctx context.Context, t *testing.T, isReady bool, containerName string, timeout time.Duration, pod corev1.Pod) error { + return wait.PollUntilContextTimeout(ctx, time.Second*3, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) if err != nil { return false, err } @@ -182,9 +177,9 @@ func ForPodReadiness(t 
*testing.T, isReady bool, containerName string, timeout t }) } -func ForPodPhase(t *testing.T, timeout time.Duration, pod corev1.Pod, podPhase corev1.PodPhase) error { - return wait.Poll(time.Second*3, timeout, func() (done bool, err error) { - err = e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) +func ForPodPhase(ctx context.Context, t *testing.T, timeout time.Duration, pod corev1.Pod, podPhase corev1.PodPhase) error { + return wait.PollUntilContextTimeout(ctx, time.Second*3, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) if err != nil { return false, err } @@ -195,24 +190,24 @@ func ForPodPhase(t *testing.T, timeout time.Duration, pod corev1.Pod, podPhase c // waitForRuntimeObjectToExist waits until a runtime.Object of the given name exists // using the provided retryInterval and timeout provided. -func waitForRuntimeObjectToExist(name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - return runtimeObjectExists(name, obj, namespace) +func waitForRuntimeObjectToExist(ctx context.Context, name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + return runtimeObjectExists(ctx, name, obj, namespace) }) } // waitForRuntimeObjectToBeDeleted waits until a runtime.Object of the given name is deleted // using the provided retryInterval and timeout provided. 
-func waitForRuntimeObjectToBeDeleted(name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - exists, err := runtimeObjectExists(name, obj, namespace) +func waitForRuntimeObjectToBeDeleted(ctx context.Context, name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + exists, err := runtimeObjectExists(ctx, name, obj, namespace) return !exists, err }) } // runtimeObjectExists checks if a runtime.Object of the given name exists -func runtimeObjectExists(name string, obj client.Object, namespace string) (bool, error) { - err := e2eutil.TestClient.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, obj) +func runtimeObjectExists(ctx context.Context, name string, obj client.Object, namespace string) (bool, error) { + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, obj) if err != nil { return false, client.IgnoreNotFound(err) } diff --git a/test/test-app/requirements.txt b/test/test-app/requirements.txt index cbd93a746..c4165c3f4 100644 --- a/test/test-app/requirements.txt +++ b/test/test-app/requirements.txt @@ -1,2 +1,2 @@ -PyMongo==4.0.1 -dnspython==2.2.0 +PyMongo==4.6.3 +dnspython==2.6.1 diff --git a/testbin/setup-envtest.sh b/testbin/setup-envtest.sh deleted file mode 100644 index 5fc471bc0..000000000 --- a/testbin/setup-envtest.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2020 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o pipefail - -# Turn colors in this script off by setting the NO_COLOR variable in your -# environment to any value: -# -# $ NO_COLOR=1 test.sh -NO_COLOR=${NO_COLOR:-""} -if [ -z "$NO_COLOR" ]; then - header=$'\e[1;33m' - reset=$'\e[0m' -else - header='' - reset='' -fi - -function header_text { - echo "$header$*$reset" -} - -function setup_envtest_env { - header_text "setting up env vars" - - # Setup env vars - KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS:-""} - if [[ -z "${KUBEBUILDER_ASSETS}" ]]; then - export KUBEBUILDER_ASSETS=$1/bin - fi -} - -# fetch k8s API gen tools and make it available under envtest_root_dir/bin. -# -# Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable -# in your environment to any value: -# -# $ SKIP_FETCH_TOOLS=1 ./check-everything.sh -# -# If you skip fetching tools, this script will use the tools already on your -# machine. -function fetch_envtest_tools { - SKIP_FETCH_TOOLS=${SKIP_FETCH_TOOLS:-""} - if [ -n "$SKIP_FETCH_TOOLS" ]; then - return 0 - fi - - tmp_root=/tmp - - k8s_version="${ENVTEST_K8S_VERSION:-1.19.2}" - goarch="$(go env GOARCH)" - goos="$(go env GOOS)" - - if [[ "$goos" != "linux" && "$goos" != "darwin" ]]; then - echo "OS '$goos' not supported. Aborting." 
>&2 - return 1 - fi - - local dest_dir="${1}" - - # use the pre-existing version in the temporary folder if it matches our k8s version - if [[ -x "${dest_dir}/bin/kube-apiserver" ]]; then - version=$("${dest_dir}"/bin/kube-apiserver --version) - if [[ $version == *"${k8s_version}"* ]]; then - header_text "Using cached envtest tools from ${dest_dir}" - return 0 - fi - fi - - header_text "fetching envtest tools@${k8s_version} (into '${dest_dir}')" - envtest_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz" - envtest_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$envtest_tools_archive_name" - - envtest_tools_archive_path="$tmp_root/$envtest_tools_archive_name" - if [ ! -f "$envtest_tools_archive_path" ]; then - curl -sL "${envtest_tools_download_url}" -o "$envtest_tools_archive_path" - fi - - mkdir -p "${dest_dir}" - tar -C "${dest_dir}" --strip-components=1 -zvxf "$envtest_tools_archive_path" -}