diff --git a/.action_templates/e2e-fork-template.yaml b/.action_templates/e2e-fork-template.yaml new file mode 100644 index 000000000..c6378cceb --- /dev/null +++ b/.action_templates/e2e-fork-template.yaml @@ -0,0 +1,27 @@ +name: Run E2E Fork +jobs: + - template: display-github-context + - template: setup + # dependabot gets a read only github token, and so must use pull_request_target instead of pull_request. + if: contains(github.event.pull_request.labels.*.name, 'dependencies') || contains(github.event.pull_request.labels.*.name, 'safe-to-test') + steps: + - template: cancel-previous + - template: checkout-fork + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: tests + steps: + - template: cancel-previous + - template: checkout-fork + - template: set-run-status + - template: setup-and-install-python + - template: setup-kind-cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - template: run-test-matrix + - template: save-run-status + - template: dump-and-upload-diagnostics + +events: + - template: pull-request-target diff --git a/.action_templates/e2e-pr-template.yaml b/.action_templates/e2e-pr-template.yaml new file mode 100644 index 000000000..8c4e79d14 --- /dev/null +++ b/.action_templates/e2e-pr-template.yaml @@ -0,0 +1,29 @@ +name: Run E2E +jobs: + - template: display-github-context + - template: setup + # run on master, or if a PR is being created from a branch, or if it has been manually triggered. 
+ if: github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') + steps: + - template: cancel-previous + - template: checkout + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: tests + steps: + - template: cancel-previous + - template: checkout + - template: set-run-status + - template: setup-and-install-python + - template: setup-kind-cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - template: run-test-matrix + - template: save-run-status + - template: dump-and-upload-diagnostics + +events: + - template: on-pull-request-master + - template: on-push-master + - template: workflow-dispatch diff --git a/.action_templates/e2e-single-template.yaml b/.action_templates/e2e-single-template.yaml new file mode 100644 index 000000000..36e586af3 --- /dev/null +++ b/.action_templates/e2e-single-template.yaml @@ -0,0 +1,20 @@ +name: Run Single E2E +jobs: + - template: display-github-context + - template: setup + steps: + - template: checkout + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: single-test + steps: + - template: checkout + - template: setup-and-install-python + - template: setup-kind-cluster + - template: run-test-single + - template: dump-and-upload-diagnostics-always + +events: + - template: single-e2e-workflow-dispatch diff --git a/.action_templates/events/on-pull-request-master.yaml b/.action_templates/events/on-pull-request-master.yaml new file mode 100644 index 000000000..9107a3d91 --- /dev/null +++ b/.action_templates/events/on-pull-request-master.yaml @@ -0,0 +1,5 @@ +pull_request: + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/on-push-master.yaml 
b/.action_templates/events/on-push-master.yaml new file mode 100644 index 000000000..844e045c3 --- /dev/null +++ b/.action_templates/events/on-push-master.yaml @@ -0,0 +1,5 @@ +push: + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/pull-request-target.yaml b/.action_templates/events/pull-request-target.yaml new file mode 100644 index 000000000..1e7743cd8 --- /dev/null +++ b/.action_templates/events/pull-request-target.yaml @@ -0,0 +1,7 @@ +# pull_request_target means that the secrets of this repo will be used. +pull_request_target: + types: [labeled] + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/single-e2e-workflow-dispatch.yaml b/.action_templates/events/single-e2e-workflow-dispatch.yaml new file mode 100644 index 000000000..01cc9fcae --- /dev/null +++ b/.action_templates/events/single-e2e-workflow-dispatch.yaml @@ -0,0 +1,13 @@ +workflow_dispatch: + inputs: + distro: + description: 'Distro to run test' + required: true + default: "ubuntu" + test-name: + description: 'Name of test to run' + required: true + cluster-wide: + description: 'Whether or not the test is cluster wide' + required: true + default: "false" diff --git a/.action_templates/events/workflow-dispatch.yaml b/.action_templates/events/workflow-dispatch.yaml new file mode 100644 index 000000000..5de950ef3 --- /dev/null +++ b/.action_templates/events/workflow-dispatch.yaml @@ -0,0 +1 @@ +workflow_dispatch: {} diff --git a/.action_templates/jobs/display-github-context.yaml b/.action_templates/jobs/display-github-context.yaml new file mode 100644 index 000000000..37ecb1972 --- /dev/null +++ b/.action_templates/jobs/display-github-context.yaml @@ -0,0 +1,8 @@ +action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" diff --git a/.action_templates/jobs/setup.yaml b/.action_templates/jobs/setup.yaml new file 
mode 100644 index 000000000..ad46dc26d --- /dev/null +++ b/.action_templates/jobs/setup.yaml @@ -0,0 +1,11 @@ +setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e diff --git a/.action_templates/jobs/single-test.yaml b/.action_templates/jobs/single-test.yaml new file mode 100644 index 000000000..b06a8a918 --- /dev/null +++ b/.action_templates/jobs/single-test.yaml @@ -0,0 +1,3 @@ +single-test: + runs-on: ubuntu-latest + needs: [setup] diff --git a/.action_templates/jobs/tests.yaml b/.action_templates/jobs/tests.yaml new file mode 100644 index 000000000..f360ee3d6 --- /dev/null +++ b/.action_templates/jobs/tests.yaml @@ -0,0 +1,68 @@ +tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts + distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi 
+ - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi diff --git a/.action_templates/steps/build-and-push-development-images.yaml b/.action_templates/steps/build-and-push-development-images.yaml new file mode 100644 index 000000000..4fe3df401 --- /dev/null +++ b/.action_templates/steps/build-and-push-development-images.yaml @@ -0,0 +1,6 @@ +- name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + version_id: "${{ github.run_id }}" diff --git a/.action_templates/steps/cancel-previous.yaml b/.action_templates/steps/cancel-previous.yaml new file mode 100644 index 000000000..301d5af50 --- /dev/null +++ b/.action_templates/steps/cancel-previous.yaml @@ -0,0 +1,4 @@ +- name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} diff --git a/.action_templates/steps/checkout-fork.yaml b/.action_templates/steps/checkout-fork.yaml new file mode 100644 index 000000000..abd35041c --- /dev/null +++ b/.action_templates/steps/checkout-fork.yaml @@ -0,0 +1,9 @@ +# We checkout the forked repository code. 
+# Because we are using pull_request_target the Github Secrets will be passed +# So code should be reviewed before labeling as "safe-to-test" +- name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: ${{github.event.pull_request.head.repo.full_name}} + submodules: true diff --git a/.action_templates/steps/checkout.yaml b/.action_templates/steps/checkout.yaml new file mode 100644 index 000000000..da02fc2f3 --- /dev/null +++ b/.action_templates/steps/checkout.yaml @@ -0,0 +1,4 @@ +- name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true diff --git a/.action_templates/steps/dump-and-upload-diagnostics-always.yaml b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml new file mode 100644 index 000000000..968ecd9ce --- /dev/null +++ b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml @@ -0,0 +1,12 @@ +- name: Dump Diagnostics + if: always() + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + +- name: Upload Diagnostics + if: always() + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: "${{ github.event.inputs.test-name }}-${{ github.event.inputs.distro }}-diagnostics" + path: "${{ github.workspace }}/diagnostics" diff --git a/.action_templates/steps/dump-and-upload-diagnostics.yaml b/.action_templates/steps/dump-and-upload-diagnostics.yaml new file mode 100644 index 000000000..17f5d2688 --- /dev/null +++ b/.action_templates/steps/dump-and-upload-diagnostics.yaml @@ -0,0 +1,13 @@ +- name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + +- name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + 
with: + name: "${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics" + path: "${{ github.workspace }}/diagnostics" diff --git a/.action_templates/steps/quay-login.yaml b/.action_templates/steps/quay-login.yaml new file mode 100644 index 000000000..77a8dd06f --- /dev/null +++ b/.action_templates/steps/quay-login.yaml @@ -0,0 +1,6 @@ +- name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.action_templates/steps/run-test-matrix.yaml b/.action_templates/steps/run-test-matrix.yaml new file mode 100644 index 000000000..9c572a89c --- /dev/null +++ b/.action_templates/steps/run-test-matrix.yaml @@ -0,0 +1,9 @@ +- name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} diff --git a/.action_templates/steps/run-test-single.yaml b/.action_templates/steps/run-test-single.yaml new file mode 100644 index 000000000..453425961 --- /dev/null +++ b/.action_templates/steps/run-test-single.yaml @@ -0,0 +1,3 @@ +- name: Run Test Single + run: | + python3 ./scripts/dev/e2e.py --test ${{ github.event.inputs.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ github.event.inputs.distro }} --cluster-wide ${{ github.event.inputs.cluster-wide }} diff --git a/.action_templates/steps/save-run-status.yaml b/.action_templates/steps/save-run-status.yaml new file mode 100644 index 000000000..84845013b --- /dev/null +++ b/.action_templates/steps/save-run-status.yaml @@ -0,0 +1,3 @@ +- name: Save run status + if: always() + run: echo "::set-output name=last_run_status::${{ steps.e2e_test.outcome 
}}" > last_run_status diff --git a/.action_templates/steps/set-run-status.yaml b/.action_templates/steps/set-run-status.yaml new file mode 100644 index 000000000..9f4a76541 --- /dev/null +++ b/.action_templates/steps/set-run-status.yaml @@ -0,0 +1,17 @@ +- name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. + # see https://github.com/actions/runner/issues/432 +- name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + +- name: Set last run status + id: last_run_status + run: cat last_run_status diff --git a/.action_templates/steps/set-up-qemu.yaml b/.action_templates/steps/set-up-qemu.yaml new file mode 100644 index 000000000..c84384bfc --- /dev/null +++ b/.action_templates/steps/set-up-qemu.yaml @@ -0,0 +1,2 @@ +- name: Set up QEMU + uses: docker/setup-qemu-action@v3 diff --git a/.action_templates/steps/setup-and-install-python.yaml b/.action_templates/steps/setup-and-install-python.yaml new file mode 100644 index 000000000..b924e01ae --- /dev/null +++ b/.action_templates/steps/setup-and-install-python.yaml @@ -0,0 +1,11 @@ +- name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' +- name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} +- name: Install Python Dependencies + run: pip install -r requirements.txt diff --git a/.action_templates/steps/setup-kind-cluster.yaml b/.action_templates/steps/setup-kind-cluster.yaml new file mode 100644 index 000000000..b17558382 --- /dev/null +++ b/.action_templates/steps/setup-kind-cluster.yaml @@ -0,0 
+1,11 @@ +- name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster +- name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + +- name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml diff --git a/.dockerignore b/.dockerignore index 2e207bdcd..4d9a61d4f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,8 +1,13 @@ .github .idea -agent -build/_output zz_* -scripts vendor/ +scripts/ .git/ +bin/ +testbin/ +.mypy_cache/ +main +__debug_bin +# allow agent LICENSE +!scripts/dev/templates/agent/LICENSE diff --git a/.evergreen.yml b/.evergreen.yml deleted file mode 100644 index 2ee7c863d..000000000 --- a/.evergreen.yml +++ /dev/null @@ -1,184 +0,0 @@ -ignore: - - "*.md" - -functions: - clone: - - command: subprocess.exec - type: setup - params: - command: "mkdir -p mongodb-kubernetes-operator" - - command: git.get_project - type: setup - params: - directory: mongodb-kubernetes-operator - - go_test: - - command: subprocess.exec - type: test - params: - include_expansions_in_env: - - version_id - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/run_unit_tests.sh - - setup_operator_sdk: - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator/scripts/ci - command: go run download.go - env: - URL: https://github.com/operator-framework/operator-sdk/releases/download/v0.15.1/operator-sdk-v0.15.1-x86_64-linux-gnu - FILENAME: operator-sdk - DIR: ${workdir}/bin - - setup_kubernetes_environment: - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator/scripts/ci - command: go run download.go - env: - URL: 
https://storage.googleapis.com/kubernetes-release/release/v1.15.4/bin/linux/amd64/kubectl - FILENAME: kubectl - DIR: ${workdir}/bin - - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator/scripts/ci - command: go run download.go - env: - URL: https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64 - FILENAME: kind - DIR: ${workdir}/bin - - - command: subprocess.exec - type: setup - params: - add_to_path: - - ${workdir}/bin - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/create_kind_cluster.sh - env: - KUBECONFIG: ${workdir}/kube_config - - run_e2e_test: - - command: subprocess.exec - type: test - params: - add_to_path: - - ${workdir}/bin - working_dir: mongodb-kubernetes-operator - include_expansions_in_env: - - version_id - - test - binary: scripts/ci/run_test.sh - env: - KUBECONFIG: ${workdir}/kube_config - - build_and_push_image: - - command: subprocess.exec - type: setup - params: - include_expansions_in_env: - - version_id - - quay_user_name - - quay_password - - image - - image_type - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/build_and_push_image.sh - -tasks: - - name: build_operator_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: build_and_push_image - vars: - image_type: operator - image: quay.io/mongodb/community-operator-dev:${version_id} - - - name: build_e2e_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: build_and_push_image - vars: - image: quay.io/mongodb/community-operator-e2e:${version_id} - image_type: e2e - - - name: build_testrunner_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: build_and_push_image - vars: - image: quay.io/mongodb/community-operator-testrunner:${version_id} - image_type: testrunner - - - name: unit_tests - commands: - - func: clone - - func: go_test - - - name: e2e_test_replica_set - commands: - - func: clone - - func: 
setup_kubernetes_environment - - func: run_e2e_test - vars: - test: replica_set - - - name: e2e_test_replica_set_readiness_probe - commands: - - func: clone - - func: setup_kubernetes_environment - - func: run_e2e_test - vars: - test: replica_set_readiness_probe - - - name: e2e_test_replica_set_scale - commands: - - func: clone - - func: setup_kubernetes_environment - - func: run_e2e_test - vars: - test: replica_set_scale - -buildvariants: - - name: go_unit_tests - display_name: go_unit_tests - run_on: - - ubuntu1604-build - tasks: - - name: unit_tests - - - name: e2e_tests - display_name: e2e_tests - run_on: - - ubuntu1604-build - depends_on: - - name: build_operator_image - variant: init_test_run - - name: build_e2e_image - variant: init_test_run - - name: build_testrunner_image - variant: init_test_run - tasks: - - name: e2e_test_replica_set - - name: e2e_test_replica_set_readiness_probe - - name: e2e_test_replica_set_scale - - - name: init_test_run - display_name: init_test_run - run_on: - - ubuntu1604-build - tasks: - - name: build_operator_image - - name: build_e2e_image - - name: build_testrunner_image diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..db61cf612 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @mircea-cosbuc @lsierant @nammn @Julien-Ben @MaciejKaras @lucian-tosa @fealebenpae @m1kola \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..08b2b00ab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,64 @@ +--- +name: Bug report +about: File a report about a problem with the Operator +title: '' +labels: '' +assignees: '' + +--- +**What did you do to encounter the bug?** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**What did you expect?** +A clear and concise description of what you expected to happen. 
+ +**What happened instead?** +A clear and concise description of what happened instead + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Operator Information** + - Operator Version + - MongoDB Image used + +**Kubernetes Cluster Information** + - Distribution + - Version + - Image Registry location (quay, or an internal registry) + +**Additional context** +Add any other context about the problem here. + +If possible, please include: + - The operator logs + - Below we assume that your replicaset database pods are named `mongo-<>`. For instance: +``` +❯ k get pods +NAME READY STATUS RESTARTS AGE +mongo-0 2/2 Running 0 19h +mongo-1 2/2 Running 0 19h + +❯ k get mdbc +NAME PHASE VERSION +mongo Running 4.4.0 +``` + - yaml definitions of your MongoDB Deployment(s): + - `kubectl get mdbc -oyaml` + - yaml definitions of your kubernetes objects like the statefulset(s), pods (we need to see the state of the containers): + - `kubectl get sts -oyaml` + - `kubectl get pods -oyaml` + - The Pod logs: + - `kubectl logs mongo-0` + - The agent clusterconfig of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/lib/automation/config/cluster-config.json` + - The agent health status of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/healthstatus/agent-health-status.json` + - The verbose agent logs of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent-verbose.log` + - You might not have the verbose ones, in that case the non-verbose agent logs: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent.log` diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..5c249255a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - 
name: MongoDB Feedback + url: https://feedback.mongodb.com/forums/924355-ops-tools + about: Use our Feedback page for making feature requests. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f77733901..650880d32 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,12 @@ +### Summary: + + ### All Submissions: * [ ] Have you opened an Issue before filing this PR? diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml new file mode 100644 index 000000000..435bc8a7b --- /dev/null +++ b/.github/config_files/config_lint.yaml @@ -0,0 +1,14 @@ +checks: + addAllBuiltIn: true + +#Reasons to exclude: + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now, it brings nothing to add these probes + # because they will not check whether the operator is actually ready/living + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "use-namespace" diff --git a/.github/config_files/config_lint_clusterwide.yaml b/.github/config_files/config_lint_clusterwide.yaml new file mode 100644 index 000000000..b69b5147d --- /dev/null +++ b/.github/config_files/config_lint_clusterwide.yaml @@ -0,0 +1,18 @@ +checks: + addAllBuiltIn: true + +#Reasons to exclude: + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now, it brings nothing to add these probes + # because they will not check whether the operator is actually ready/living. 
+ # When using a clusterwide operator, it is required to be able to create StatefulSets and Secrets + # so we exclude "access-to-secrets" and "access-to-create-pods" + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "use-namespace" + - "access-to-secrets" + - "access-to-create-pods" diff --git a/.github/config_files/config_lint_openshift.yaml b/.github/config_files/config_lint_openshift.yaml new file mode 100644 index 000000000..34ff6e440 --- /dev/null +++ b/.github/config_files/config_lint_openshift.yaml @@ -0,0 +1,17 @@ +checks: + addAllBuiltIn: true + + #Reasons to exclude + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now it brings nothing to add theses probes + # because they will not check whether the operator is actually ready/living + # run-as-non-root & no-read-only-root-fs because the security is managed somewhere else + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "run-as-non-root" + - "no-read-only-root-fs" + - "use-namespace" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..eb3084c66 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,18 @@ +version: 2 +updates: + - package-ecosystem: gomod + directory: "/" + schedule: + interval: weekly + day: monday + ignore: + - dependency-name: k8s.io/api + - dependency-name: k8s.io/apimachinery + - dependency-name: k8s.io/client-go + - dependency-name: k8s.io/code-generator + - dependency-name: sigs.k8s.io/controller-runtime + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + day: monday diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml new file mode 
100644 index 000000000..942020dbd --- /dev/null +++ b/.github/workflows/close-stale-issues.yml @@ -0,0 +1,25 @@ +# +# Docs: https://github.com/marketplace/actions/close-stale-issues +# +name: Close Stale Issues +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue is being marked stale because it has been open for 60 days with no activity. Please comment if this issue is still affecting you. If there is no change, this issue will be closed in 30 days.' + stale-pr-message: 'This PR is being marked stale because it has been open for 60 days with no activity. Please update the PR or ask for a fresh review.' + close-issue-message: 'This issue was closed because it became stale and did not receive further updates. If the issue is still affecting you, please re-open it, or file a fresh Issue with updated information.' + + days-before-stale: 60 + days-before-close: 30 + days-before-pr-close: -1 # never close PRs + + exempt-issue-labels: 'bug,feature-request' + ascending: true diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml new file mode 100644 index 000000000..345941c18 --- /dev/null +++ b/.github/workflows/code-health.yml @@ -0,0 +1,33 @@ +name: Code Health + +on: + pull_request: + branches: [ master ] +jobs: + Black: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Black Check + uses: jpetrucciani/black-check@7f5b2ad20fa5484f1884f07c1937e032ed8cd939 + + Mypy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Mypy linting + uses: jpetrucciani/mypy-check@179fdad632bf3ccf4cabb7ee4307ef25e51d2f96 + with: + path: scripts/*/*.py + + Golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: stable + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 diff --git 
a/.github/workflows/comment-release-pr.yml b/.github/workflows/comment-release-pr.yml new file mode 100644 index 000000000..3944aa660 --- /dev/null +++ b/.github/workflows/comment-release-pr.yml @@ -0,0 +1,21 @@ +name: Link Github Releases +on: + pull_request: + types: [closed] + +jobs: + comment: + # only link releases on release PRs + if: startsWith(github.event.pull_request.title, 'Release MongoDB Kubernetes Operator') + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'Review and publish the release here: https://github.com/mongodb/mongodb-kubernetes-operator/releases' + }) diff --git a/.github/workflows/e2e-dispatch.yml b/.github/workflows/e2e-dispatch.yml new file mode 100644 index 000000000..b3522124d --- /dev/null +++ b/.github/workflows/e2e-dispatch.yml @@ -0,0 +1,134 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. 
+# +################################################################################## + +name: Run Single E2E +on: + # template: .action_templates/events/single-e2e-workflow-dispatch.yaml + workflow_dispatch: + inputs: + distro: + description: Distro to run test + required: true + default: ubuntu + test-name: + description: Name of test to run + required: true + cluster-wide: + description: Whether or not the test is cluster wide + required: true + default: 'false' +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + steps: + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py 
--image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: .action_templates/jobs/single-test.yaml + single-test: + runs-on: ubuntu-latest + needs: [setup] + steps: + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + # template: .action_templates/steps/run-test-single.yaml + - name: Run Test Single + run: | + python3 ./scripts/dev/e2e.py --test ${{ github.event.inputs.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ github.event.inputs.distro }} --cluster-wide ${{ github.event.inputs.cluster-wide }} + # template: .action_templates/steps/dump-and-upload-diagnostics-always.yaml + - name: Dump Diagnostics + if: always() + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() + uses: 
actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ github.event.inputs.test-name }}-${{ github.event.inputs.distro + }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/e2e-fork.yml b/.github/workflows/e2e-fork.yml new file mode 100644 index 000000000..a5c3ae53e --- /dev/null +++ b/.github/workflows/e2e-fork.yml @@ -0,0 +1,240 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. +# +################################################################################## + +name: Run E2E Fork +on: + # template: .action_templates/events/pull-request-target.yaml + pull_request_target: + types: [labeled] + branches: + - master + paths-ignore: + - docs/** +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + if: contains(github.event.pull_request.labels.*.name, 'dependencies') || contains(github.event.pull_request.labels.*.name, + 'safe-to-test') + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout-fork.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: 
${{github.event.pull_request.head.repo.full_name}} + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: .action_templates/jobs/tests.yaml + tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + 
distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts + distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi + - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout-fork.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: ${{github.event.pull_request.head.repo.full_name}} + submodules: true + # template: .action_templates/steps/set-run-status.yaml + - name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. 
+ # see https://github.com/actions/runner/issues/432 + - name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + + - name: Set last run status + id: last_run_status + run: cat last_run_status + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + if: steps.last_run_status.outputs.last_run_status != 'success' + # template: .action_templates/steps/run-test-matrix.yaml + - name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} + # template: .action_templates/steps/save-run-status.yaml + - name: Save run status + if: always() + run: echo 
"::set-output name=last_run_status::${{ steps.e2e_test.outcome }}" + > last_run_status + # template: .action_templates/steps/dump-and-upload-diagnostics.yaml + - name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 000000000..8501431b6 --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,244 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. 
+# +################################################################################## + +name: Run E2E +on: + # template: .action_templates/events/on-pull-request-master.yaml + pull_request: + branches: + - master + paths-ignore: + - docs/** + # template: .action_templates/events/on-push-master.yaml + push: + branches: + - master + paths-ignore: + - docs/** + # template: .action_templates/events/workflow-dispatch.yaml + workflow_dispatch: {} +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + if: github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' + || (github.event.pull_request.head.repo.full_name == github.repository && github.actor + != 'dependabot[bot]') + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + 
registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: .action_templates/jobs/tests.yaml + tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts + distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi + - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - 
test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/set-run-status.yaml + - name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. 
+ # see https://github.com/actions/runner/issues/432 + - name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + + - name: Set last run status + id: last_run_status + run: cat last_run_status + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + if: steps.last_run_status.outputs.last_run_status != 'success' + # template: .action_templates/steps/run-test-matrix.yaml + - name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} + # template: .action_templates/steps/save-run-status.yaml + - name: Save run status + if: always() + run: echo 
"::set-output name=last_run_status::${{ steps.e2e_test.outcome }}" + > last_run_status + # template: .action_templates/steps/dump-and-upload-diagnostics.yaml + - name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000..ecce33378 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,35 @@ +name: Go + +on: + pull_request: + branches: [master] + +jobs: + + UnitTests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + + - name: Test api + run: go test -v ./api/... + + - name: Test cmd + run: go test -v ./cmd/... + + - name: Test controllers + run: go test -v ./controllers/... + + - name: Test pkg + run: go test -v ./pkg/... + + - name: Test mongotester + run: go test -v ./test/e2e/util/mongotester/... 
+ + - name: Check licenses + run: make check-licenses diff --git a/.github/workflows/kubelinter-check.yml b/.github/workflows/kubelinter-check.yml new file mode 100644 index 000000000..2fcb5b725 --- /dev/null +++ b/.github/workflows/kubelinter-check.yml @@ -0,0 +1,48 @@ +name: Kubelinter-check + +on: + push: + branches: + - master + paths-ignore: + - docs/** + pull_request: + branches: + - master + workflow_dispatch: {} + +jobs: + Kubelinter-check: + name: Run Kube-linter check + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + + - name: Scan directory ./deploy/clusterwide/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: deploy/clusterwide + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint_clusterwide.yaml + version: "48442350" # Note: This is the id for release 0.2.3 returned from api.github.com/repos/stackrox/kube-linter/releases + + - name: Scan directory ./deploy/openshift/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: deploy/openshift + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint_openshift.yaml + version: "48442350" + + - name: Scan directory ./config/manager/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/manager/manager.yaml + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" + + - name: Scan directory ./config/samples/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/samples + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml new file mode 100644 index 000000000..3442f28df --- /dev/null +++ b/.github/workflows/main.yaml @@ -0,0 +1,53 @@ +--- +########################### +########################### +## Linter GitHub Actions ## +########################### +########################### +name: Lint Code 
Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + pull_request: + branches: [master] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v4 + with: + # Make sure we also get the helm-charts submodule! + submodules: true + + - name: Install missing python packages + run: sudo apt-get install -y --no-install-recommends python3-venv python3-setuptools + + - name: Install dependencies with pip + requirements.txt + run: | + python3 -m venv .venv + .venv/bin/pip install -r requirements.txt + + - name: Move the dependencies + run: mv .venv /home/runner/work/_temp/_github_workflow diff --git a/.github/workflows/release-images.yml b/.github/workflows/release-images.yml new file mode 100644 index 000000000..5ced57eae --- /dev/null +++ b/.github/workflows/release-images.yml @@ -0,0 +1,87 @@ +name: Release Images + +on: + pull_request_review: + types: [submitted] + workflow_dispatch: + +jobs: + release-images: + runs-on: ubuntu-latest + if: startsWith(github.event.pull_request.title, 'Release MongoDB Kubernetes Operator') && github.event.review.state == 'approved' + strategy: + matrix: + include: + - pipeline-argument: operator + release-key: operator + - pipeline-argument: version-upgrade-hook + release-key: version-upgrade-hook + - pipeline-argument: readiness-probe + release-key: readiness-probe + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' + architecture: 'x64' + + - uses: actions/cache@v4 + with: + 
path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + + - name: Install Python Dependencies + run: pip install -r requirements.txt + - name: Determine if release is needed + id: release_status + run: | + OUTPUT=$(scripts/ci/determine_required_releases.py ${{ matrix.release-key }}) + echo "::set-output name=OUTPUT::$OUTPUT" + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Publish Image To Quay + if: steps.release_status.outputs.OUTPUT == 'unreleased' + run: python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release --sign + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" + AWS_DEFAULT_REGION: "${{ vars.AWS_DEFAULT_REGION }}" + + create-draft-release: + runs-on: ubuntu-latest + needs: [release-images] + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Determine Release Tag + id: release_tag + run: | + OUTPUT=$(jq -r '."mongodb-kubernetes-operator"' < $GITHUB_WORKSPACE/release.json) + echo "::set-output name=OUTPUT::$OUTPUT" + - name: Create Github Release + uses: ncipollo/release-action@v1 + with: + tag: "v${{ steps.release_tag.outputs.OUTPUT }}" + name: MongoDB Kubernetes Operator + bodyFile: "${{ github.workspace }}/docs/RELEASE_NOTES.md" + draft: true + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-single-image.yml 
b/.github/workflows/release-single-image.yml new file mode 100644 index 000000000..162454391 --- /dev/null +++ b/.github/workflows/release-single-image.yml @@ -0,0 +1,58 @@ +name: Release Single Image +on: + workflow_dispatch: + inputs: + pipeline-argument: + description: 'Argument to pass to pipeline' + required: true + release-key: + description: 'Corresponding release.json key' + required: true +jobs: + release-single-image: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' + architecture: 'x64' + + - uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + + - name: Install Python Dependencies + run: pip install -r requirements.txt + - name: Determine if release is needed + id: release_status + run: | + OUTPUT=$(scripts/ci/determine_required_releases.py ${{ github.event.inputs.release-key }}) + echo "::set-output name=OUTPUT::$OUTPUT" + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Publish Image To Quay + if: steps.release_status.outputs.OUTPUT == 'unreleased' + run: python pipeline.py --image-name ${{ github.event.inputs.pipeline-argument }} --release --sign + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" diff --git a/.github/workflows/remove-label.yml 
b/.github/workflows/remove-label.yml new file mode 100644 index 000000000..60316ff49 --- /dev/null +++ b/.github/workflows/remove-label.yml @@ -0,0 +1,13 @@ +name: Remove Label +on: [ pull_request ] +jobs: + remove-safe-to-test-label: + runs-on: ubuntu-latest + name: Remove Label + steps: + - name: + uses: buildsville/add-remove-label@v1 + with: + token: ${{secrets.GITHUB_TOKEN}} + label: safe-to-test + type: remove diff --git a/.gitignore b/.gitignore index e739bddd2..0229263df 100644 --- a/.gitignore +++ b/.gitignore @@ -75,9 +75,29 @@ tags .vscode/* .history # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode - +*mypy_cache +bin/ +venv/ +local-config.json .idea vendor -zz_generated*.go __pycache__ Dockerfile +Dockerfile_python_formatting +logs/* +testbin/bin +# OSX Trash +.DS_Store + +# ignore files generated by sonar +Dockerfile.ubi-* +Dockerfile.ubuntu-* + +diagnostics + +!test/test-app/Dockerfile + +Pipfile +Pipfile.lock +.community-operator-dev +*.iml diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..80d9434c7 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "helm-charts"] + path = helm-charts + url = git@github.com:mongodb/helm-charts.git diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..795e08728 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,61 @@ +--- +######################### +######################### +## Golang Linter rules ## +######################### +######################### + +# configure golangci-lint +# see https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + - gosec + - goconst + - golint + text: "underscore" + - path: ^pkg\/util\/envvar + linters: + - forbidigo + - path: ^cmd\/(readiness|versionhook|manager)\/main\.go$ + linters: + - forbidigo +linters: + enable: + - govet + - errcheck + - staticcheck + - unused + - gosimple + - ineffassign + - typecheck + - 
rowserrcheck + - gosec + - unconvert + - forbidigo +linters-settings: + gosec: + excludes: + - G115 + forbidigo: + forbid: + - p: os\.(Getenv|LookupEnv|Environ|ExpandEnv) + pkg: os + msg: "Reading environment variables here is prohibited. Please read environment variables in the main package." + - p: os\.(Clearenv|Unsetenv|Setenv) + msg: "Modifying environment variables is prohibited." + pkg: os + - p: envvar\.(Read.*?|MergeWithOverride|GetEnvOrDefault) + pkg: github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar + msg: "Using this envvar package here is prohibited. Please work with environment variables in the main package." + # Rules with the `pkg` depend on it + analyze-types: true + +run: + modules-download-mode: mod + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 5m + # default concurrency is an available CPU number + concurrency: 4 diff --git a/LICENSE b/APACHE2 similarity index 99% rename from LICENSE rename to APACHE2 index d64569567..6c13e8ea0 100644 --- a/LICENSE +++ b/APACHE2 @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright [2021] [MongoDB Inc.] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..35f3f6952 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +This project has adopted the [MongoDB Code of Conduct](https://www.mongodb.com/community-code-of-conduct). +If you see any violations of the above or have any other concerns or questions please contact us +using the following email alias: community-conduct@mongodb.com.
diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..9c600b1bc --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,5 @@ +The MongoDB Agent binary in the agent/ directory may be used under the "Free for Commercial Use - Oct 2020" license found in [agent/LICENSE](scripts/dev/templates/agent/LICENSE). + +The source code of this Operator, and all other content in this repository are available under the Apache v2 license. The text of this license is available in [APACHE2](APACHE2) + +To use this Operator, you must agree to both licenses. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..6f1811c8f --- /dev/null +++ b/Makefile @@ -0,0 +1,242 @@ +SHELL := /bin/bash + +MONGODB_COMMUNITY_CONFIG ?= $(HOME)/.community-operator-dev/config.json + +# Image URL to use all building/pushing image targets +REPO_URL := $(shell jq -r .repo_url < $(MONGODB_COMMUNITY_CONFIG)) +OPERATOR_IMAGE := $(shell jq -r .operator_image < $(MONGODB_COMMUNITY_CONFIG)) +NAMESPACE := $(shell jq -r .namespace < $(MONGODB_COMMUNITY_CONFIG)) +UPGRADE_HOOK_IMG := $(shell jq -r .version_upgrade_hook_image < $(MONGODB_COMMUNITY_CONFIG)) +READINESS_PROBE_IMG := $(shell jq -r .readiness_probe_image < $(MONGODB_COMMUNITY_CONFIG)) +REGISTRY := $(shell jq -r .repo_url < $(MONGODB_COMMUNITY_CONFIG)) +AGENT_IMAGE_NAME := $(shell jq -r .agent_image < $(MONGODB_COMMUNITY_CONFIG)) +HELM_CHART ?= ./helm-charts/charts/community-operator + +STRING_SET_VALUES := --set namespace=$(NAMESPACE),versionUpgradeHook.name=$(UPGRADE_HOOK_IMG),readinessProbe.name=$(READINESS_PROBE_IMG),registry.operator=$(REPO_URL),operator.operatorImageName=$(OPERATOR_IMAGE),operator.version=latest,registry.agent=$(REGISTRY),registry.versionUpgradeHook=$(REGISTRY),registry.readinessProbe=$(REGISTRY),registry.operator=$(REGISTRY),versionUpgradeHook.version=latest,readinessProbe.version=latest,agent.version=latest,agent.name=$(AGENT_IMAGE_NAME) +STRING_SET_VALUES_LOCAL := $(STRING_SET_VALUES) --set operator.replicas=0 
+ +DOCKERFILE ?= operator +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:crdVersions=v1" +RELEASE_NAME_HELM ?= mongodb-kubernetes-operator +TEST_NAMESPACE ?= $(NAMESPACE) + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +BASE_GO_PACKAGE = github.com/mongodb/mongodb-kubernetes-operator +GO_LICENSES = go-licenses +DISALLOWED_LICENSES = restricted # found reciprocal MPL-2.0 + +all: manager + +##@ Development + +fmt: ## Run go fmt against code + go fmt ./... + +vet: ## Run go vet against code + go vet ./... + +generate: controller-gen ## Generate code + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +$(GO_LICENSES): + @if ! which $@ &> /dev/null; then \ + go install github.com/google/go-licenses@latest; \ + fi + +licenses.csv: go.mod $(GO_LICENSES) ## Track licenses in a CSV file + @echo "Tracking licenses into file $@" + @echo "========================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) csv --include_tests $(BASE_GO_PACKAGE)/... > $@ + +# We only check that go.mod is NOT newer than licenses.csv because the CI +# tends to generate slightly different results, so content comparison wouldn't work +licenses-tracked: ## Checks license.csv is up to date + @if [ go.mod -nt licenses.csv ]; then \ + echo "License.csv is stale! Please run 'make licenses.csv' and commit"; exit 1; \ + else echo "License.csv OK (up to date)"; fi + +.PHONY: check-licenses-compliance +check-licenses-compliance: licenses.csv ## Check licenses are compliant with our restrictions + @echo "Checking licenses not to be: $(DISALLOWED_LICENSES)" + @echo "============================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) check --include_tests $(BASE_GO_PACKAGE)/... 
\ + --disallowed_types $(DISALLOWED_LICENSES) + @echo "--------------------" + @echo "Licenses check: PASS" + +.PHONY: check-licenses +check-licenses: licenses-tracked check-licenses-compliance ## Check license tracking & compliance + +TEST ?= ./pkg/... ./api/... ./cmd/... ./controllers/... ./test/e2e/util/mongotester/... +test: generate fmt vet manifests ## Run unit tests + go test $(options) $(TEST) -coverprofile cover.out + +manager: generate fmt vet ## Build operator binary + go build -o bin/manager ./cmd/manager/main.go + +run: install ## Run the operator against the configured Kubernetes cluster in ~/.kube/config + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + go run ./cmd/manager/main.go + +debug: install install-rbac ## Run the operator in debug mode with dlv + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + dlv debug ./cmd/manager/main.go + +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen +controller-gen: ## Download controller-gen locally if necessary + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0) + +# Try to use already installed helm from PATH +ifeq (ok,$(shell test -f "$$(which helm)" && echo ok)) + HELM=$(shell which helm) +else + HELM=/usr/local/bin/helm +endif + +helm: ## Download helm locally if necessary + $(call install-helm) + +install-prerequisites-macos: ## installs prerequisites for macos development + scripts/dev/install_prerequisites.sh + +##@ Installation/Uninstallation + +install: manifests helm install-crd ## Install CRDs into a cluster + +install-crd: + kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + +install-chart: uninstall-crd + $(HELM) upgrade --install $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace + +install-chart-local-operator: uninstall-crd + $(HELM) upgrade --install $(STRING_SET_VALUES_LOCAL) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) 
--create-namespace + +prepare-local-dev: generate-env-file install-chart-local-operator install-rbac setup-sas + +# patches all sas to use the local-image-registry +setup-sas: + scripts/dev/setup_sa.sh + +install-chart-with-tls-enabled: + $(HELM) upgrade --install --set createResource=true $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace + +install-rbac: + $(HELM) template $(STRING_SET_VALUES) -s templates/database_roles.yaml $(HELM_CHART) | kubectl apply -f - + $(HELM) template $(STRING_SET_VALUES) -s templates/operator_roles.yaml $(HELM_CHART) | kubectl apply -f - + +uninstall-crd: + kubectl delete crd --ignore-not-found mongodbcommunity.mongodbcommunity.mongodb.com + +uninstall-chart: + $(HELM) uninstall $(RELEASE_NAME_HELM) -n $(NAMESPACE) + +uninstall-rbac: + $(HELM) template $(STRING_SET_VALUES) -s templates/database_roles.yaml $(HELM_CHART) | kubectl delete -f - + $(HELM) template $(STRING_SET_VALUES) -s templates/operator_roles.yaml $(HELM_CHART) | kubectl delete -f - + +uninstall: manifests helm uninstall-chart uninstall-crd ## Uninstall CRDs from a cluster + +##@ Deployment + +deploy: manifests helm install-chart install-crd ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config + +undeploy: uninstall-chart uninstall-crd ## UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config + +manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc. + $(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./..." output:crd:artifacts:config=config/crd/bases + cp config/crd/bases/* $(HELM_CHART)/crds + +##@ E2E + +# Run e2e tests locally using go build while also setting up a proxy in the shell to allow +# the test to run as if it were inside the cluster. This enables mongodb connectivity while running locally. +# "MDB_LOCAL_OPERATOR=true" ensures the operator pod is not spun up while running the e2e test - since you're +# running it locally. 
+e2e-telepresence: cleanup-e2e install ## Run e2e tests locally using go build while also setting up a proxy e.g. make e2e-telepresence test=replica_set cleanup=true + export MDB_LOCAL_OPERATOR=true; \ + telepresence connect; \ + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + go test -v -timeout=30m -failfast $(options) ./test/e2e/$(test) ; \ + telepresence quit + +e2e-k8s: cleanup-e2e install e2e-image ## Run e2e test by deploying test image in kubernetes, you can provide e2e.py flags e.g. make e2e-k8s test=replica_set e2eflags="--perform-cleanup". + python scripts/dev/e2e.py $(e2eflags) --test $(test) + +e2e: cleanup-e2e install ## Run e2e test locally. e.g. make e2e test=replica_set cleanup=true + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + go test -v -short -timeout=30m -failfast $(options) ./test/e2e/$(test) + +e2e-gh: ## Trigger a Github Action of the given test + scripts/dev/run_e2e_gh.sh $(test) + +cleanup-e2e: ## Cleans up e2e test env + kubectl delete mdbc,all,secrets -l e2e-test=true -n ${TEST_NAMESPACE} || true + # Most of the tests use StatefulSets, which in turn use stable storage. In order to + # avoid interleaving tests with each other, we need to drop them all. + kubectl delete pvc --all -n $(NAMESPACE) || true + kubectl delete pv --all -n $(NAMESPACE) || true + +generate-env-file: ## generates a local-test.env for local testing + mkdir -p .community-operator-dev + { python scripts/dev/get_e2e_env_vars.py | tee >(cut -d' ' -f2 > .community-operator-dev/local-test.env) ;} > .community-operator-dev/local-test.export.env + . 
.community-operator-dev/local-test.export.env + +##@ Image + +operator-image: ## Build and push the operator image + python pipeline.py --image-name operator $(IMG_BUILD_ARGS) + +e2e-image: ## Build and push e2e test image + python pipeline.py --image-name e2e $(IMG_BUILD_ARGS) + +agent-image: ## Build and push agent image + python pipeline.py --image-name agent $(IMG_BUILD_ARGS) + +readiness-probe-image: ## Build and push readiness probe image + python pipeline.py --image-name readiness-probe $(IMG_BUILD_ARGS) + +version-upgrade-post-start-hook-image: ## Build and push version upgrade post start hook image + python pipeline.py --image-name version-upgrade-hook $(IMG_BUILD_ARGS) + +all-images: operator-image e2e-image agent-image readiness-probe-image version-upgrade-post-start-hook-image ## create all required images + +define install-helm +@[ -f $(HELM) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 ;\ +chmod 700 get_helm.sh ;\ +./get_helm.sh ;\ +rm -rf $(TMP_DIR) ;\ +} +endef + +# go-install-tool will 'go install' any package $2 and install it to $1. +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +define go-install-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ +rm -rf $$TMP_DIR ;\ +} +endef + +help: ## Show this help screen. + @echo 'Usage: make ... 
' + @echo '' + @echo 'Available targets are:' + @echo '' + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/PROJECT b/PROJECT new file mode 100644 index 000000000..fcd3ceff3 --- /dev/null +++ b/PROJECT @@ -0,0 +1,25 @@ +domain: mongodb.com +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} +projectName: mko-v1 +repo: github.com/mongodb/mongodb-kubernetes-operator +resources: +- api: + crdVersion: v1 + namespaced: true + group: mongodbcommunity + kind: MongoDBCommunity + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: mongodb.com + group: mongodbcommunity + kind: SimpleMongoDBCommunity + path: github.com/mongodb/mongodb-kubernetes-operator/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/README.md b/README.md index 92e4f6196..5476e4383 100644 --- a/README.md +++ b/README.md @@ -1,78 +1,89 @@ -# MongoDB Community Kubernetes Operator # +> **DEPRECATED:** This repository is deprecated but we will continue a best-effort support until November 2025. Please use the new repository at [mongodb/mongodb-kubernetes](https://github.com/mongodb/mongodb-kubernetes) instead. +> +> For more information on this decision - what it means and entails - see the [announcement](https://github.com/mongodb/mongodb-kubernetes/releases/tag/v1.0.0) and our [public documentation](https://www.mongodb.com/docs/kubernetes/current/). +> +> A detailed migration guide is available to help you transition smoothly - see [guide](https://github.com/mongodb/mongodb-kubernetes/blob/master/docs/migration/community-operator-migration.md). There will be no functional changes in the new repository - only a better and unified experience as well as improved visibility into the development process. 
+ - +# MongoDB Community Kubernetes Operator # -This is a [Kubernetes Operator](https://coreos.com/operators/) which deploys MongoDB Community into Kubernetes clusters. + -This codebase is currently _pre-alpha_, and is not ready for use. +This is a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) which deploys MongoDB Community into Kubernetes clusters. If you are a MongoDB Enterprise customer, or need Enterprise features such as Backup, you can use the [MongoDB Enterprise Operator for Kubernetes](https://github.com/mongodb/mongodb-enterprise-kubernetes). -## Installation +Here is a talk from MongoDB Live 2020 about the Community Operator: +* [Run it in Kubernetes! Community and Enterprise MongoDB in Containers](https://www.youtube.com/watch?v=2Xszdg-4T6A&t=1368s) -### Prerequisites +> **Note** +> +> Hi, I'm Dan Mckean 👋 I'm the Product Manager for MongoDB's support of Kubernetes. +> +> The [Community Operator](https://github.com/mongodb/mongodb-kubernetes-operator) is something I inherited when I started, but it doesn't get as much attention from us as we'd like, and we're trying to understand how it's used in order to establish it's future. It will help us establish exactly what level of support we can offer, and what sort of timeframe we aim to provide support in 🙂 +> +>Here's a super short survey (it's much easier for us to review all the feedback that way!): [https://docs.google.com/forms/d/e/1FAIpQLSfwrwyxBSlUyJ6AmC-eYlgW_3JEdfA48SB2i5--_WpiynMW2w/viewform?usp=sf_link](https://docs.google.com/forms/d/e/1FAIpQLSfwrwyxBSlUyJ6AmC-eYlgW_3JEdfA48SB2i5--_WpiynMW2w/viewform?usp=sf_link) +> +> If you'd rather email me instead: [dan.mckean@mongodb.com](mailto:dan.mckean@mongodb.com?subject=MongoDB%20Community%20Operator%20feedback) -Before you install the MongoDB Community Kubernetes Operator, you must: +## Table of Contents -1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). -2. 
Have a Kubernetes solution available to use. - If you need a Kubernetes solution, see the [Kubernetes documentation on picking the right solution](https://kubernetes.io/docs/setup). For testing, MongoDB recommends [Kind](https://kind.sigs.k8s.io/). -3. Clone this repository. - ``` - git clone https://github.com/mongodb/mongodb-kubernetes-operator.git - ``` +- [Documentation](#documentation) +- [Supported Features](#supported-features) + - [Planned Features](#planned-features) +- [Contribute](#contribute) +- [License](#license) -### Installing the MongoDB Community Kubernetes Operator +## Documentation -The MongoDB Community Kubernetes Operator is a [Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) and a controller. +See the [documentation](docs) to learn how to: -To install the MongoDB Community Kubernetes Operator: +1. [Install or upgrade](docs/install-upgrade.md) the Operator. +1. [Deploy and configure](docs/deploy-configure.md) MongoDB resources. +1. [Configure Logging](docs/logging.md) of the MongoDB resource components. +1. [Create a database user](docs/users.md) with SCRAM authentication. +1. [Secure MongoDB resource connections](docs/secure.md) using TLS. -1. Change to the directory in which you cloned the repository. -2. Install the [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - a. Invoke the following `kubectl` command: - ``` - kubectl create -f deploy/crds/mongodb.com_mongodb_crd.yaml - ``` - b. Verify that the Custom Resource Definitions installed successfully: - ``` - kubectl get crd/mongodb.mongodb.com - ``` -3. Install the Operator. - a. Invoke the following `kubectl` command to install the Operator in the specified namespace: - ``` - kubectl create -f deploy --namespace - ``` - b. 
Verify that the Operator installed successsfully: - ``` - kubectl get pods --namespace - ``` +*NOTE: [MongoDB Enterprise Kubernetes Operator](https://www.mongodb.com/docs/kubernetes-operator/master/) docs are for the enterprise operator use case and NOT for the community operator. In addition to the docs mentioned above, you can refer to this [blog post](https://www.mongodb.com/blog/post/run-secure-containerized-mongodb-deployments-using-the-mongo-db-community-kubernetes-oper) as well to learn more about community operator deployment* -## Usage +## Supported Features -The `/deploy/crds` directory contains example MongoDB resources that you can modify and deploy. +The MongoDB Community Kubernetes Operator supports the following features: -### Deploying a MongoDB Resource +- Create [replica sets](https://www.mongodb.com/docs/manual/replication/) +- Upgrade and downgrade MongoDB server version +- Scale replica sets up and down +- Read from and write to the replica set while scaling, upgrading, and downgrading. These operations are done in an "always up" manner. +- Report MongoDB server state via the [MongoDBCommunity resource](/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml) `status` field +- Use any of the available [Docker MongoDB images](https://hub.docker.com/_/mongo/) +- Connect to the replica set from inside the Kubernetes cluster (no external connectivity) +- Secure client-to-server and server-to-server connections with TLS +- Create users with [SCRAM](https://www.mongodb.com/docs/manual/core/security-scram/) authentication +- Create custom roles +- Enable a [metrics target that can be used with Prometheus](docs/prometheus/README.md) -To deploy your first replica set: +## Contribute -1. Invoke the following `kubectl` command: - ``` - kubectl apply -f deploy/crds/mongodb.com_v1_mongodb_cr.yaml --namespace - ``` -2. 
Verify that the MongoDB resource deployed: - ``` - kubectl get mongodb --namespace - ``` +Before you contribute to the MongoDB Community Kubernetes Operator, please read: -## Contributing +- [MongoDB Community Kubernetes Operator Architecture](docs/architecture.md) +- [Contributing to MongoDB Community Kubernetes Operator](docs/contributing.md) Please file issues before filing PRs. For PRs to be accepted, contributors must sign our [CLA](https://www.mongodb.com/legal/contributor-agreement). Reviewers, please ensure that the CLA has been signed by referring to [the contributors tool](https://contributors.corp.mongodb.com/) (internal link). -## License +## Linting -The source code of this Operator is available under the Apache v2 license. +This project uses the following linters upon every Pull Request: + +* `gosec` is a tool that find security problems in the code +* `Black` is a tool that verifies if Python code is properly formatted +* `MyPy` is a Static Type Checker for Python +* `Kube-linter` is a tool that verified if all Kubernetes YAML manifests are formatted correctly +* `Go vet` A built-in Go static checker +* `Snyk` The vulnerability scanner + +## License -The MongoDB Agent binary in the agent/ directory may be used under the "Free for Commercial Use" license found in agent/LICENSE. +Please see the [LICENSE](LICENSE.md) file. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..e35b8facc --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,8 @@ +# Security Policy + +## Reporting a Vulnerability + +Any security concerns or vulnerabilities discovered in one of MongoDB’s products or hosted services +can be responsibly disclosed by utilizing one of the methods described in our [create a vulnerability report](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report/) docs page. + +While we greatly appreciate community reports regarding security issues, at this time MongoDB does not provide compensation for vulnerability reports. 
diff --git a/agent/Dockerfile b/agent/Dockerfile deleted file mode 100644 index 91ce29d42..000000000 --- a/agent/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:16.04 - -# https://jira.mongodb.org/browse/CLOUDP-58488 -# Adds -noDaemonize option -ARG agent_image=https://s3.amazonaws.com/mciuploads/mms-automation/mongodb-mms-build-agent/builds/patches/5e600292d6d80a561311da80/automation-agent/dev/mongodb-mms-automation-agent-10.12.0.6191-1.rhel7_x86_64.tar.gz -ARG agent_version=10.12.0.6191 - -RUN apt-get -qq update \ - && apt-get -y -qq install \ - curl \ - && apt-get upgrade -y -qq \ - && apt-get dist-upgrade -y -qq \ - && rm -rf /var/lib/apt/lists/* - -RUN mkdir -p agent \ - && curl $agent_image -o agent/mongodb-agent.tar.gz \ - && tar xfz agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-$agent_version-1.rhel7_x86_64/mongodb-mms-automation-agent agent/mongodb-agent \ - && chmod +x agent/mongodb-agent \ - && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config - -# TODO: this mongo client is used for testing purposes and should be removed in the future -RUN curl -LO http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu1604-4.0.6.tgz && \ - tar xfz mongodb-linux-x86_64-ubuntu1604-4.0.6.tgz && \ - mv mongodb-linux-x86_64-ubuntu1604-4.0.6/bin/mongo /usr/bin && \ - rm -rf mongodb-linux-x86_64-ubuntu1604-4.0.6.tgz mongodb-linux-x86_64-ubuntu1604-4.0.6 - - -RUN mkdir -p /var/lib/mongodb-mms-automation/probes/ \ - && curl --retry 3 https://readinessprobe.s3-us-west-1.amazonaws.com/readinessprobe -o /var/lib/mongodb-mms-automation/probes/readinessprobe \ - && chmod +x /var/lib/mongodb-mms-automation/probes/readinessprobe \ - && mkdir -p /var/log/mongodb-mms-automation/ \ - && chmod -R +wr /var/log/mongodb-mms-automation/ - -CMD ["agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/api/v1/doc.go b/api/v1/doc.go new file mode 100644 index 000000000..a6a3905a8 --- 
/dev/null +++ b/api/v1/doc.go @@ -0,0 +1,4 @@ +package v1 + +// +k8s:deepcopy-gen=package +// +versionName=v1 diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go new file mode 100644 index 000000000..8f1eb64c5 --- /dev/null +++ b/api/v1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the mongodbcommunity v1 API group +// +kubebuilder:object:generate=true +// +groupName=mongodbcommunity.mongodb.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "mongodbcommunity.mongodb.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/mongodbcommunity_types.go b/api/v1/mongodbcommunity_types.go new file mode 100644 index 000000000..6a5e4bf0c --- /dev/null +++ b/api/v1/mongodbcommunity_types.go @@ -0,0 +1,1227 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/stretchr/objx" +) + +type Type string + +const ( + ReplicaSet Type = "ReplicaSet" + defaultDBForUser string = "admin" +) + +type Phase string + +const ( + Running Phase = "Running" + Failed Phase = "Failed" + Pending Phase = "Pending" + defaultPasswordKey = "password" + + // Keep in sync with controllers/prometheus.go + defaultPrometheusPort = 9216 +) + +// SCRAM-SHA-256 and SCRAM-SHA-1 are the supported auth modes. +const ( + defaultMode AuthMode = "SCRAM-SHA-256" +) + +const ( + defaultClusterDomain = "cluster.local" +) + +// Connection string options that should be ignored as they are set through other means. 
+var ( + protectedConnectionStringOptions = map[string]struct{}{ + "replicaSet": {}, + "ssl": {}, + "tls": {}, + } +) + +// MongoDBCommunitySpec defines the desired state of MongoDB +type MongoDBCommunitySpec struct { + // Members is the number of members in the replica set + // +optional + Members int `json:"members"` + // Type defines which type of MongoDB deployment the resource should create + // +kubebuilder:validation:Enum=ReplicaSet + Type Type `json:"type"` + // Version defines which version of MongoDB will be used + Version string `json:"version,omitempty"` + + // Arbiters is the number of arbiters to add to the Replica Set. + // It is not recommended to have more than one arbiter per Replica Set. + // More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/ + // +optional + Arbiters int `json:"arbiters"` + + // FeatureCompatibilityVersion configures the feature compatibility version that will + // be set for the deployment + // +optional + FeatureCompatibilityVersion string `json:"featureCompatibilityVersion,omitempty"` + + // ReplicaSetHorizons Add this parameter and values if you need your database + // to be accessed outside of Kubernetes. This setting allows you to + // provide different DNS settings within the Kubernetes cluster and + // to the Kubernetes cluster. The Kubernetes Operator uses split horizon + // DNS for replica set members. This feature allows communication both + // within the Kubernetes cluster and from outside Kubernetes. 
+ // +optional + ReplicaSetHorizons ReplicaSetHorizonConfiguration `json:"replicaSetHorizons,omitempty"` + + // Security configures security features, such as TLS, and authentication settings for a deployment + // +required + Security Security `json:"security"` + + // Users specifies the MongoDB users that should be configured in your deployment + // +required + Users []MongoDBUser `json:"users"` + + // +optional + StatefulSetConfiguration StatefulSetConfiguration `json:"statefulSet,omitempty"` + + // AgentConfiguration sets options for the MongoDB automation agent + // +optional + AgentConfiguration AgentConfiguration `json:"agent,omitempty"` + + // AdditionalMongodConfig is additional configuration that can be passed to + // each data-bearing mongod at runtime. Uses the same structure as the mongod + // configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/ + // +kubebuilder:validation:Type=object + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + AdditionalMongodConfig MongodConfiguration `json:"additionalMongodConfig,omitempty"` + + // AutomationConfigOverride is merged on top of the operator created automation config. Processes are merged + // by name. Currently Only the process.disabled field is supported. + AutomationConfigOverride *AutomationConfigOverride `json:"automationConfig,omitempty"` + + // Prometheus configurations. + // +optional + Prometheus *Prometheus `json:"prometheus,omitempty"` + + // Additional options to be appended to the connection string. These options apply to the entire resource and to each user. 
+ // +kubebuilder:validation:Type=object + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + AdditionalConnectionStringConfig MapWrapper `json:"additionalConnectionStringConfig,omitempty"` + + // MemberConfig + // +optional + MemberConfig []automationconfig.MemberOptions `json:"memberConfig,omitempty"` +} + +// MapWrapper is a wrapper for a map to be used by other structs. +// The CRD generator does not support map[string]interface{} +// on the top level and hence we need to work around this with +// a wrapping struct. +type MapWrapper struct { + Object map[string]interface{} `json:"-"` +} + +// MarshalJSON defers JSON encoding to the wrapped map +func (m *MapWrapper) MarshalJSON() ([]byte, error) { + return json.Marshal(m.Object) +} + +// UnmarshalJSON will decode the data into the wrapped map +func (m *MapWrapper) UnmarshalJSON(data []byte) error { + if m.Object == nil { + m.Object = map[string]interface{}{} + } + + // Handle keys like net.port to be set as nested maps. + // Without this after unmarshalling there is just key "net.port" which is not + // a nested map and methods like GetPort() cannot access the value. + tmpMap := map[string]interface{}{} + err := json.Unmarshal(data, &tmpMap) + if err != nil { + return err + } + + for k, v := range tmpMap { + m.SetOption(k, v) + } + + return nil +} + +func (m *MapWrapper) DeepCopy() *MapWrapper { + if m != nil && m.Object != nil { + return &MapWrapper{ + Object: runtime.DeepCopyJSON(m.Object), + } + } + c := NewMapWrapper() + return &c +} + +// NewMapWrapper returns an empty MapWrapper +func NewMapWrapper() MapWrapper { + return MapWrapper{Object: map[string]interface{}{}} +} + +// SetOption updated the MapWrapper with a new option +func (m MapWrapper) SetOption(key string, value interface{}) MapWrapper { + m.Object = objx.New(m.Object).Set(key, value) + return m +} + +// ReplicaSetHorizonConfiguration holds the split horizon DNS settings for +// replica set members. 
+type ReplicaSetHorizonConfiguration []automationconfig.ReplicaSetHorizons + +// CustomRole defines a custom MongoDB role. +type CustomRole struct { + // The name of the role. + Role string `json:"role"` + // The database of the role. + DB string `json:"db"` + // The privileges to grant the role. + Privileges []Privilege `json:"privileges"` + // An array of roles from which this role inherits privileges. + // +optional + Roles []Role `json:"roles"` + // The authentication restrictions the server enforces on the role. + // +optional + AuthenticationRestrictions []AuthenticationRestriction `json:"authenticationRestrictions,omitempty"` +} + +type Prometheus struct { + // Port where metrics endpoint will bind to. Defaults to 9216. + // +optional + Port int `json:"port,omitempty"` + + // HTTP Basic Auth Username for metrics endpoint. + Username string `json:"username"` + + // Name of a Secret containing a HTTP Basic Auth Password. + PasswordSecretRef SecretKeyReference `json:"passwordSecretRef"` + + // Indicates path to the metrics endpoint. + // +kubebuilder:validation:Pattern=^\/[a-z0-9]+$ + MetricsPath string `json:"metricsPath,omitempty"` + + // Name of a Secret (type kubernetes.io/tls) holding the certificates to use in the + // Prometheus endpoint. + // +optional + TLSSecretRef SecretKeyReference `json:"tlsSecretKeyRef,omitempty"` +} + +func (p Prometheus) GetPasswordKey() string { + if p.PasswordSecretRef.Key != "" { + return p.PasswordSecretRef.Key + } + + return "password" +} + +func (p Prometheus) GetPort() int { + if p.Port != 0 { + return p.Port + } + + return defaultPrometheusPort +} + +// ConvertToAutomationConfigCustomRole converts between a custom role defined by the crd and a custom role +// that can be used in the automation config. +func (c CustomRole) ConvertToAutomationConfigCustomRole() automationconfig.CustomRole { + ac := automationconfig.CustomRole{Role: c.Role, DB: c.DB, Roles: []automationconfig.Role{}} + + // Add privileges. 
+ for _, privilege := range c.Privileges { + ac.Privileges = append(ac.Privileges, automationconfig.Privilege{ + Resource: automationconfig.Resource{ + DB: privilege.Resource.DB, + Collection: privilege.Resource.Collection, + AnyResource: privilege.Resource.AnyResource, + Cluster: privilege.Resource.Cluster, + }, + Actions: privilege.Actions, + }) + } + + // Add roles. + for _, dbRole := range c.Roles { + ac.Roles = append(ac.Roles, automationconfig.Role{ + Role: dbRole.Name, + Database: dbRole.DB, + }) + } + + // Add authentication restrictions (if any). + for _, restriction := range c.AuthenticationRestrictions { + ac.AuthenticationRestrictions = append(ac.AuthenticationRestrictions, + automationconfig.AuthenticationRestriction{ + ClientSource: restriction.ClientSource, + ServerAddress: restriction.ServerAddress, + }) + } + + return ac +} + +// ConvertCustomRolesToAutomationConfigCustomRole converts custom roles to custom roles +// that can be used in the automation config. +func ConvertCustomRolesToAutomationConfigCustomRole(roles []CustomRole) []automationconfig.CustomRole { + acRoles := []automationconfig.CustomRole{} + for _, role := range roles { + acRoles = append(acRoles, role.ConvertToAutomationConfigCustomRole()) + } + return acRoles +} + +// Privilege defines the actions a role is allowed to perform on a given resource. +type Privilege struct { + Resource Resource `json:"resource"` + Actions []string `json:"actions"` +} + +// Resource specifies specifies the resources upon which a privilege permits actions. +// See https://www.mongodb.com/docs/manual/reference/resource-document for more. 
+type Resource struct { + // +optional + DB *string `json:"db,omitempty"` + // +optional + Collection *string `json:"collection,omitempty"` + // +optional + Cluster bool `json:"cluster,omitempty"` + // +optional + AnyResource bool `json:"anyResource,omitempty"` +} + +// AuthenticationRestriction specifies a list of IP addresses and CIDR ranges users +// are allowed to connect to or from. +type AuthenticationRestriction struct { + ClientSource []string `json:"clientSource"` + ServerAddress []string `json:"serverAddress"` +} + +// AutomationConfigOverride contains fields which will be overridden in the operator created config. +type AutomationConfigOverride struct { + Processes []OverrideProcess `json:"processes,omitempty"` + ReplicaSet OverrideReplicaSet `json:"replicaSet,omitempty"` +} + +type OverrideReplicaSet struct { + // Id can be used together with additionalMongodConfig.replication.replSetName + // to manage clusters where replSetName differs from the MongoDBCommunity resource name + Id *string `json:"id,omitempty"` + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + Settings MapWrapper `json:"settings,omitempty"` +} + +// Note: We do not use the automationconfig.Process type directly here as unmarshalling cannot happen directly +// with the Args26 which is a map[string]interface{} + +// OverrideProcess contains fields that we can override on the AutomationConfig processes. +type OverrideProcess struct { + Name string `json:"name"` + Disabled bool `json:"disabled"` + LogRotate *automationconfig.CrdLogRotate `json:"logRotate,omitempty"` +} + +// StatefulSetConfiguration holds the optional custom StatefulSet +// that should be merged into the operator created one. 
+type StatefulSetConfiguration struct { + // +kubebuilder:pruning:PreserveUnknownFields + SpecWrapper StatefulSetSpecWrapper `json:"spec"` + // +optional + MetadataWrapper StatefulSetMetadataWrapper `json:"metadata"` +} + +type LogLevel string + +const ( + LogLevelDebug LogLevel = "DEBUG" + LogLevelInfo LogLevel = "INFO" + LogLevelWarn LogLevel = "WARN" + LogLevelError LogLevel = "ERROR" + LogLevelFatal LogLevel = "FATAL" +) + +type AgentConfiguration struct { + // +optional + LogLevel LogLevel `json:"logLevel"` + // +optional + LogFile string `json:"logFile"` + // +optional + MaxLogFileDurationHours int `json:"maxLogFileDurationHours"` + // +optional + // LogRotate if enabled, will enable LogRotate for all processes. + LogRotate *automationconfig.CrdLogRotate `json:"logRotate,omitempty"` + // +optional + // AuditLogRotate if enabled, will enable AuditLogRotate for all processes. + AuditLogRotate *automationconfig.CrdLogRotate `json:"auditLogRotate,omitempty"` + // +optional + // SystemLog configures system log of mongod + SystemLog *automationconfig.SystemLog `json:"systemLog,omitempty"` +} + +// StatefulSetSpecWrapper is a wrapper around StatefulSetSpec with a custom implementation +// of MarshalJSON and UnmarshalJSON which delegate to the underlying Spec to avoid CRD pollution. 
type StatefulSetSpecWrapper struct {
	Spec appsv1.StatefulSetSpec `json:"-"`
}

// MarshalJSON defers JSON encoding to the wrapped map
func (m *StatefulSetSpecWrapper) MarshalJSON() ([]byte, error) {
	return json.Marshal(m.Spec)
}

// UnmarshalJSON will decode the data into the wrapped map
func (m *StatefulSetSpecWrapper) UnmarshalJSON(data []byte) error {
	return json.Unmarshal(data, &m.Spec)
}

// DeepCopy returns a new wrapper. Note the inner StatefulSetSpec is copied by
// value, so reference-typed fields inside it are shared with the original.
func (m *StatefulSetSpecWrapper) DeepCopy() *StatefulSetSpecWrapper {
	return &StatefulSetSpecWrapper{
		Spec: m.Spec,
	}
}

// StatefulSetMetadataWrapper is a wrapper around Labels and Annotations
type StatefulSetMetadataWrapper struct {
	// +optional
	Labels map[string]string `json:"labels,omitempty"`
	// +optional
	Annotations map[string]string `json:"annotations,omitempty"`
}

// DeepCopy returns a new wrapper. Note the Labels and Annotations maps are
// assigned, not cloned, so they remain shared with the original.
func (m *StatefulSetMetadataWrapper) DeepCopy() *StatefulSetMetadataWrapper {
	return &StatefulSetMetadataWrapper{
		Labels:      m.Labels,
		Annotations: m.Annotations,
	}
}

// MongodConfiguration holds the optional mongod configuration
// that should be merged with the operator created one.
type MongodConfiguration struct {
	MapWrapper `json:"-"`
}

// NewMongodConfiguration returns an empty MongodConfiguration
func NewMongodConfiguration() MongodConfiguration {
	return MongodConfiguration{MapWrapper{map[string]interface{}{}}}
}

// GetDBDataDir returns the db path which should be used.
func (m MongodConfiguration) GetDBDataDir() string {
	return objx.New(m.Object).Get("storage.dbPath").Str(automationconfig.DefaultMongoDBDataDir)
}

// GetDBPort returns the port that should be used for the mongod process.
// If port is not specified, the default port of 27017 will be used.
func (m MongodConfiguration) GetDBPort() int {
	portValue := objx.New(m.Object).Get("net.port")

	// The underlying map could be manipulated in code, e.g. via SetDBPort (e.g. in unit
	// tests) - then the value is stored as an int. Or it could be deserialized from
	// JSON, in which case an integer in an untyped map is decoded as float64: it is the
	// documented behavior of https://pkg.go.dev/encoding/json#Unmarshal to decode JSON
	// numbers into float64 when the destination is interface{}.
	if portValue.IsInt() {
		return portValue.Int(automationconfig.DefaultDBPort)
	} else if portValue.IsFloat64() {
		return int(portValue.Float64(float64(automationconfig.DefaultDBPort)))
	}

	return automationconfig.DefaultDBPort
}

// SetDBPort ensures that port is stored as float64
func (m MongodConfiguration) SetDBPort(port int) MongodConfiguration {
	m.SetOption("net.port", float64(port))
	return m
}

// MongoDBUser holds a single user of the MongoDB deployment.
type MongoDBUser struct {
	// Name is the username of the user
	Name string `json:"name"`

	// DB is the database the user is stored in. Defaults to "admin"
	// +optional
	// +kubebuilder:validation:Optional
	// +kubebuilder:default:=admin
	DB string `json:"db,omitempty"`

	// PasswordSecretRef is a reference to the secret containing this user's password
	// +optional
	PasswordSecretRef SecretKeyReference `json:"passwordSecretRef,omitempty"`

	// Roles is an array of roles assigned to this user
	Roles []Role `json:"roles"`

	// ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials
	// These secrets names must be different for each user in a deployment.
	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
	// +optional
	ScramCredentialsSecretName string `json:"scramCredentialsSecretName,omitempty"`

	// ConnectionStringSecretName is the name of the secret object created by the operator which exposes the connection strings for the user.
	// If provided, this secret must be different for each user in a deployment.
	// +optional
	ConnectionStringSecretName string `json:"connectionStringSecretName,omitempty"`

	// ConnectionStringSecretNamespace is the namespace of the secret object created by the operator which exposes the connection strings for the user.
	// +optional
	ConnectionStringSecretNamespace string `json:"connectionStringSecretNamespace,omitempty"`

	// Additional options to be appended to the connection string.
	// These options apply only to this user and will override any existing options in the resource.
	// +kubebuilder:validation:Type=object
	// +optional
	// +kubebuilder:pruning:PreserveUnknownFields
	// +nullable
	AdditionalConnectionStringConfig MapWrapper `json:"additionalConnectionStringConfig,omitempty"`
}

// GetPasswordSecretKey returns the key within the password Secret that holds
// this user's password, defaulting to defaultPasswordKey when unset.
func (m MongoDBUser) GetPasswordSecretKey() string {
	if m.PasswordSecretRef.Key == "" {
		return defaultPasswordKey
	}
	return m.PasswordSecretRef.Key
}

// GetScramCredentialsSecretName gets the final SCRAM credentials secret-name by appending the user provided
// ScramCredentialsSecretName with "scram-credentials"
func (m MongoDBUser) GetScramCredentialsSecretName() string {
	return fmt.Sprintf("%s-%s", m.ScramCredentialsSecretName, "scram-credentials")
}

// GetConnectionStringSecretName gets the connection string secret name provided by the user or generated
// from the SCRAM user configuration.
func (m MongoDBUser) GetConnectionStringSecretName(resourceName string) string {
	if m.ConnectionStringSecretName != "" {
		return m.ConnectionStringSecretName
	}

	return normalizeName(fmt.Sprintf("%s-%s-%s", resourceName, m.DB, m.Name))
}

// GetConnectionStringSecretNamespace gets the connection string secret namespace provided by the user or generated
// from the SCRAM user configuration.
+func (m MongoDBUser) GetConnectionStringSecretNamespace(resourceNamespace string) string { + if m.ConnectionStringSecretNamespace != "" { + return m.ConnectionStringSecretNamespace + } + + return resourceNamespace +} + +// normalizeName returns a string that conforms to RFC-1123 +func normalizeName(name string) string { + errors := validation.IsDNS1123Subdomain(name) + if len(errors) == 0 { + return name + } + + // convert name to lowercase and replace invalid characters with '-' + name = strings.ToLower(name) + re := regexp.MustCompile("[^a-z0-9-]+") + name = re.ReplaceAllString(name, "-") + + // Remove duplicate `-` resulting from contiguous non-allowed chars. + re = regexp.MustCompile(`\-+`) + name = re.ReplaceAllString(name, "-") + + name = strings.Trim(name, "-") + + if len(name) > validation.DNS1123SubdomainMaxLength { + name = name[0:validation.DNS1123SubdomainMaxLength] + } + return name +} + +// SecretKeyReference is a reference to the secret containing the user's password +type SecretKeyReference struct { + // Name is the name of the secret storing this user's password + Name string `json:"name"` + + // Key is the key in the secret storing this password. Defaults to "password" + // +optional + Key string `json:"key"` +} + +// Role is the database role this user should have +type Role struct { + // DB is the database the role can act on + DB string `json:"db"` + // Name is the name of the role + Name string `json:"name"` +} + +type Security struct { + // +optional + Authentication Authentication `json:"authentication"` + // TLS configuration for both client-server and server-server communication + // +optional + TLS TLS `json:"tls"` + // User-specified custom MongoDB roles that should be configured in the deployment. 
+ // +optional + Roles []CustomRole `json:"roles,omitempty"` +} + +// TLS is the configuration used to set up TLS encryption +type TLS struct { + Enabled bool `json:"enabled"` + + // Optional configures if TLS should be required or optional for connections + // +optional + Optional bool `json:"optional"` + + // CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS. + // The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt". + // This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required. + // Alternatively, an entry tls.pem, containing the concatenation of cert and key, can be provided. + // If all of tls.pem, tls.crt and tls.key are present, the tls.pem one needs to be equal to the concatenation of tls.crt and tls.key + // +optional + CertificateKeySecret corev1.LocalObjectReference `json:"certificateKeySecretRef"` + + // CaCertificateSecret is a reference to a Secret containing the certificate for the CA which signed the server certificates + // The certificate is expected to be available under the key "ca.crt" + // +optional + CaCertificateSecret *corev1.LocalObjectReference `json:"caCertificateSecretRef,omitempty"` + + // CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + // The certificate is expected to be available under the key "ca.crt" + // This field is ignored when CaCertificateSecretRef is configured + // +optional + CaConfigMap *corev1.LocalObjectReference `json:"caConfigMapRef,omitempty"` +} + +type Authentication struct { + // Modes is an array specifying which authentication methods should be enabled. + Modes []AuthMode `json:"modes"` + + // AgentMode contains the authentication mode used by the automation agent. 
+ // +optional + AgentMode AuthMode `json:"agentMode,omitempty"` + + // AgentCertificateSecret is a reference to a Secret containing the certificate and the key for the automation agent + // The secret needs to have available: + // - certificate under key: "tls.crt" + // - private key under key: "tls.key" + // If additionally, tls.pem is present, then it needs to be equal to the concatenation of tls.crt and tls.key + // +optional + AgentCertificateSecret *corev1.LocalObjectReference `json:"agentCertificateSecretRef,omitempty"` + + // IgnoreUnknownUsers set to true will ensure any users added manually (not through the CRD) + // will not be removed. + + // TODO: defaults will work once we update to v1 CRD. + + // +optional + // +kubebuilder:default:=true + // +nullable + IgnoreUnknownUsers *bool `json:"ignoreUnknownUsers,omitempty"` +} + +// +kubebuilder:validation:Enum=SCRAM;SCRAM-SHA-256;SCRAM-SHA-1;X509 +type AuthMode string + +func IsAuthPresent(authModes []AuthMode, auth string) bool { + for _, authMode := range authModes { + if string(authMode) == auth { + return true + } + } + return false +} + +// ConvertAuthModeToAuthMechanism acts as a map but is immutable. It allows users to use different labels to describe the +// same authentication mode. 
func ConvertAuthModeToAuthMechanism(authModeLabel AuthMode) string {
	switch authModeLabel {
	case "SCRAM", "SCRAM-SHA-256":
		return constants.Sha256
	case "SCRAM-SHA-1":
		return constants.Sha1
	case "X509":
		return constants.X509
	default:
		return ""
	}
}

// MongoDBCommunityStatus defines the observed state of MongoDB
type MongoDBCommunityStatus struct {
	MongoURI string `json:"mongoUri"`
	Phase    Phase  `json:"phase"`
	Version  string `json:"version,omitempty"`

	CurrentStatefulSetReplicas int `json:"currentStatefulSetReplicas"`
	CurrentMongoDBMembers      int `json:"currentMongoDBMembers"`

	CurrentStatefulSetArbitersReplicas int `json:"currentStatefulSetArbitersReplicas,omitempty"`
	CurrentMongoDBArbiters             int `json:"currentMongoDBArbiters,omitempty"`

	Message string `json:"message,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// MongoDBCommunity is the Schema for the mongodbs API
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=mongodbcommunity,scope=Namespaced,shortName=mdbc,singular=mongodbcommunity
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Current state of the MongoDB deployment"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version of MongoDB server"
// +kubebuilder:metadata:annotations="service.binding/type=mongodb"
// +kubebuilder:metadata:annotations="service.binding/provider=community"
// +kubebuilder:metadata:annotations="service.binding=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret"
// +kubebuilder:metadata:annotations="service.binding/connectionString=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=connectionString.standardSrv"
// +kubebuilder:metadata:annotations="service.binding/username=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=username"
// +kubebuilder:metadata:annotations="service.binding/password=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=password"
type MongoDBCommunity struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   MongoDBCommunitySpec   `json:"spec,omitempty"`
	Status MongoDBCommunityStatus `json:"status,omitempty"`
}

// GetMongodConfiguration returns a fresh MongodConfiguration populated with every
// option from spec.additionalMongodConfig.
func (m *MongoDBCommunity) GetMongodConfiguration() MongodConfiguration {
	mongodConfig := NewMongodConfiguration()
	for k, v := range m.Spec.AdditionalMongodConfig.Object {
		mongodConfig.SetOption(k, v)
	}
	return mongodConfig
}

// GetAgentPasswordSecretNamespacedName returns the namespaced name of the
// "<name>-agent-password" Secret in the resource's namespace.
func (m *MongoDBCommunity) GetAgentPasswordSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name + "-agent-password", Namespace: m.Namespace}
}

// GetAgentKeyfileSecretNamespacedName returns the namespaced name of the
// "<name>-keyfile" Secret in the resource's namespace.
func (m *MongoDBCommunity) GetAgentKeyfileSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name + "-keyfile", Namespace: m.Namespace}
}

// GetOwnerReferences returns a single controller OwnerReference pointing at this resource.
func (m *MongoDBCommunity) GetOwnerReferences() []metav1.OwnerReference {
	ownerReference := *metav1.NewControllerRef(m, schema.GroupVersionKind{
		Group:   GroupVersion.Group,
		Version: GroupVersion.Version,
		Kind:    m.Kind,
	})
	return []metav1.OwnerReference{ownerReference}
}

// GetAuthOptions returns a set of Options that are used to configure scram
// authentication.
func (m *MongoDBCommunity) GetAuthOptions() authtypes.Options {
	// IgnoreUnknownUsers defaults to true when not set on the spec.
	ignoreUnknownUsers := true
	if m.Spec.Security.Authentication.IgnoreUnknownUsers != nil {
		ignoreUnknownUsers = *m.Spec.Security.Authentication.IgnoreUnknownUsers
	}

	authModes := m.Spec.Security.Authentication.Modes
	defaultAuthMechanism := ConvertAuthModeToAuthMechanism(defaultMode)
	autoAuthMechanism := ConvertAuthModeToAuthMechanism(m.Spec.GetAgentAuthMode())
	authMechanisms := make([]string, len(authModes))

	if autoAuthMechanism == "" {
		autoAuthMechanism = defaultAuthMechanism
	}

	// No modes configured: fall back to the single default mechanism.
	if len(authModes) == 0 {
		authMechanisms = []string{defaultAuthMechanism}
	} else {
		for i, authMode := range authModes {
			if authMech := ConvertAuthModeToAuthMechanism(authMode); authMech != "" {
				authMechanisms[i] = authMech
			}
		}
	}

	return authtypes.Options{
		AuthoritativeSet:  !ignoreUnknownUsers,
		KeyFile:           constants.AutomationAgentKeyFilePathInContainer,
		AuthMechanisms:    authMechanisms,
		AgentName:         constants.AgentName,
		AutoAuthMechanism: autoAuthMechanism,
	}
}

// GetAuthUsers converts all the users from the spec into users
// that can be used to configure authentication.
func (m *MongoDBCommunity) GetAuthUsers() []authtypes.User {
	users := make([]authtypes.User, len(m.Spec.Users))
	for i, u := range m.Spec.Users {
		roles := make([]authtypes.Role, len(u.Roles))
		for j, r := range u.Roles {

			roles[j] = authtypes.Role{
				Name:     r.Name,
				Database: r.DB,
			}
		}

		// When the MongoDB resource has been fetched from Kubernetes,
		// the User's database will be set to "admin" because this is set
		// by default on the CRD, but when running e2e tests, the resource
		// we are working with is local -- it has not been posted to the
		// Kubernetes API and the `u.DB` was not set to the default ("admin").
		// This is why the "admin" value is being set here.
		if u.DB == "" {
			u.DB = defaultDBForUser
		}

		users[i] = authtypes.User{
			Username:                        u.Name,
			Database:                        u.DB,
			Roles:                           roles,
			ConnectionStringSecretName:      u.GetConnectionStringSecretName(m.Name),
			ConnectionStringSecretNamespace: u.GetConnectionStringSecretNamespace(m.Namespace),
			ConnectionStringOptions:         u.AdditionalConnectionStringConfig.Object,
		}

		// SCRAM-specific fields are only relevant for non-external (non-X.509) users.
		if u.DB != constants.ExternalDB {
			users[i].ScramCredentialsSecretName = u.GetScramCredentialsSecretName()
			users[i].PasswordSecretKey = u.GetPasswordSecretKey()
			users[i].PasswordSecretName = u.PasswordSecretRef.Name
		}
	}
	return users
}

// AgentCertificateSecretNamespacedName returns the namespaced name of the secret containing the agent certificate.
func (m *MongoDBCommunity) AgentCertificateSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: m.Namespace,
		Name:      m.Spec.GetAgentCertificateRef(),
	}
}

// AgentCertificatePemSecretNamespacedName returns the namespaced name of the secret containing the agent certificate in pem format.
func (m *MongoDBCommunity) AgentCertificatePemSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{
		Namespace: m.Namespace,
		Name:      m.Spec.GetAgentCertificateRef() + "-pem",
	}
}

// GetAgentCertificateRef returns the name of the secret containing the agent certificate.
// If it is specified in the CR, it will return this. Otherwise, it defaults to agent-certs.
func (m *MongoDBCommunitySpec) GetAgentCertificateRef() string {
	agentCertSecret := "agent-certs"
	if m.Security.Authentication.AgentCertificateSecret != nil && m.Security.Authentication.AgentCertificateSecret.Name != "" {
		agentCertSecret = m.Security.Authentication.AgentCertificateSecret.Name
	}
	return agentCertSecret
}

// GetAgentAuthMode returns the agent authentication mode. If the agent auth mode is specified, it will return this.
// Otherwise, if the spec.security.authentication.modes array is empty, it will default to SCRAM-SHA-256.
// If spec.security.authentication.modes has one element, the agent auth mode will default to that.
// If spec.security.authentication.modes has more than one element, then agent auth will need to be specified,
// with one exception: if spec.security.authentication.modes contains only SCRAM-SHA-256 and SCRAM-SHA-1, then it defaults to SCRAM-SHA-256 (for backwards compatibility).
func (m *MongoDBCommunitySpec) GetAgentAuthMode() AuthMode {
	if m.Security.Authentication.AgentMode != "" {
		return m.Security.Authentication.AgentMode
	}

	if len(m.Security.Authentication.Modes) == 0 {
		return "SCRAM-SHA-256"
	} else if len(m.Security.Authentication.Modes) == 1 {
		return m.Security.Authentication.Modes[0]
	} else if len(m.Security.Authentication.Modes) == 2 {
		// "SCRAM" is an accepted alias for "SCRAM-SHA-256" here.
		if (IsAuthPresent(m.Security.Authentication.Modes, "SCRAM") || IsAuthPresent(m.Security.Authentication.Modes, "SCRAM-SHA-256")) &&
			IsAuthPresent(m.Security.Authentication.Modes, "SCRAM-SHA-1") {
			return "SCRAM-SHA-256"
		}
	}
	// Ambiguous configuration: the caller must set agentMode explicitly.
	return ""
}

// IsAgentX509 reports whether the agent authenticates with X.509 certificates.
func (m *MongoDBCommunitySpec) IsAgentX509() bool {
	return m.GetAgentAuthMode() == "X509"
}

// IsStillScaling returns true if this resource is currently scaling,
// considering both arbiters and regular members.
func (m *MongoDBCommunity) IsStillScaling() bool {
	arbiters := automationConfigReplicasScaler{
		current:                m.CurrentArbiters(),
		desired:                m.DesiredArbiters(),
		forceIndividualScaling: true,
	}

	return scale.IsStillScaling(m) || scale.IsStillScaling(arbiters)
}

// AutomationConfigMembersThisReconciliation determines the correct number of
// automation config replica set members based on our desired number, and our
// current number.
func (m *MongoDBCommunity) AutomationConfigMembersThisReconciliation() int {
	return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{
		current: m.Status.CurrentMongoDBMembers,
		desired: m.Spec.Members,
	})
}

// AutomationConfigArbitersThisReconciliation determines the correct number of
// automation config replica set arbiters based on our desired number, and our
// current number.
//
// Will not update arbiters until members have reached desired number.
func (m *MongoDBCommunity) AutomationConfigArbitersThisReconciliation() int {
	if scale.IsStillScaling(m) {
		return m.Status.CurrentMongoDBArbiters
	}

	return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{
		desired:                m.Spec.Arbiters,
		current:                m.Status.CurrentMongoDBArbiters,
		forceIndividualScaling: true,
	})
}

// GetOptionsString returns a string format of the connection string
// options that can be appended directly to the connection string.
//
// Only takes into account options for the resource and not any user.
//
// NOTE(review): option order follows Go map iteration and is therefore
// nondeterministic between calls — confirm no caller compares the resulting
// string byte-for-byte.
func (m *MongoDBCommunity) GetOptionsString() string {
	generalOptionsMap := m.Spec.AdditionalConnectionStringConfig.Object
	optionValues := make([]string, len(generalOptionsMap))
	i := 0

	// Protected options (e.g. ones the operator controls) are skipped.
	for key, value := range generalOptionsMap {
		if _, protected := protectedConnectionStringOptions[key]; !protected {
			optionValues[i] = fmt.Sprintf("%s=%v", key, value)
			i += 1
		}
	}

	optionValues = optionValues[:i]

	optionsString := ""
	if i > 0 {
		optionsString = "&" + strings.Join(optionValues, "&")
	}
	return optionsString
}

// GetUserOptionsString returns a string format of the connection string
// options that can be appended directly to the connection string.
//
// Takes into account both user options and resource options.
// User options will override any existing options in the resource.
func (m *MongoDBCommunity) GetUserOptionsString(user authtypes.User) string {
	generalOptionsMap := m.Spec.AdditionalConnectionStringConfig.Object
	userOptionsMap := user.ConnectionStringOptions
	optionValues := make([]string, len(generalOptionsMap)+len(userOptionsMap))
	i := 0
	// User-level options first.
	for key, value := range userOptionsMap {
		if _, protected := protectedConnectionStringOptions[key]; !protected {
			optionValues[i] = fmt.Sprintf("%s=%v", key, value)
			i += 1
		}
	}

	// Resource-level options, skipping keys already supplied by the user.
	for key, value := range generalOptionsMap {
		_, ok := userOptionsMap[key]
		if _, protected := protectedConnectionStringOptions[key]; !ok && !protected {
			optionValues[i] = fmt.Sprintf("%s=%v", key, value)
			i += 1
		}
	}

	optionValues = optionValues[:i]

	optionsString := ""
	if i > 0 {
		optionsString = "&" + strings.Join(optionValues, "&")
	}
	return optionsString
}

// MongoURI returns a mongo uri which can be used to connect to this deployment
func (m *MongoDBCommunity) MongoURI(clusterDomain string) string {
	optionsString := m.GetOptionsString()

	return fmt.Sprintf("mongodb://%s/?replicaSet=%s%s", strings.Join(m.Hosts(clusterDomain), ","), m.Name, optionsString)
}

// MongoSRVURI returns a mongo srv uri which can be used to connect to this deployment
func (m *MongoDBCommunity) MongoSRVURI(clusterDomain string) string {
	if clusterDomain == "" {
		clusterDomain = defaultClusterDomain
	}

	optionsString := m.GetOptionsString()

	return fmt.Sprintf("mongodb+srv://%s.%s.svc.%s/?replicaSet=%s%s", m.ServiceName(), m.Namespace, clusterDomain, m.Name, optionsString)
}

// MongoAuthUserURI returns a mongo uri which can be used to connect to this deployment
// and includes the authentication data for the user
func (m *MongoDBCommunity) MongoAuthUserURI(user authtypes.User, password string, clusterDomain string) string {
	optionsString := m.GetUserOptionsString(user)
	return fmt.Sprintf("mongodb://%s%s/%s?replicaSet=%s&ssl=%t%s",
		user.GetLoginString(password),
		strings.Join(m.Hosts(clusterDomain), ","),
		user.Database,
		m.Name,
		m.Spec.Security.TLS.Enabled,
		optionsString)
}

// MongoAuthUserSRVURI returns a mongo srv uri which can be used to connect to this deployment
// and includes the authentication data for the user
func (m *MongoDBCommunity) MongoAuthUserSRVURI(user authtypes.User, password string, clusterDomain string) string {
	if clusterDomain == "" {
		clusterDomain = defaultClusterDomain
	}

	optionsString := m.GetUserOptionsString(user)
	return fmt.Sprintf("mongodb+srv://%s%s.%s.svc.%s/%s?replicaSet=%s&ssl=%t%s",
		user.GetLoginString(password),
		m.ServiceName(),
		m.Namespace,
		clusterDomain,
		user.Database,
		m.Name,
		m.Spec.Security.TLS.Enabled,
		optionsString)
}

// Hosts returns the per-member "host:port" addresses of the StatefulSet pods,
// one entry per spec.members.
func (m *MongoDBCommunity) Hosts(clusterDomain string) []string {
	hosts := make([]string, m.Spec.Members)

	if clusterDomain == "" {
		clusterDomain = defaultClusterDomain
	}

	for i := 0; i < m.Spec.Members; i++ {
		hosts[i] = fmt.Sprintf("%s-%d.%s.%s.svc.%s:%d",
			m.Name, i,
			m.ServiceName(),
			m.Namespace,
			clusterDomain,
			m.GetMongodConfiguration().GetDBPort())
	}
	return hosts
}

// ServiceName returns the name of the Service that should be created for this resource.
func (m *MongoDBCommunity) ServiceName() string {
	// A user-supplied serviceName on the StatefulSet spec wins over the default.
	serviceName := m.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName
	if serviceName != "" {
		return serviceName
	}
	return m.Name + "-svc"
}

// ArbiterNamespacedName returns the namespaced name of the arbiter resource ("<name>-arb").
func (m *MongoDBCommunity) ArbiterNamespacedName() types.NamespacedName {
	return types.NamespacedName{Namespace: m.Namespace, Name: m.Name + "-arb"}
}

// AutomationConfigSecretName returns the name of the secret storing the automation config.
func (m *MongoDBCommunity) AutomationConfigSecretName() string {
	return m.Name + "-config"
}

// TLSCaCertificateSecretNamespacedName will get the namespaced name of the Secret containing the CA certificate
// As the Secret will be mounted to our pods, it has to be in the same namespace as the MongoDB resource
func (m *MongoDBCommunity) TLSCaCertificateSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Spec.Security.TLS.CaCertificateSecret.Name, Namespace: m.Namespace}
}

// TLSConfigMapNamespacedName will get the namespaced name of the ConfigMap containing the CA certificate
// As the ConfigMap will be mounted to our pods, it has to be in the same namespace as the MongoDB resource
func (m *MongoDBCommunity) TLSConfigMapNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Spec.Security.TLS.CaConfigMap.Name, Namespace: m.Namespace}
}

// TLSSecretNamespacedName will get the namespaced name of the Secret containing the server certificate and key
func (m *MongoDBCommunity) TLSSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Spec.Security.TLS.CertificateKeySecret.Name, Namespace: m.Namespace}
}

// PrometheusTLSSecretNamespacedName will get the namespaced name of the Secret containing the server certificate and key
func (m *MongoDBCommunity) PrometheusTLSSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Spec.Prometheus.TLSSecretRef.Name, Namespace: m.Namespace}
}

// TLSOperatorCASecretNamespacedName returns the namespaced name of the operator-managed CA certificate Secret.
func (m *MongoDBCommunity) TLSOperatorCASecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name + "-ca-certificate", Namespace: m.Namespace}
}

// TLSOperatorSecretNamespacedName will get the namespaced name of the Secret created by the operator
// containing the combined certificate and key.
func (m *MongoDBCommunity) TLSOperatorSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name + "-server-certificate-key", Namespace: m.Namespace}
}

// PrometheusTLSOperatorSecretNamespacedName will get the namespaced name of the Secret created by the operator
// containing the combined certificate and key.
func (m *MongoDBCommunity) PrometheusTLSOperatorSecretNamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name + "-prometheus-certificate-key", Namespace: m.Namespace}
}

// NamespacedName returns the namespaced name of this resource.
func (m *MongoDBCommunity) NamespacedName() types.NamespacedName {
	return types.NamespacedName{Name: m.Name, Namespace: m.Namespace}
}

// DesiredReplicas returns the desired number of replica set members (spec.members).
func (m *MongoDBCommunity) DesiredReplicas() int {
	return m.Spec.Members
}

// CurrentReplicas returns the number of StatefulSet replicas recorded in status.
func (m *MongoDBCommunity) CurrentReplicas() int {
	return m.Status.CurrentStatefulSetReplicas
}

// ForcedIndividualScaling if set to true, will always scale the deployment 1 by
// 1, even if the resource has been just created.
//
// The reason for this is that we have 2 types of resources that are scaled at
// different times: a) Regular members, which can be scaled from 0->n, for
// instance, when the resource was just created; and b) Arbiters, which will be
// scaled from 0->M 1 by 1 at all times.
//
// This was done to simplify the process of scaling arbiters, *after* members
// have reached the desired amount of replicas.
func (m *MongoDBCommunity) ForcedIndividualScaling() bool {
	return false
}

// DesiredArbiters returns the desired number of arbiters (spec.arbiters).
func (m *MongoDBCommunity) DesiredArbiters() int {
	return m.Spec.Arbiters
}

// CurrentArbiters returns the number of arbiter StatefulSet replicas recorded in status.
func (m *MongoDBCommunity) CurrentArbiters() int {
	return m.Status.CurrentStatefulSetArbitersReplicas
}

// GetMongoDBVersion returns the MongoDB server version from the spec.
func (m *MongoDBCommunity) GetMongoDBVersion() string {
	return m.Spec.Version
}

// GetMongoDBVersionForAnnotation returns the MDB version used to annotate the object.
// Here it's the same as GetMongoDBVersion, but a different name is used in order to make
// the usage clearer in enterprise (where it's a method of OpsManager but is used for the AppDB)
func (m *MongoDBCommunity) GetMongoDBVersionForAnnotation() string {
	return m.GetMongoDBVersion()
}

// StatefulSetReplicasThisReconciliation returns the member replica count to apply
// to the StatefulSet in this reconciliation step.
func (m *MongoDBCommunity) StatefulSetReplicasThisReconciliation() int {
	return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{
		desired:                m.DesiredReplicas(),
		current:                m.CurrentReplicas(),
		forceIndividualScaling: false,
	})
}

// StatefulSetArbitersThisReconciliation returns the arbiter replica count to apply
// to the arbiter StatefulSet in this reconciliation step (always scaled one by one).
func (m *MongoDBCommunity) StatefulSetArbitersThisReconciliation() int {
	return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{
		desired:                m.DesiredArbiters(),
		current:                m.CurrentArbiters(),
		forceIndividualScaling: true,
	})
}

// GetUpdateStrategyType returns the type of RollingUpgradeStrategy that the
// MongoDB StatefulSet should be configured with.
func (m *MongoDBCommunity) GetUpdateStrategyType() appsv1.StatefulSetUpdateStrategyType {
	// Version changes use OnDelete so the operator controls pod replacement order.
	if !m.IsChangingVersion() {
		return appsv1.RollingUpdateStatefulSetStrategyType
	}
	return appsv1.OnDeleteStatefulSetStrategyType
}

// IsChangingVersion returns true if an attempted version change is occurring.
func (m *MongoDBCommunity) IsChangingVersion() bool {
	lastVersion := m.getLastVersion()
	return lastVersion != "" && lastVersion != m.Spec.Version
}

// getLastVersion returns the MDB version the statefulset was configured with.
+func (m *MongoDBCommunity) getLastVersion() string { + return annotations.GetAnnotation(m, annotations.LastAppliedMongoDBVersion) +} + +func (m *MongoDBCommunity) HasSeparateDataAndLogsVolumes() bool { + return true +} + +func (m *MongoDBCommunity) GetAnnotations() map[string]string { + return m.Annotations +} + +func (m *MongoDBCommunity) DataVolumeName() string { + return "data-volume" +} + +func (m *MongoDBCommunity) LogsVolumeName() string { + return "logs-volume" +} + +func (m *MongoDBCommunity) NeedsAutomationConfigVolume() bool { + return true +} + +func (m MongoDBCommunity) GetAgentLogLevel() LogLevel { + return m.Spec.AgentConfiguration.LogLevel +} + +func (m MongoDBCommunity) GetAgentLogFile() string { + return m.Spec.AgentConfiguration.LogFile +} + +func (m MongoDBCommunity) GetAgentMaxLogFileDurationHours() int { + return m.Spec.AgentConfiguration.MaxLogFileDurationHours +} + +type automationConfigReplicasScaler struct { + current, desired int + forceIndividualScaling bool +} + +func (a automationConfigReplicasScaler) DesiredReplicas() int { + return a.desired +} + +func (a automationConfigReplicasScaler) CurrentReplicas() int { + return a.current +} + +func (a automationConfigReplicasScaler) ForcedIndividualScaling() bool { + return a.forceIndividualScaling +} + +// +kubebuilder:object:root=true + +// MongoDBCommunityList contains a list of MongoDB +type MongoDBCommunityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MongoDBCommunity `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MongoDBCommunity{}, &MongoDBCommunityList{}) +} diff --git a/api/v1/mongodbcommunity_types_test.go b/api/v1/mongodbcommunity_types_test.go new file mode 100644 index 000000000..19b365527 --- /dev/null +++ b/api/v1/mongodbcommunity_types_test.go @@ -0,0 +1,694 @@ +package v1 + +import ( + "encoding/json" + + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type args struct { + members int + name string + namespace string + clusterDomain string + additionalMongodConfig map[string]interface{} + additionalConnectionStringConfig map[string]interface{} + userConnectionStringConfig map[string]interface{} + connectionString string +} + +func TestMongoDB_MongoURI(t *testing.T) { + tests := []args{ + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "", + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "my.cluster", + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.my.cluster:27017,my-rs-1.my-rs-svc.my-namespace.svc.my.cluster:27017/?replicaSet=my-rs", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + clusterDomain: "", + connectionString: "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017/?replicaSet=my-single-rs", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + clusterDomain: "my.cluster", + connectionString: "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.my.cluster:27017/?replicaSet=my-single-rs", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + clusterDomain: "", + connectionString: 
"mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017/?replicaSet=my-big-rs", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + clusterDomain: "my.cluster", + connectionString: "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017/?replicaSet=my-big-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "", + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:40333,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:40333/?replicaSet=my-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "my.cluster", + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.my.cluster:40333,my-rs-1.my-rs-svc.my-namespace.svc.my.cluster:40333/?replicaSet=my-rs", + }, + } + + for _, params := range tests { + mdb := newReplicaSet(params.members, params.name, params.namespace) + mdb.Spec.AdditionalMongodConfig.Object = params.additionalMongodConfig + assert.Equal(t, mdb.MongoURI(params.clusterDomain), params.connectionString) + } +} + +func TestMongoDB_MongoURI_With_Options(t *testing.T) { + tests := []args{ + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: 
map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs&readPreference=primary", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs&readPreference=primary", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + connectionString: "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017/?replicaSet=my-single-rs&readPreference=primary", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + connectionString: "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017/?replicaSet=my-big-rs&readPreference=primary", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:40333,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:40333/?replicaSet=my-rs&readPreference=primary", + }, + } + + for _, 
params := range tests { + mdb := newReplicaSet(params.members, params.name, params.namespace) + mdb.Spec.AdditionalMongodConfig.Object = params.additionalMongodConfig + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + assert.Equal(t, mdb.MongoURI(params.clusterDomain), params.connectionString) + } +} + +func TestMongoDB_MongoSRVURI(t *testing.T) { + mdb := newReplicaSet(2, "my-rs", "my-namespace") + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), "mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs") +} + +func TestMongoDB_MongoSRVURI_With_Options(t *testing.T) { + mdb := newReplicaSet(2, "my-rs", "my-namespace") + mdb.Spec.AdditionalConnectionStringConfig.Object = map[string]interface{}{ + "readPreference": "primary"} + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs&readPreference=primary") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), "mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs&readPreference=primary") + + mdb = newReplicaSet(2, "my-rs", "my-namespace") + mdb.Spec.AdditionalConnectionStringConfig.Object = map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true} + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs&readPreference=primary") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), "mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs&readPreference=primary") +} + +func TestMongodConfiguration(t *testing.T) { + mc := NewMongodConfiguration() + assert.Equal(t, mc.Object, map[string]interface{}{}) + assert.Equal(t, mc.GetDBDataDir(), "/data") + assert.Equal(t, mc.GetDBPort(), 27017) + mc.SetOption("net.port", 40333.) 
+ assert.Equal(t, mc.GetDBPort(), 40333) + mc.SetOption("storage", map[string]interface{}{"dbPath": "/other/data/path"}) + assert.Equal(t, mc.GetDBDataDir(), "/other/data/path") + assert.Equal(t, mc.Object, map[string]interface{}{ + "net": map[string]interface{}{ + "port": 40333., + }, + "storage": map[string]interface{}{ + "dbPath": "/other/data/path", + }, + }) +} + +func TestMongodConfigurationWithNestedMapsAfterUnmarshalling(t *testing.T) { + jsonStr := ` + { + "net.port": 40333, + "storage.dbPath": "/other/data/path" + } + ` + mc := NewMongodConfiguration() + require.NoError(t, json.Unmarshal([]byte(jsonStr), &mc)) + assert.Equal(t, map[string]interface{}{ + "net": map[string]interface{}{ + "port": 40333., + }, + "storage": map[string]interface{}{ + "dbPath": "/other/data/path", + }, + }, mc.Object) +} + +func TestGetAuthOptions(t *testing.T) { + t.Run("Default AutoAuthMechanism set if modes array empty", func(t *testing.T) { + mdb := newModesArray(nil, "empty-modes-array", "my-namespace") + + options := mdb.GetAuthOptions() + + assert.EqualValues(t, defaultMode, options.AutoAuthMechanism) + assert.EqualValues(t, []string{constants.Sha256}, options.AuthMechanisms) + }) +} + +func TestGetScramCredentialsSecretName(t *testing.T) { + testusers := []struct { + in MongoDBUser + exp string + }{ + { + MongoDBUser{ + Name: "mdb-0", + DB: "admin", + Roles: []Role{ + // roles on testing db for general connectivity + { + DB: "testing", + Name: "readWrite", + }, + { + DB: "testing", + Name: "clusterAdmin", + }, + // admin roles for reading FCV + { + DB: "admin", + Name: "readWrite", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + }, + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "scram-credential-secret-name-0-scram-credentials", + }, + { + MongoDBUser{ + Name: "mdb-1", + DB: "admin", + Roles: []Role{ + // roles on testing db for general connectivity + { + DB: "testing", + Name: "readWrite", + }, + { + DB: "testing", + Name: "clusterAdmin", 
+ }, + // admin roles for reading FCV + { + DB: "admin", + Name: "readWrite", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + }, + ScramCredentialsSecretName: "scram-credential-secret-name-1", + }, + "scram-credential-secret-name-1-scram-credentials", + }, + } + + for _, tt := range testusers { + assert.Equal(t, tt.exp, tt.in.GetScramCredentialsSecretName()) + } + +} + +func TestGetConnectionStringSecretName(t *testing.T) { + testusers := []struct { + in MongoDBUser + exp string + }{ + { + MongoDBUser{ + Name: "mdb-0", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-admin-mdb-0", + }, + { + MongoDBUser{ + Name: "?_normalize/_-username/?@with/[]?no]?/:allowed:chars[only?", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-admin-normalize-username-with-no-allowed-chars-only", + }, + { + MongoDBUser{ + Name: "AppUser", + DB: "Administrators", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-administrators-appuser", + }, + { + MongoDBUser{ + Name: "mdb-0", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + ConnectionStringSecretName: "connection-string-secret", + }, + "connection-string-secret", + }, + { + MongoDBUser{ + Name: "mdb-2", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-2", + ConnectionStringSecretName: "connection-string-secret-2", + ConnectionStringSecretNamespace: "other-namespace", + }, + "connection-string-secret-2", + }, + } + + for _, tt := range testusers { + assert.Equal(t, tt.exp, tt.in.GetConnectionStringSecretName("replica-set")) + } +} + +func TestMongoDBCommunity_MongoAuthUserURI(t *testing.T) { + testuser := authtypes.User{ + Username: "testuser", + Database: "admin", + } + mdb := newReplicaSet(2, "my-rs", "my-namespace") + + tests := []args{ + { + additionalConnectionStringConfig: map[string]interface{}{}, + connectionString: 
"mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "secondary"}, + connectionString: 
"mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=secondary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"retryReads": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&retryReads=true&readPreference=primary", + }, + } + + for _, params := range tests { + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + testuser.ConnectionStringOptions = params.userConnectionStringConfig + assert.Equal(t, mdb.MongoAuthUserURI(testuser, "password", ""), params.connectionString) + } + + testuser = authtypes.User{ + Username: "testuser", + Database: "$external", + } + mdb = newReplicaSet(2, "my-rs", "my-namespace") + + assert.Equal(t, mdb.MongoAuthUserURI(testuser, "", ""), "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/$external?replicaSet=my-rs&ssl=false") +} + +func TestMongoDBCommunity_MongoAuthUserSRVURI(t *testing.T) { + testuser := authtypes.User{ + Username: "testuser", + Database: "admin", + } + mdb := newReplicaSet(2, "my-rs", "my-namespace") + + tests := []args{ + { + additionalConnectionStringConfig: map[string]interface{}{}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{ + 
"readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "secondary"}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=secondary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"retryReads": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&retryReads=true&readPreference=primary", + }, + } + + for _, params := range tests { + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + testuser.ConnectionStringOptions = params.userConnectionStringConfig + assert.Equal(t, mdb.MongoAuthUserSRVURI(testuser, "password", ""), params.connectionString) + } + + testuser = authtypes.User{ + Username: 
"testuser", + Database: "$external", + } + mdb = newReplicaSet(2, "my-rs", "my-namespace") + + assert.Equal(t, mdb.MongoAuthUserSRVURI(testuser, "", ""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/$external?replicaSet=my-rs&ssl=false") +} + +func TestConvertAuthModeToAuthMechanism(t *testing.T) { + assert.Equal(t, constants.X509, ConvertAuthModeToAuthMechanism("X509")) + assert.Equal(t, constants.Sha256, ConvertAuthModeToAuthMechanism("SCRAM")) + assert.Equal(t, constants.Sha256, ConvertAuthModeToAuthMechanism("SCRAM-SHA-256")) + assert.Equal(t, constants.Sha1, ConvertAuthModeToAuthMechanism("SCRAM-SHA-1")) + assert.Equal(t, "", ConvertAuthModeToAuthMechanism("LDAP")) +} + +func TestMongoDBCommunity_GetAuthOptions(t *testing.T) { + mdb := newReplicaSet(3, "mdb", "mongodb") + mdb.Spec.Security.Authentication.Modes = []AuthMode{"SCRAM", "X509"} + + opts := mdb.GetAuthOptions() + + assert.Equal(t, constants.Sha256, opts.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256, constants.X509}, opts.AuthMechanisms) + assert.Equal(t, false, opts.AuthoritativeSet) + + mdb.Spec.Security.Authentication.Modes = []AuthMode{"X509"} + mdb.Spec.Security.Authentication.AgentMode = "X509" + + opts = mdb.GetAuthOptions() + assert.Equal(t, constants.X509, opts.AutoAuthMechanism) + assert.Equal(t, []string{constants.X509}, opts.AuthMechanisms) +} + +func TestMongoDBCommunity_GetAuthUsers(t *testing.T) { + mdb := newReplicaSet(3, "mdb", "mongodb") + mdb.Spec.Users = []MongoDBUser{ + { + Name: "my-user", + DB: "admin", + PasswordSecretRef: SecretKeyReference{Name: "my-user-password"}, + Roles: []Role{ + { + DB: "admin", + Name: "readWriteAnyDatabase", + }, + }, + ScramCredentialsSecretName: "my-scram", + ConnectionStringSecretName: "", + AdditionalConnectionStringConfig: MapWrapper{}, + }, + { + Name: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization", + DB: "$external", + PasswordSecretRef: SecretKeyReference{}, + Roles: []Role{ + { + DB: "admin", 
+ Name: "readWriteAnyDatabase", + }, + }, + ScramCredentialsSecretName: "", + ConnectionStringSecretName: "", + AdditionalConnectionStringConfig: MapWrapper{}, + }, + } + + authUsers := mdb.GetAuthUsers() + + assert.Equal(t, authtypes.User{ + Username: "my-user", + Database: "admin", + Roles: []authtypes.Role{{ + Database: "admin", + Name: "readWriteAnyDatabase", + }}, + PasswordSecretKey: "password", + PasswordSecretName: "my-user-password", + ScramCredentialsSecretName: "my-scram-scram-credentials", + ConnectionStringSecretName: "mdb-admin-my-user", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, + }, authUsers[0]) + assert.Equal(t, authtypes.User{ + Username: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization", + Database: "$external", + Roles: []authtypes.Role{{ + Database: "admin", + Name: "readWriteAnyDatabase", + }}, + PasswordSecretKey: "", + PasswordSecretName: "", + ScramCredentialsSecretName: "", + ConnectionStringSecretName: "mdb-external-cn-my-x509-authenticated-user-ou-organizationalunit-o-organization", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, + }, authUsers[1]) +} + +func newReplicaSet(members int, name, namespace string) MongoDBCommunity { + return MongoDBCommunity{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: MongoDBCommunitySpec{ + Members: members, + }, + } +} + +func newModesArray(modes []AuthMode, name, namespace string) MongoDBCommunity { + return MongoDBCommunity{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: MongoDBCommunitySpec{ + Security: Security{ + Authentication: Authentication{ + Modes: modes, + IgnoreUnknownUsers: nil, + }, + }, + }, + } +} + +func TestMongoDBCommunitySpec_GetAgentCertificateRef(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs", 
m.Spec.GetAgentCertificateRef()) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "my-agent-certificate"} + + assert.Equal(t, "my-agent-certificate", m.Spec.GetAgentCertificateRef()) +} + +func TestMongoDBCommunity_AgentCertificateSecretNamespacedName(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs", m.AgentCertificateSecretNamespacedName().Name) + assert.Equal(t, "mdb", m.AgentCertificateSecretNamespacedName().Namespace) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "agent-certs-custom"} + assert.Equal(t, "agent-certs-custom", m.AgentCertificateSecretNamespacedName().Name) +} + +func TestMongoDBCommunity_AgentCertificatePemSecretNamespacedName(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs-pem", m.AgentCertificatePemSecretNamespacedName().Name) + assert.Equal(t, "mdb", m.AgentCertificatePemSecretNamespacedName().Namespace) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "agent-certs-custom"} + assert.Equal(t, "agent-certs-custom-pem", m.AgentCertificatePemSecretNamespacedName().Name) + +} + +func TestMongoDBCommunitySpec_GetAgentAuthMode(t *testing.T) { + type fields struct { + agentAuth AuthMode + modes []AuthMode + } + tests := []struct { + name string + fields fields + want AuthMode + }{ + { + name: "Agent auth not specified and modes array empty", + fields: fields{ + agentAuth: "", + modes: []AuthMode{}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Agent auth specified and modes array empty", + fields: fields{ + agentAuth: "X509", + modes: []AuthMode{}, + }, + want: AuthMode("X509"), + }, + { + name: "Modes array one element", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"X509"}, + }, + want: AuthMode("X509"), + }, + { + name: "Modes array has sha256 and sha1", + fields: fields{ + agentAuth: "", + modes: 
[]AuthMode{"SCRAM-SHA-256", "SCRAM-SHA-1"}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Modes array has scram and sha1", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM", "SCRAM-SHA-1"}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Modes array has 2 different auth modes", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM", "X509"}, + }, + want: AuthMode(""), + }, + { + name: "Modes array has 3 auth modes", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM-SHA-256", "SCRAM-SHA-1", "X509"}, + }, + want: AuthMode(""), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + m.Spec.Security.Authentication.Modes = tt.fields.modes + m.Spec.Security.Authentication.AgentMode = tt.fields.agentAuth + assert.Equalf(t, tt.want, m.Spec.GetAgentAuthMode(), "GetAgentAuthMode()") + }) + } +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..df22b4876 --- /dev/null +++ b/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,584 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) { + *out = *in + if in.LogRotate != nil { + in, out := &in.LogRotate, &out.LogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } + if in.AuditLogRotate != nil { + in, out := &in.AuditLogRotate, &out.AuditLogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } + if in.SystemLog != nil { + in, out := &in.SystemLog, &out.SystemLog + *out = new(automationconfig.SystemLog) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfiguration. +func (in *AgentConfiguration) DeepCopy() *AgentConfiguration { + if in == nil { + return nil + } + out := new(AgentConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + if in.Modes != nil { + in, out := &in.Modes, &out.Modes + *out = make([]AuthMode, len(*in)) + copy(*out, *in) + } + if in.AgentCertificateSecret != nil { + in, out := &in.AgentCertificateSecret, &out.AgentCertificateSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.IgnoreUnknownUsers != nil { + in, out := &in.IgnoreUnknownUsers, &out.IgnoreUnknownUsers + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. 
+func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationRestriction) DeepCopyInto(out *AuthenticationRestriction) { + *out = *in + if in.ClientSource != nil { + in, out := &in.ClientSource, &out.ClientSource + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddress != nil { + in, out := &in.ServerAddress, &out.ServerAddress + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationRestriction. +func (in *AuthenticationRestriction) DeepCopy() *AuthenticationRestriction { + if in == nil { + return nil + } + out := new(AuthenticationRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomationConfigOverride) DeepCopyInto(out *AutomationConfigOverride) { + *out = *in + if in.Processes != nil { + in, out := &in.Processes, &out.Processes + *out = make([]OverrideProcess, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ReplicaSet.DeepCopyInto(&out.ReplicaSet) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationConfigOverride. +func (in *AutomationConfigOverride) DeepCopy() *AutomationConfigOverride { + if in == nil { + return nil + } + out := new(AutomationConfigOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomRole) DeepCopyInto(out *CustomRole) { + *out = *in + if in.Privileges != nil { + in, out := &in.Privileges, &out.Privileges + *out = make([]Privilege, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]Role, len(*in)) + copy(*out, *in) + } + if in.AuthenticationRestrictions != nil { + in, out := &in.AuthenticationRestrictions, &out.AuthenticationRestrictions + *out = make([]AuthenticationRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRole. +func (in *CustomRole) DeepCopy() *CustomRole { + if in == nil { + return nil + } + out := new(CustomRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MapWrapper) DeepCopyInto(out *MapWrapper) { + clone := in.DeepCopy() + *out = *clone +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBCommunity) DeepCopyInto(out *MongoDBCommunity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunity. +func (in *MongoDBCommunity) DeepCopy() *MongoDBCommunity { + if in == nil { + return nil + } + out := new(MongoDBCommunity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MongoDBCommunity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBCommunityList) DeepCopyInto(out *MongoDBCommunityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MongoDBCommunity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunityList. +func (in *MongoDBCommunityList) DeepCopy() *MongoDBCommunityList { + if in == nil { + return nil + } + out := new(MongoDBCommunityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MongoDBCommunityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDBCommunitySpec) DeepCopyInto(out *MongoDBCommunitySpec) { + *out = *in + if in.ReplicaSetHorizons != nil { + in, out := &in.ReplicaSetHorizons, &out.ReplicaSetHorizons + *out = make(ReplicaSetHorizonConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(automationconfig.ReplicaSetHorizons, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + } + in.Security.DeepCopyInto(&out.Security) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]MongoDBUser, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.StatefulSetConfiguration.DeepCopyInto(&out.StatefulSetConfiguration) + in.AgentConfiguration.DeepCopyInto(&out.AgentConfiguration) + in.AdditionalMongodConfig.DeepCopyInto(&out.AdditionalMongodConfig) + if in.AutomationConfigOverride != nil { + in, out := &in.AutomationConfigOverride, &out.AutomationConfigOverride + *out = new(AutomationConfigOverride) + (*in).DeepCopyInto(*out) + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(Prometheus) + **out = **in + } + in.AdditionalConnectionStringConfig.DeepCopyInto(&out.AdditionalConnectionStringConfig) + if in.MemberConfig != nil { + in, out := &in.MemberConfig, &out.MemberConfig + *out = make([]automationconfig.MemberOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunitySpec. +func (in *MongoDBCommunitySpec) DeepCopy() *MongoDBCommunitySpec { + if in == nil { + return nil + } + out := new(MongoDBCommunitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MongoDBCommunityStatus) DeepCopyInto(out *MongoDBCommunityStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunityStatus. +func (in *MongoDBCommunityStatus) DeepCopy() *MongoDBCommunityStatus { + if in == nil { + return nil + } + out := new(MongoDBCommunityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBUser) DeepCopyInto(out *MongoDBUser) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]Role, len(*in)) + copy(*out, *in) + } + in.AdditionalConnectionStringConfig.DeepCopyInto(&out.AdditionalConnectionStringConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBUser. +func (in *MongoDBUser) DeepCopy() *MongoDBUser { + if in == nil { + return nil + } + out := new(MongoDBUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongodConfiguration) DeepCopyInto(out *MongodConfiguration) { + *out = *in + in.MapWrapper.DeepCopyInto(&out.MapWrapper) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodConfiguration. +func (in *MongodConfiguration) DeepCopy() *MongodConfiguration { + if in == nil { + return nil + } + out := new(MongodConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OverrideProcess) DeepCopyInto(out *OverrideProcess) { + *out = *in + if in.LogRotate != nil { + in, out := &in.LogRotate, &out.LogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideProcess. +func (in *OverrideProcess) DeepCopy() *OverrideProcess { + if in == nil { + return nil + } + out := new(OverrideProcess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideReplicaSet) DeepCopyInto(out *OverrideReplicaSet) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + in.Settings.DeepCopyInto(&out.Settings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideReplicaSet. +func (in *OverrideReplicaSet) DeepCopy() *OverrideReplicaSet { + if in == nil { + return nil + } + out := new(OverrideReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Privilege) DeepCopyInto(out *Privilege) { + *out = *in + in.Resource.DeepCopyInto(&out.Resource) + if in.Actions != nil { + in, out := &in.Actions, &out.Actions + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Privilege. +func (in *Privilege) DeepCopy() *Privilege { + if in == nil { + return nil + } + out := new(Privilege) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Prometheus) DeepCopyInto(out *Prometheus) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + out.TLSSecretRef = in.TLSSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. +func (in *Prometheus) DeepCopy() *Prometheus { + if in == nil { + return nil + } + out := new(Prometheus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ReplicaSetHorizonConfiguration) DeepCopyInto(out *ReplicaSetHorizonConfiguration) { + { + in := &in + *out = make(ReplicaSetHorizonConfiguration, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(automationconfig.ReplicaSetHorizons, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetHorizonConfiguration. +func (in ReplicaSetHorizonConfiguration) DeepCopy() ReplicaSetHorizonConfiguration { + if in == nil { + return nil + } + out := new(ReplicaSetHorizonConfiguration) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resource) DeepCopyInto(out *Resource) { + *out = *in + if in.DB != nil { + in, out := &in.DB, &out.DB + *out = new(string) + **out = **in + } + if in.Collection != nil { + in, out := &in.Collection, &out.Collection + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. +func (in *Resource) DeepCopy() *Resource { + if in == nil { + return nil + } + out := new(Resource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Role) DeepCopyInto(out *Role) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeyReference) DeepCopyInto(out *SecretKeyReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeyReference. +func (in *SecretKeyReference) DeepCopy() *SecretKeyReference { + if in == nil { + return nil + } + out := new(SecretKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Security) DeepCopyInto(out *Security) { + *out = *in + in.Authentication.DeepCopyInto(&out.Authentication) + in.TLS.DeepCopyInto(&out.TLS) + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]CustomRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Security. +func (in *Security) DeepCopy() *Security { + if in == nil { + return nil + } + out := new(Security) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetConfiguration) DeepCopyInto(out *StatefulSetConfiguration) { + *out = *in + in.SpecWrapper.DeepCopyInto(&out.SpecWrapper) + in.MetadataWrapper.DeepCopyInto(&out.MetadataWrapper) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetConfiguration. 
+func (in *StatefulSetConfiguration) DeepCopy() *StatefulSetConfiguration { + if in == nil { + return nil + } + out := new(StatefulSetConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetMetadataWrapper) DeepCopyInto(out *StatefulSetMetadataWrapper) { + clone := in.DeepCopy() + *out = *clone +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpecWrapper) DeepCopyInto(out *StatefulSetSpecWrapper) { + clone := in.DeepCopy() + *out = *clone +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLS) DeepCopyInto(out *TLS) { + *out = *in + out.CertificateKeySecret = in.CertificateKeySecret + if in.CaCertificateSecret != nil { + in, out := &in.CaCertificateSecret, &out.CaCertificateSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CaConfigMap != nil { + in, out := &in.CaConfigMap, &out.CaConfigMap + *out = new(corev1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS. 
+func (in *TLS) DeepCopy() *TLS { + if in == nil { + return nil + } + out := new(TLS) + in.DeepCopyInto(out) + return out +} diff --git a/build/Dockerfile b/build/Dockerfile deleted file mode 100644 index ac45631e1..000000000 --- a/build/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:16.04 - -ENV OPERATOR=/usr/local/bin/mongodb-kubernetes-operator \ - USER_UID=1001 \ - USER_NAME=mongodb-kubernetes-operator - -# install operator binary -COPY build/_output/bin/mongodb-kubernetes-operator ${OPERATOR} - -COPY build/bin /usr/local/bin -RUN /usr/local/bin/user_setup - -ENTRYPOINT ["/usr/local/bin/entrypoint"] - -USER ${USER_UID} diff --git a/build/bin/entrypoint b/build/bin/entrypoint index 560092a8e..9dfc7bd5e 100755 --- a/build/bin/entrypoint +++ b/build/bin/entrypoint @@ -3,10 +3,11 @@ # This is documented here: # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines -if ! whoami &>/dev/null; then +if ! whoami >/dev/null 2>&1; then if [ -w /etc/passwd ]; then echo "${USER_NAME:-mongodb-kubernetes-operator}:x:$(id -u):$(id -g):${USER_NAME:-mongodb-kubernetes-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd fi fi -exec ${OPERATOR} $@ +echo "Running ./${OPERATOR}" +"./${OPERATOR}" "$@" diff --git a/build/bin/user_setup b/build/bin/user_setup index 1e36064cb..1d741d62e 100755 --- a/build/bin/user_setup +++ b/build/bin/user_setup @@ -2,12 +2,12 @@ set -x # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) -mkdir -p ${HOME} -chown ${USER_UID}:0 ${HOME} -chmod ug+rwx ${HOME} +mkdir -p "${HOME}" +chown "${USER_UID}":0 "${HOME}" +chmod ug+rwx "${HOME}" # runtime user will need to be able to self-insert in /etc/passwd chmod g+rw /etc/passwd # no need for this script to remain in the image after running -rm $0 +rm "$0" diff --git a/cmd/manager/main.go b/cmd/manager/main.go index e75a35491..b8dd5d184 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -4,21 
+4,36 @@ import ( "fmt" "os" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/cache" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/controllers" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" ) -// Change below variables to serve metrics on different host or port. var ( - metricsHost = "0.0.0.0" - metricsPort int32 = 8383 - operatorMetricsPort int32 = 8686 + scheme = runtime.NewScheme() +) + +const ( + WatchNamespaceEnv = "WATCH_NAMESPACE" ) +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(mdbv1.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + func configureLogger() (*zap.Logger, error) { // TODO: configure non development logger logger, err := zap.NewDevelopment() @@ -40,52 +55,76 @@ func hasRequiredVariables(logger *zap.Logger, envVariables ...string) bool { func main() { log, err := configureLogger() if err != nil { - os.Exit(1) + log.Sugar().Fatalf("Failed to configure logger: %v", err) } - if !hasRequiredVariables(log, "AGENT_IMAGE") { + if !hasRequiredVariables( + log, + construct.MongodbRepoUrlEnv, + construct.MongodbImageEnv, + construct.AgentImageEnv, + construct.VersionUpgradeHookImageEnv, + construct.ReadinessProbeImageEnv, + ) { os.Exit(1) } - // get watch namespace from environment variable - namespace, nsSpecified := os.LookupEnv("WATCH_NAMESPACE") + // Get watch namespace from environment variable. 
+ namespace, nsSpecified := os.LookupEnv(WatchNamespaceEnv) if !nsSpecified { - os.Exit(1) + log.Sugar().Fatal("No namespace specified to watch") } - log.Info(fmt.Sprintf("Watching namespace: %s", namespace)) + // If namespace is a wildcard use the empty string to represent all namespaces + watchNamespace := "" + if namespace == "*" { + log.Info("Watching all namespaces") + } else { + watchNamespace = namespace + log.Sugar().Infof("Watching namespace: %s", watchNamespace) + } // Get a config to talk to the apiserver cfg, err := config.GetConfig() if err != nil { - os.Exit(1) + log.Sugar().Fatalf("Unable to get config: %v", err) } // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ - Namespace: namespace, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{watchNamespace: {}}, + }, }) if err != nil { - os.Exit(1) + log.Sugar().Fatalf("Unable to create manager: %v", err) } log.Info("Registering Components.") // Setup Scheme for all resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - os.Exit(1) + if err := mdbv1.AddToScheme(mgr.GetScheme()); err != nil { + log.Sugar().Fatalf("Unable to add mdbv1 to scheme: %v", err) } - // Setup all Controllers - if err := controller.AddToManager(mgr); err != nil { - os.Exit(1) + // Setup Controller. 
+ if err = controllers.NewReconciler( + mgr, + os.Getenv(construct.MongodbRepoUrlEnv), + os.Getenv(construct.MongodbImageEnv), + envvar.GetEnvOrDefault(construct.MongoDBImageTypeEnv, construct.DefaultImageType), + os.Getenv(construct.AgentImageEnv), + os.Getenv(construct.VersionUpgradeHookImageEnv), + os.Getenv(construct.ReadinessProbeImageEnv), + ).SetupWithManager(mgr); err != nil { + log.Sugar().Fatalf("Unable to create controller: %v", err) } + // +kubebuilder:scaffold:builder log.Info("Starting the Cmd.") // Start the Cmd if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - os.Exit(1) + log.Sugar().Fatalf("Unable to start manager: %v", err) } } diff --git a/cmd/readiness/main.go b/cmd/readiness/main.go new file mode 100644 index 000000000..6cf9e7804 --- /dev/null +++ b/cmd/readiness/main.go @@ -0,0 +1,289 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "go.uber.org/zap" +) + +const ( + headlessAgent = "HEADLESS_AGENT" + mongodNotReadyIntervalMinutes = time.Minute * 1 +) + +var logger *zap.SugaredLogger + +func init() { + // By default, we log to the output (convenient for tests) + cfg := zap.NewDevelopmentConfig() + log, err := cfg.Build() + if err != nil { + panic(err) + } + logger = log.Sugar() +} + +// isPodReady main function which makes decision if the pod is ready or not. The decision is based on the information +// from the AA health status file. +// The logic depends on if the pod is a standard MongoDB or an AppDB one. 
+// - If MongoDB: then just the 'statuses[0].IsInGoalState` field is used to learn if the Agent has reached the goal +// - if AppDB: the 'mmsStatus[0].lastGoalVersionAchieved' field is compared with the one from mounted automation config +// Additionally if the previous check hasn't returned 'true' an additional check for wait steps is being performed +func isPodReady(ctx context.Context, conf config.Config) (bool, error) { + healthStatus, err := parseHealthStatus(conf.HealthStatusReader) + if err != nil { + logger.Errorf("There was problem parsing health status file: %s", err) + return false, nil + } + + // The 'statuses' file can be empty only for OM Agents + if len(healthStatus.Statuses) == 0 && !isHeadlessMode() { + logger.Debug("'statuses' is empty. We assume there is no automation config for the agent yet. Returning ready.") + return true, nil + } + + // If the agent has reached the goal state + inGoalState, err := isInGoalState(ctx, healthStatus, conf) + if err != nil { + logger.Errorf("There was problem checking the health status: %s", err) + return false, err + } + + inReadyState := isInReadyState(healthStatus) + if !inReadyState { + logger.Info("Mongod is not ready") + } + + if inGoalState && inReadyState { + logger.Info("The Agent has reached goal state. Returning ready.") + return true, nil + } + + // Fallback logic: the agent is not in goal state and got stuck in some steps + if !inGoalState && isOnWaitingStep(healthStatus) { + logger.Info("The Agent is on wait Step. Returning ready.") + return true, nil + } + + logger.Info("Reached the end of the check. Returning not ready.") + return false, nil +} + +// isOnWaitingStep returns true if the agent is stuck on waiting for the other Agents or something else to happen. 
+func isOnWaitingStep(health health.Status) bool { + currentStep := findCurrentStep(health.MmsStatus) + if currentStep != nil { + return isWaitStep(currentStep) + } + return false +} + +// findCurrentStep returns the step which the Agent is working now. +// The algorithm (described in https://github.com/10gen/ops-manager-kubernetes/pull/401#discussion_r333071555): +// - Obtain the latest plan (the last one in the plans array) +// - Find the last step, which has Started not nil and Completed nil. The Steps are processed as a tree in a BFS fashion. +// The last element is very likely to be the Step the Agent is performing at the moment. There are some chances that +// this is a waiting step, use isWaitStep to verify this. +func findCurrentStep(processStatuses map[string]health.MmsDirectorStatus) *health.StepStatus { + var currentPlan *health.PlanStatus + if len(processStatuses) == 0 { + // Seems shouldn't happen but let's check anyway - may be needs to be changed to Info if this happens + logger.Warnf("There is no information about Agent process plans") + return nil + } + if len(processStatuses) > 1 { + logger.Errorf("Only one process status is expected but got %d!", len(processStatuses)) + return nil + } + + // There is always only one process managed by the Agent - so there will be only one loop + for processName, processStatus := range processStatuses { + if len(processStatus.Plans) == 0 { + logger.Errorf("The process %s doesn't contain any plans!", processName) + return nil + } + currentPlan = processStatus.Plans[len(processStatus.Plans)-1] + } + + if currentPlan.Completed != nil { + logger.Debugf("The Agent hasn't reported working on the new config yet, the last plan finished at %s", + currentPlan.Completed.Format(time.RFC3339)) + return nil + } + + var lastStartedStep *health.StepStatus + for _, m := range currentPlan.Moves { + for _, s := range m.Steps { + if s.Started != nil && s.Completed == nil { + lastStartedStep = s + } + } + } + + return lastStartedStep 
+} + +// isWaitStep returns true if the Agent is currently waiting for something to happen. +// +// Most of the time, the Agent waits for an initialization by other member of the cluster. In such case, +// holding the rollout does not improve the overall system state. Even if the probe returns true too quickly +// the worst thing that can happen is a short service interruption, which is still better than full service outage. +// +// The 15 seconds explanation: +// - The status file is written every 10s but the Agent processes steps independently of it +// - In order to avoid reacting on a newly added wait Step (as they can naturally go away), we're giving the Agent +// at least 15 seconds to spend on that Step. +// - This hopefully prevents the Probe from flipping False to True too quickly. +func isWaitStep(status *health.StepStatus) bool { + // Some logic behind 15 seconds: the health status file is dumped each 10 seconds, so we are sure that if the agent + // has been in the step for 10 seconds - this means it is waiting for the other hosts, and they are not available + fifteenSecondsAgo := time.Now().Add(time.Duration(-15) * time.Second) + if status.IsWaitStep && status.Completed == nil && status.Started.Before(fifteenSecondsAgo) { + logger.Debugf("Indicated a wait Step, status: %s, started at %s but hasn't finished "+ + "yet. 
Marking the probe as ready", status.Step, status.Started.Format(time.RFC3339)) + return true + } + return false +} + +func isInGoalState(ctx context.Context, health health.Status, conf config.Config) (bool, error) { + if isHeadlessMode() { + return headless.PerformCheckHeadlessMode(ctx, health, conf) + } + return performCheckOMMode(health), nil +} + +// performCheckOMMode does a general check if the Agent has reached the goal state - must be called when Agent is in +// "OM mode" +func performCheckOMMode(health health.Status) bool { + for _, v := range health.Statuses { + logger.Debug(v) + if v.IsInGoalState { + return true + } + } + return false +} + +func isHeadlessMode() bool { + return os.Getenv(headlessAgent) == "true" +} + +func kubernetesClientset() (kubernetes.Interface, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to get in cluster config: %s", err) + } + // creates the clientset + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to build config: %s", err) + } + return clientset, nil +} + +func parseHealthStatus(reader io.Reader) (health.Status, error) { + var health health.Status + data, err := io.ReadAll(reader) + if err != nil { + return health, err + } + + err = json.Unmarshal(data, &health) + return health, err +} + +func initLogger(l *lumberjack.Logger) { + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + consoleCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(os.Stdout), + zap.DebugLevel) + + cores := []zapcore.Core{consoleCore} + if config.ReadBoolWitDefault(config.WithAgentFileLogging, "true") { + fileCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(l), + zap.DebugLevel) + cores = append(cores, fileCore) + } + + core := zapcore.NewTee(cores...) 
+ log := zap.New(core, zap.Development()) + logger = log.Sugar() + + logger.Infof("logging configuration: %+v", l) +} + +func main() { + ctx := context.Background() + clientSet, err := kubernetesClientset() + if err != nil { + panic(err) + } + + initLogger(config.GetLogger()) + + healthStatusFilePath := config.GetEnvOrDefault(config.AgentHealthStatusFilePathEnv, config.DefaultAgentHealthStatusFilePath) + file, err := os.Open(healthStatusFilePath) + // The agent might be slow in creating the health status file. + // In that case, we don't want to panic to show the message + // in the kubernetes description. That would be a red herring, since that will solve itself with enough time. + if err != nil { + logger.Errorf("health status file not available yet: %s ", err) + os.Exit(1) + } + + cfg, err := config.BuildFromEnvVariables(clientSet, isHeadlessMode(), file) + if err != nil { + panic(err) + } + + ready, err := isPodReady(ctx, cfg) + if err != nil { + panic(err) + } + if !ready { + os.Exit(1) + } +} + +// isInReadyState checks the MongoDB Server state. It returns true if the mongod process is up and its state +// is PRIMARY or SECONDARY. +func isInReadyState(health health.Status) bool { + if len(health.Statuses) == 0 { + return true + } + for _, processHealth := range health.Statuses { + // We know this loop should run only once, in Kubernetes there's + // only 1 server managed per host. 
+ if !processHealth.ExpectedToBeUp { + // Process may be down intentionally (if the process is marked as disabled in the automation config) + return true + } + + timeMongoUp := time.Unix(processHealth.LastMongoUpTime, 0) + mongoUpThreshold := time.Now().Add(-mongodNotReadyIntervalMinutes) + mongoIsHealthy := timeMongoUp.After(mongoUpThreshold) + // The case in which the agent is too old to publish replication status is handled inside "IsReadyState" + return mongoIsHealthy && processHealth.IsReadyState() + } + return false +} diff --git a/cmd/readiness/readiness_test.go b/cmd/readiness/readiness_test.go new file mode 100644 index 000000000..11222effa --- /dev/null +++ b/cmd/readiness/readiness_test.go @@ -0,0 +1,320 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "io" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness/testdata" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + + "github.com/stretchr/testify/assert" +) + +// TestDeadlockDetection verifies that if the agent is stuck in "WaitAllRsMembersUp" phase (started > 15 seconds ago) +// then the function returns "ready" +func TestDeadlockDetection(t *testing.T) { + ctx := context.Background() + type TestConfig struct { + conf config.Config + isErrorExpected bool + isReadyExpected bool + } + tests := map[string]TestConfig{ + "Ready but deadlocked on WaitAllRsMembersUp": { + conf: testConfig("testdata/health-status-deadlocked.json"), + isReadyExpected: true, + }, + "Ready but deadlocked on WaitCanUpdate while changing the versions with multiple plans": { + conf: testConfig("testdata/health-status-deadlocked-with-prev-config.json"), + isReadyExpected: true, + }, + "Ready but deadlocked on WaitHasCorrectAutomationCredentials (HELP-39937, HELP-39966)": { + conf: 
testConfig("testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json"), + isReadyExpected: true, + }, + "Ready and no deadlock detected": { + conf: testConfig("testdata/health-status-no-deadlock.json"), + isReadyExpected: true, + }, + "Ready and positive scenario": { + conf: testConfig("testdata/health-status-ok.json"), + isReadyExpected: true, + }, + "Ready and Pod readiness is correctly checked when no ReplicationStatus is present on the file": { + conf: testConfig("testdata/health-status-no-replication.json"), + isReadyExpected: true, + }, + "Ready and MongoDB replication state is reported by agents": { + conf: testConfig("testdata/health-status-ok-no-replica-status.json"), + isReadyExpected: true, + }, + "Not Ready If replication state is not PRIMARY or SECONDARY, Pod is not ready": { + conf: testConfig("testdata/health-status-not-readable-state.json"), + isReadyExpected: false, + }, + "Not Ready because of less than 15 seconds passed by after the health file update": { + conf: testConfig("testdata/health-status-pending.json"), + isReadyExpected: false, + }, + "Not Ready because there are no plans": { + conf: testConfig("testdata/health-status-no-plans.json"), + isReadyExpected: false, + }, + "Not Ready because there are no statuses": { + conf: testConfig("testdata/health-status-no-plans.json"), + isReadyExpected: false, + }, + "Not Ready because there are no processes": { + conf: testConfig("testdata/health-status-no-processes.json"), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 90 seconds": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*90), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 1 hour": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Hour*1), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 2 days": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Hour*48), + isReadyExpected: 
false, + }, + "Ready and mongod is up for 30 seconds": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*30), + isReadyExpected: true, + }, + "Ready and mongod is up for 1 second": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*30), + isReadyExpected: true, + }, + "Not Ready because of mongod bootstrap errors": { + conf: testConfigWithMongoUp("testdata/health-status-error-tls.json", time.Second*30), + isReadyExpected: false, + }, + "Not Ready because of waiting on an upgrade start in a recomputed plan (a real scenario for an interrupted start in EA)": { + conf: testConfigWithMongoUp("testdata/health-status-enterprise-upgrade-interrupted.json", time.Second*30), + isReadyExpected: false, + }, + } + for testName := range tests { + testConfig := tests[testName] + t.Run(testName, func(t *testing.T) { + ready, err := isPodReady(ctx, testConfig.conf) + if testConfig.isErrorExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, testConfig.isReadyExpected, ready) + }) + } +} + +func TestObtainingCurrentStep(t *testing.T) { + noDeadlockHealthExample, _ := parseHealthStatus(testConfig("testdata/health-status-no-deadlock.json").HealthStatusReader) + now := time.Now() + tenMinutesAgo := time.Now().Add(-time.Minute * 10) + + type TestConfig struct { + processStatuses map[string]health.MmsDirectorStatus + expectedStep string + } + tests := map[string]TestConfig{ + "No deadlock example should point to WaitFeatureCompatibilityVersionCorrect": { + processStatuses: noDeadlockHealthExample.MmsStatus, + expectedStep: "WaitFeatureCompatibilityVersionCorrect", + }, + "Find single Started Step": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "will be ignored as completed", + Started: &tenMinutesAgo, + Completed: &now, + }, + { + Step: "test", + Started: 
&tenMinutesAgo, + }, + { + Step: "will be ignored as completed", + Started: &tenMinutesAgo, + Completed: &now, + }, + }, + }, + }, + Started: &tenMinutesAgo, + }, + }, + }, + }, + expectedStep: "test", + }, + "Find no Step in completed plan": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "test", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: &tenMinutesAgo, + Completed: &now, + }, + }, + }, + }, + expectedStep: "", + }, + "Find single Started step in the latest plan only": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "will be ignored as only the last plan is evaluated", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: &tenMinutesAgo, + }, + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "test", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: &tenMinutesAgo, + }, + }, + }, + }, + expectedStep: "test", + }, + } + for testName := range tests { + testConfig := tests[testName] + t.Run(testName, func(t *testing.T) { + step := findCurrentStep(testConfig.processStatuses) + if len(testConfig.expectedStep) == 0 { + assert.Nil(t, step) + } else { + assert.Equal(t, testConfig.expectedStep, step.Step) + } + }) + } +} + +// TestReadyWithWaitForCorrectBinaries tests the Static Containers Architecture mode for the Agent. +// In this case, the Readiness Probe needs to return Ready and let the StatefulSet Controller to proceed +// with the Pod rollout. 
+func TestReadyWithWaitForCorrectBinaries(t *testing.T) { + ctx := context.Background() + c := testConfigWithMongoUp("testdata/health-status-ok-with-WaitForCorrectBinaries.json", time.Second*30) + ready, err := isPodReady(ctx, c) + + assert.True(t, ready) + assert.NoError(t, err) +} + +// TestHeadlessAgentHasntReachedGoal verifies that the probe reports "false" if the config version is higher than the +// last achieved version of the Agent +// Note that the edge case is checked here: the health-status-ok.json has the "WaitRsInit" phase stuck in the last plan +// (as Agent doesn't marks all the step statuses finished when it reaches the goal) but this doesn't affect the result +// as the whole plan is complete already +func TestHeadlessAgentHasntReachedGoal(t *testing.T) { + ctx := context.Background() + t.Setenv(headlessAgent, "true") + c := testConfig("testdata/health-status-ok.json") + c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 6)) + ready, err := isPodReady(ctx, c) + assert.False(t, ready) + assert.NoError(t, err) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) + assert.Equal(t, map[string]string{"agent.mongodb.com/version": "5"}, thePod.Annotations) +} + +// TestHeadlessAgentReachedGoal verifies that the probe reports "true" if the config version is equal to the +// last achieved version of the Agent +func TestHeadlessAgentReachedGoal(t *testing.T) { + ctx := context.Background() + t.Setenv(headlessAgent, "true") + c := testConfig("testdata/health-status-ok.json") + c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 5)) + ready, err := isPodReady(ctx, c) + assert.True(t, ready) + assert.NoError(t, err) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) + assert.Equal(t, 
map[string]string{"agent.mongodb.com/version": "5"}, thePod.Annotations) +} + +func testConfig(healthFilePath string) config.Config { + return testConfigWithMongoUp(healthFilePath, 15*time.Second) +} + +func testConfigWithMongoUp(healthFilePath string, timeSinceMongoLastUp time.Duration) config.Config { + file, err := os.Open(healthFilePath) + if err != nil { + panic(err) + } + defer file.Close() + + status, err := parseHealthStatus(file) + if err != nil { + panic(err) + } + + for key, processHealth := range status.Statuses { + processHealth.LastMongoUpTime = time.Now().Add(-timeSinceMongoLastUp).Unix() + // Need to reassign the object back to map as 'processHealth' is a copy of the struct + status.Statuses[key] = processHealth + } + + return config.Config{ + HealthStatusReader: NewTestHealthStatusReader(status), + Namespace: "test-ns", + AutomationConfigSecretName: "test-mongodb-automation-config", + Hostname: "test-mongodb-0", + } +} + +func NewTestHealthStatusReader(status health.Status) io.Reader { + data, err := json.Marshal(status) + if err != nil { + panic(err) + } + return bytes.NewReader(data) +} diff --git a/cmd/readiness/testdata/config-current-version.json b/cmd/readiness/testdata/config-current-version.json new file mode 100644 index 000000000..0089ee2ea --- /dev/null +++ b/cmd/readiness/testdata/config-current-version.json @@ -0,0 +1,3 @@ +{ + "version": 5 +} diff --git a/cmd/readiness/testdata/config-new-version.json b/cmd/readiness/testdata/config-new-version.json new file mode 100644 index 000000000..13f5d4ce0 --- /dev/null +++ b/cmd/readiness/testdata/config-new-version.json @@ -0,0 +1,3 @@ +{ + "version": 6 +} diff --git a/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json b/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json new file mode 100644 index 000000000..c6e3053df --- /dev/null +++ 
b/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json @@ -0,0 +1,116 @@ +{ + "statuses": { + "svcprovider-cluster-config-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1669378820, + "ExpectedToBeUp": true, + "ReplicationStatus": 2 + } + }, + "mmsStatus": { + "svcprovider-cluster-config-0": { + "name": "svcprovider-cluster-config-0", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "started": "2022-11-25T11:35:45.442597196Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2022-11-25T11:35:45.44261521Z", + "completed": "2022-11-25T11:35:50.8280641Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2022-11-25T11:35:50.828139893Z", + "completed": "2022-11-25T11:35:52.623601143Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2022-11-25T11:35:52.623699243Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for 
featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "started": "2022-11-25T11:35:53.820885768Z", + "completed": null, + "moves": [ + { + "move": "WaitHasCorrectAutomationCredentials", + "moveDoc": "Wait for the automation user to be added (if needed)", + "steps": [ + { + "step": "WaitHasCorrectAutomationCredentials", + "stepDoc": "Wait for the automation user to be added (if needed)", + "isWaitStep": true, + "started": "2022-11-25T11:35:53.820925028Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json b/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json new file mode 100644 index 000000000..0c96f6fc1 --- /dev/null +++ b/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json @@ -0,0 +1,159 @@ +{ + "statuses": { + "mdb0-1": { + "IsInGoalState": false, + "LastMongoUpTime": 1674151493, + "ExpectedToBeUp": true, + "ReplicationStatus": 1 + } + }, + "mmsStatus": { + "mdb0-1": { + "name": "mdb0-1", + "lastGoalVersionAchieved": 2, + "plans": [ + { + "automationConfigVersion": 2, + "started": "2023-01-19T17:27:17.438126081Z", + "completed": "2023-01-19T17:27:22.74117999Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-01-19T17:27:17.438319285Z", + "completed": "2023-01-19T17:27:21.672553263Z", + "result": "success" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": "2023-01-19T17:27:21.672605664Z", + 
"completed": null, + "result": "error" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 2, + "started": "2023-01-19T17:36:34.742889301Z", + "completed": "2023-01-19T17:36:47.913043483Z", + "moves": [ + { + "move": "WaitHasCorrectAutomationCredentials", + "moveDoc": "Wait for the automation user to be added (if needed)", + "steps": [ + { + "step": "WaitHasCorrectAutomationCredentials", + "stepDoc": "Wait for the automation user to be added (if needed)", + "isWaitStep": true, + "started": "2023-01-19T17:36:34.742906201Z", + "completed": null, + "result": "wait" + } + ] + } + ] + }, + { + "automationConfigVersion": 3, + "started": "2023-01-19T17:38:33.622622261Z", + "completed": null, + "moves": [ + { + "move": "ChangeVersion", + "moveDoc": "Change MongoDB Version", + "steps": [ + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-01-19T17:38:33.622638561Z", + "completed": "2023-01-19T17:38:33.622959367Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-01-19T17:38:33.622960067Z", + "completed": "2023-01-19T17:38:33.623363973Z", + "result": "success" + }, + { + "step": "WaitCanUpdate", + "stepDoc": "Wait until the update can be made", + "isWaitStep": true, + "started": "2023-01-19T17:38:33.623364774Z", + "completed": null, + "result": "wait" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": 
"Stop", + "stepDoc": "Shutdown the process", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "RemoveDbFilesIfArbiterDowngrade", + "stepDoc": "Delete db files if this is an arbiter downgrade.", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "StartWithUpgrade", + "stepDoc": "Start a mongo instance (upgrade)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-deadlocked.json b/cmd/readiness/testdata/health-status-deadlocked.json new file mode 100644 index 000000000..898b867aa --- /dev/null +++ b/cmd/readiness/testdata/health-status-deadlocked.json @@ -0,0 +1,95 @@ +{ + "statuses": { + "foo": { + "IsInGoalState": false, + "LastMongoUpTime": 1568188790, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + "foo": { + "name": "foo", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "started": "2019-09-11T07:58:43.834514922Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2019-09-11T07:58:43.83456958Z", + "completed": "2019-09-11T07:58:58.535663992Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2019-09-11T07:58:58.535793305Z", + "completed": "2019-09-11T07:59:02.025389368Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + 
"started": "2019-09-11T07:59:02.026037414Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json b/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json new file mode 100644 index 000000000..55678d574 --- /dev/null +++ b/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json @@ -0,0 +1,271 @@ +{ + "statuses": { + "my-replica-set-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1689233828, + "ExpectedToBeUp": true, + "ReplicationStatus": 2 + } + }, + "mmsStatus": { + "my-replica-set-0": { + "name": "my-replica-set-0", + "lastGoalVersionAchieved": 8, + "plans": [ + { + "automationConfigVersion": 8, + "started": "2023-07-13T07:31:43.706340549Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:31:43.706368293Z", + "completed": "2023-07-13T07:31:52.545770428Z", + "result": "success" + } + ] + }, + { + "move": "DownloadMongosh", + "moveDoc": "Download Mongosh", + "steps": [ + { + "step": "DownloadMongosh", + "stepDoc": "Download mongosh (may take a while)", + "isWaitStep": false, 
+ "started": "2023-07-13T07:31:52.545834821Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 8, + "started": "2023-07-13T07:32:02.715922827Z", + "completed": "2023-07-13T07:32:20.938102204Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-07-13T07:32:02.715947483Z", + "completed": "2023-07-13T07:32:09.844613082Z", + "result": "success" + } + ] + }, + { + "move": "UpdateSymLink", + "moveDoc": "Update the mongosh binary symlink", + "steps": [ + { + "step": "UpdateSymLink", + "stepDoc": "Update the mongosh binary symlink", + "isWaitStep": false, + "started": "2023-07-13T07:32:09.844681639Z", + "completed": 
"2023-07-13T07:32:14.893961595Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2023-07-13T07:32:14.894030206Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 9, + "started": "2023-07-13T07:35:56.706945979Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:35:56.706976268Z", + "completed": "2023-07-13T07:36:01.116832943Z", + "result": "success" + } + ] + }, + { + "move": "ChangeVersion", + "moveDoc": "Change MongoDB Version", + "steps": [ + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.11709619Z", + "completed": "2023-07-13T07:36:01.11734988Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.117352255Z", + "completed": 
"2023-07-13T07:36:01.117626127Z", + "result": "success" + }, + { + "step": "WaitCanUpdate", + "stepDoc": "Wait until the update can be made", + "isWaitStep": true, + "started": "2023-07-13T07:36:01.117628516Z", + "completed": "2023-07-13T07:36:01.117818709Z", + "result": "success" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.117821034Z", + "completed": "2023-07-13T07:36:01.18783613Z", + "result": "success" + }, + { + "step": "Stop", + "stepDoc": "Shutdown the process", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.187839391Z", + "completed": null, + "result": "" + }, + { + "step": "RemoveDbFilesIfArbiterDowngrade", + "stepDoc": "Delete db files if this is an arbiter downgrade.", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "StartWithUpgrade", + "stepDoc": "Start a mongo instance (upgrade)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-error-tls.json b/cmd/readiness/testdata/health-status-error-tls.json new file mode 100644 index 000000000..d6c4f2ebb --- /dev/null +++ b/cmd/readiness/testdata/health-status-error-tls.json @@ -0,0 +1,146 @@ +{ + "statuses": { + "test-tls-base-rs-require-ssl-1": { + "IsInGoalState": false, + "LastMongoUpTime": 0, + "ExpectedToBeUp": true, + "ReplicationStatus": -1 + } + }, + "mmsStatus": { + "test-tls-base-rs-require-ssl-1": { + "name": "test-tls-base-rs-require-ssl-1", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "automationConfigVersion": 5, + "started": "2023-07-13T07:01:44.951990751Z", + "completed": null, + "moves": [ + { + "move": "DownloadMongosh", + "moveDoc": "Download Mongosh", + "steps": [ + { + "step": "DownloadMongosh", + "stepDoc": "Download mongosh (may take a 
while)", + "isWaitStep": false, + "started": "2023-07-13T07:01:44.952016495Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 5, + "started": "2023-07-13T07:01:49.72582887Z", + "completed": null, + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-07-13T07:01:49.725856903Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "UpdateSymLink", + "moveDoc": "Update the mongosh binary symlink", + "steps": [ + { + "step": "UpdateSymLink", + "stepDoc": "Update the mongosh binary symlink", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + 
} + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "\u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Plan execution failed on step StartFresh as part of move Start : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Failed to apply action. Result = \u003cnil\u003e : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Error starting mongod : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Error running start command. cmd=[Args=[/var/lib/mongodb-mms-automation/mongodb-linux-x86_64-6.0.5-ent/bin/mongod -f /data/automation-mongod.conf]], stip=[args={\"net\":{\"bindIp\":\"0.0.0.0\",\"port\":27017,\"tls\":{\"CAFile\":\"/mongodb-automation/tls/ca/ca-pem\",\"FIPSMode\":true,\"allowConnectionsWithoutCertificates\":true,\"certificateKeyFile\":\"/mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\",\"mode\":\"requireTLS\"}},\"replication\":{\"replSetName\":\"test-tls-base-rs-require-ssl\"},\"storage\":{\"dbPath\":\"/data\"},\"systemLog\":{\"destination\":\"file\",\"path\":\"/var/log/mongodb-mms-automation/mongodb.log\"}}[],confPath=/data/automation-mongod.conf,version=6.0.5-ent-c9a99c120371d4d4c52cbb15dac34a36ce8d3b1d(enterprise),isKmipRotateMasterKey=false,useOldConfFile=false]\n\t,\nConfig Used:\n# THIS FILE IS MAINTAINED BY https://cloud-qa.mongodb.com . DO NOT MODIFY AS IT WILL BE OVERWRITTEN.\n# To make changes to your MongoDB deployment, please visit https://cloud-qa.mongodb.com . 
Your Group ID is 64a3eb7b7b02b627c635ea2b .\nnet:\n bindIp: 0.0.0.0\n port: 27017\n tls:\n CAFile: /mongodb-automation/tls/ca/ca-pem\n FIPSMode: true\n allowConnectionsWithoutCertificates: true\n certificateKeyFile: /mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\n mode: requireTLS\nprocessManagement:\n fork: \"true\"\nreplication:\n replSetName: test-tls-base-rs-require-ssl\nstorage:\n dbPath: /data\nsystemLog:\n destination: file\n path: /var/log/mongodb-mms-automation/mongodb.log\n\t- Output (stdout/stderr): \nabout to fork child process, waiting until server is ready for connections.\nforked process: 823\nERROR: child process failed, exited with 1\nTo see additional information in this output, start without the \"--fork\" option.\n\n\t- Mongo Logs: \n{\"t\":{\"$date\":\"2023-07-13T07:03:13.883+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23172, \"ctx\":\"-\",\"msg\":\"FIPS 140-2 mode activated\"}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.884+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4915701, \"ctx\":\"main\",\"msg\":\"Initialized wire specification\",\"attr\":{\"spec\":{\"incomingExternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"incomingInternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"outgoing\":{\"minWireVersion\":6,\"maxWireVersion\":17},\"isInternalClient\":true}}}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.888+00:00\"},\"s\":\"E\", \"c\":\"NETWORK\", \"id\":23248, \"ctx\":\"main\",\"msg\":\"Cannot read certificate file\",\"attr\":{\"keyFile\":\"/mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\",\"error\":\"error:02001002:system library:fopen:No such file or directory\"}}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.888+00:00\"},\"s\":\"F\", \"c\":\"CONTROL\", \"id\":20574, \"ctx\":\"main\",\"msg\":\"Error during global initialization\",\"attr\":{\"error\":{\"code\":140,\"codeName\":\"InvalidSSLConfiguration\",\"errmsg\":\"Can not set up PEM key file.\"}}}\n : exit status 
1" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-no-deadlock.json b/cmd/readiness/testdata/health-status-no-deadlock.json new file mode 100644 index 000000000..bfb0ff05e --- /dev/null +++ b/cmd/readiness/testdata/health-status-no-deadlock.json @@ -0,0 +1,81 @@ +{ + "statuses": { + "wicklow-0-2": { + "IsInGoalState": false, + "LastMongoUpTime": 1579704888, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + "wicklow-0-2": { + "name": "wicklow-0-2", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "started": "2020-01-22T14:47:09.3943094Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2020-01-22T14:47:09.398723407Z", + "completed": "2020-01-22T14:47:14.026809676Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2020-01-22T14:47:14.02694938Z", + "completed": "2020-01-22T14:47:15.014096704Z", + "result": "success" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": "2020-01-22T14:47:15.014328536Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": "2020-01-22T14:47:18.323900219Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 
0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-no-plans.json b/cmd/readiness/testdata/health-status-no-plans.json new file mode 100644 index 000000000..8adc0a798 --- /dev/null +++ b/cmd/readiness/testdata/health-status-no-plans.json @@ -0,0 +1,19 @@ +{ + "statuses": { + "foo": { + "IsInGoalState": false, + "LastMongoUpTime": 1568188790, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + "foo": { + "name": "foo", + "lastGoalVersionAchieved": -1, + "plans": [ + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-no-processes.json b/cmd/readiness/testdata/health-status-no-processes.json new file mode 100644 index 000000000..55d174fbf --- /dev/null +++ b/cmd/readiness/testdata/health-status-no-processes.json @@ -0,0 +1,12 @@ +{ + "statuses": { + "foo": { + "IsInGoalState": false, + "LastMongoUpTime": 1568188790, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + + } +} diff --git a/cmd/readiness/testdata/health-status-no-replication.json b/cmd/readiness/testdata/health-status-no-replication.json new file mode 100644 index 000000000..325d4a3b4 --- /dev/null +++ b/cmd/readiness/testdata/health-status-no-replication.json @@ -0,0 +1,81 @@ +{ + "mmsStatus": { + "bar": { + "errorString": "", + "errorCode": 0, + "plans": [ + { + "moves": [ + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:55.645615846Z", + "started": "2019-09-11T14:20:40.631404367Z", + "isWaitStep": false, + "stepDoc": "Download mongodb binaries (may take a while)", + "step": "Download" + } + ], + "moveDoc": "Download mongodb binaries", + "move": "Download" + }, + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:59.325129842Z", + "started": "2019-09-11T14:20:55.645743003Z", + "isWaitStep": false, + "stepDoc": "Start a mongo instance (start fresh)", + "step": "StartFresh" + } + ], + "moveDoc": "Start the process", + "move": "Start" + }, + { + "steps": [ + { + "result": 
"wait", + "completed": null, + "started": "2019-09-11T14:20:59.325272608Z", + "isWaitStep": true, + "stepDoc": "Wait for the replica set to be initialized by another member", + "step": "WaitRsInit" + } + ], + "moveDoc": "Wait for the replica set to be initialized by another member", + "move": "WaitRsInit" + }, + { + "steps": [ + { + "result": "", + "completed": null, + "started": null, + "isWaitStep": true, + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "step": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "move": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "completed": "2019-09-11T14:21:42.034934358Z", + "started": "2019-09-11T14:20:40.631348806Z" + } + ], + "lastGoalVersionAchieved": 5, + "name": "bar" + } + }, + "statuses": { + "bar": { + "ExpectedToBeUp": true, + "LastMongoUpTime": 1568222195, + "IsInGoalState": true + } + } +} diff --git a/cmd/readiness/testdata/health-status-not-readable-state.json b/cmd/readiness/testdata/health-status-not-readable-state.json new file mode 100644 index 000000000..82449bbc0 --- /dev/null +++ b/cmd/readiness/testdata/health-status-not-readable-state.json @@ -0,0 +1,82 @@ +{ + "mmsStatus": { + "bar": { + "errorString": "", + "errorCode": 0, + "plans": [ + { + "moves": [ + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:55.645615846Z", + "started": "2019-09-11T14:20:40.631404367Z", + "isWaitStep": false, + "stepDoc": "Download mongodb binaries (may take a while)", + "step": "Download" + } + ], + "moveDoc": "Download mongodb binaries", + "move": "Download" + }, + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:59.325129842Z", + "started": "2019-09-11T14:20:55.645743003Z", + "isWaitStep": false, + "stepDoc": "Start a mongo instance (start fresh)", + "step": "StartFresh" + } + ], + "moveDoc": "Start the process", + "move": "Start" + }, + { + "steps": [ + { + "result": "wait", + 
"completed": null, + "started": "2019-09-11T14:20:59.325272608Z", + "isWaitStep": true, + "stepDoc": "Wait for the replica set to be initialized by another member", + "step": "WaitRsInit" + } + ], + "moveDoc": "Wait for the replica set to be initialized by another member", + "move": "WaitRsInit" + }, + { + "steps": [ + { + "result": "", + "completed": null, + "started": null, + "isWaitStep": true, + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "step": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "move": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "completed": "2019-09-11T14:21:42.034934358Z", + "started": "2019-09-11T14:20:40.631348806Z" + } + ], + "lastGoalVersionAchieved": 5, + "name": "bar" + } + }, + "statuses": { + "bar": { + "ReplicationStatus": 3, + "ExpectedToBeUp": true, + "LastMongoUpTime": 1568222195, + "IsInGoalState": true + } + } +} diff --git a/cmd/readiness/testdata/health-status-ok-no-replica-status.json b/cmd/readiness/testdata/health-status-ok-no-replica-status.json new file mode 100644 index 000000000..fbf69490c --- /dev/null +++ b/cmd/readiness/testdata/health-status-ok-no-replica-status.json @@ -0,0 +1,82 @@ +{ + "mmsStatus": { + "bar": { + "errorString": "", + "errorCode": 0, + "plans": [ + { + "moves": [ + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:55.645615846Z", + "started": "2019-09-11T14:20:40.631404367Z", + "isWaitStep": false, + "stepDoc": "Download mongodb binaries (may take a while)", + "step": "Download" + } + ], + "moveDoc": "Download mongodb binaries", + "move": "Download" + }, + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:59.325129842Z", + "started": "2019-09-11T14:20:55.645743003Z", + "isWaitStep": false, + "stepDoc": "Start a mongo instance (start fresh)", + "step": "StartFresh" + } + ], + "moveDoc": "Start the process", + "move": "Start" + }, + { + "steps": [ + { + 
"result": "wait", + "completed": null, + "started": "2019-09-11T14:20:59.325272608Z", + "isWaitStep": true, + "stepDoc": "Wait for the replica set to be initialized by another member", + "step": "WaitRsInit" + } + ], + "moveDoc": "Wait for the replica set to be initialized by another member", + "move": "WaitRsInit" + }, + { + "steps": [ + { + "result": "", + "completed": null, + "started": null, + "isWaitStep": true, + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "step": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "move": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "completed": "2019-09-11T14:21:42.034934358Z", + "started": "2019-09-11T14:20:40.631348806Z" + } + ], + "lastGoalVersionAchieved": 5, + "name": "bar" + } + }, + "statuses": { + "bar": { + "ReplicationStatus": null, + "ExpectedToBeUp": true, + "LastMongoUpTime": 1568222195, + "IsInGoalState": true + } + } +} diff --git a/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json new file mode 100644 index 000000000..c2c6bb307 --- /dev/null +++ b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json @@ -0,0 +1,144 @@ +{ + "statuses": { + "my-replica-set-downgrade-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1701853492, + "ExpectedToBeUp": true, + "ReplicationStatus": 1 + } + }, + "mmsStatus": { + "my-replica-set-downgrade-0": { + "name": "my-replica-set-downgrade-0", + "lastGoalVersionAchieved": 1, + "plans": [ + { + "automationConfigVersion": 1, + "started": "2023-12-06T09:03:33.709679218Z", + "completed": "2023-12-06T09:03:43.65117796Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-12-06T09:03:33.709703572Z", + "completed": null, + 
"result": "error" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2023-12-06T09:03:35.652236845Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": "2023-12-06T09:03:43.536653463Z", + "completed": "2023-12-06T09:03:43.650871495Z", + "result": "success" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": "2023-12-06T09:03:43.650920722Z", + "completed": "2023-12-06T09:03:43.65111749Z", + "result": "success" + } + ] + } + ] + }, + { + "automationConfigVersion": 2, + "started": "2023-12-06T09:04:03.576712545Z", + "completed": null, + "moves": [ + { + "move": "ChangeVersionKube", + "moveDoc": "Change MongoDB Version on operator mode", + "steps": [ + { + "step": "CheckRunningOperatorMode", + "stepDoc": "Check Running in operator mode", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576729706Z", + "completed": "2023-12-06T09:04:03.576893698Z", + "result": "success" + }, + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576894027Z", + "completed": "2023-12-06T09:04:03.577041016Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577041402Z", + 
"completed": "2023-12-06T09:04:03.577219188Z", + "result": "success" + }, + { + "step": "WaitAllRouterConfigsFlushedForUpgrade", + "stepDoc": "Wait until flushRouterConfig has been run on all mongoses", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.577219563Z", + "completed": "2023-12-06T09:04:03.577356271Z", + "result": "success" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577356599Z", + "completed": "2023-12-06T09:04:03.604579059Z", + "result": "success" + }, + { + "step": "WaitForCorrectBinaries", + "stepDoc": "Wait until correct binaries are available", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.60458063Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-ok.json b/cmd/readiness/testdata/health-status-ok.json new file mode 100644 index 000000000..a8c3b8553 --- /dev/null +++ b/cmd/readiness/testdata/health-status-ok.json @@ -0,0 +1,81 @@ +{ + "statuses": { + "bar": { + "IsInGoalState": true, + "LastMongoUpTime": 1568222195, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + "bar": { + "name": "bar", + "lastGoalVersionAchieved": 5, + "plans": [ + { + "started": "2019-09-11T14:20:40.631348806Z", + "completed": "2019-09-11T14:21:42.034934358Z", + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2019-09-11T14:20:40.631404367Z", + "completed": "2019-09-11T14:20:55.645615846Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": 
"2019-09-11T14:20:55.645743003Z", + "completed": "2019-09-11T14:20:59.325129842Z", + "result": "success" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": "2019-09-11T14:20:59.325272608Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-pending.json b/cmd/readiness/testdata/health-status-pending.json new file mode 100644 index 000000000..1d3d36b0c --- /dev/null +++ b/cmd/readiness/testdata/health-status-pending.json @@ -0,0 +1,100 @@ +{ + "statuses": { + "foo": { + "IsInGoalState": false, + "LastMongoUpTime": 1568188790, + "ExpectedToBeUp": true + } + }, + "mmsStatus": { + "foo": { + "name": "foo", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "started": "2019-09-11T07:58:22.834514922Z", + "completed": "2019-09-11T07:58:42.834514922Z", + "moves": [] + }, + { + "started": "2019-09-11T07:58:43.834514922Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2019-09-11T07:58:43.83456958Z", + "completed": "2019-09-11T07:58:58.535663992Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance 
(start fresh)", + "isWaitStep": false, + "started": "2019-09-11T07:58:58.535793305Z", + "completed": "2019-09-11T07:59:02.025389368Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2059-09-11T07:59:02.026037414Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/k8sobjects.go b/cmd/readiness/testdata/k8sobjects.go new file mode 100644 index 000000000..e4e6d4d64 --- /dev/null +++ b/cmd/readiness/testdata/k8sobjects.go @@ -0,0 +1,33 @@ +package testdata + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Currently seems like the appending functionality on the library used by the fake +// implementation to simulate JSONPatch is broken: https://github.com/evanphx/json-patch/issues/138 +// The short term workaround is to have the annotation empty. + +// These are just k8s objects used for testing. 
Note, that these are defined in a non "_test.go" file as they are reused +// by other modules +func TestSecret(namespace, name string, version int) *corev1.Secret { + // We don't need to create a full automation config - just the json with version field is enough + deployment := fmt.Sprintf("{\"version\": %d}", version) + secret := &corev1.Secret{Data: map[string][]byte{"cluster-config.json": []byte(deployment)}} + secret.ObjectMeta = metav1.ObjectMeta{Namespace: namespace, Name: name} + return secret +} +func TestPod(namespace, name string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{ + "agent.mongodb.com/version": "", + }, + }, + } +} diff --git a/cmd/testrunner/crds/crds.go b/cmd/testrunner/crds/crds.go deleted file mode 100644 index 8766b2359..000000000 --- a/cmd/testrunner/crds/crds.go +++ /dev/null @@ -1,79 +0,0 @@ -package crds - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/ghodss/yaml" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/rest" -) - -// EnsureCreation will locate all crd files "*_crd.yaml" in the given deploy directory and ensure that these -// CRDs are created into the kubernetes cluster -func EnsureCreation(config *rest.Config, deployDir string) error { - apiextensionsClientSet, err := apiextensionsclientset.NewForConfig(config) - if err != nil { - return fmt.Errorf("error creating apiextensions client set: %v", err) - } - - crdFilePaths, err := allCrds(deployDir) - if err != nil { - return fmt.Errorf("error walking deploy directory: %v", err) - } - - for _, filePath := range crdFilePaths { - crd := &apiextensionsv1beta1.CustomResourceDefinition{} - data, err := ioutil.ReadFile(filePath) - 
if err != nil { - return fmt.Errorf("error reading file: %v", err) - } - if err := marshalCRDFromYAMLBytes(data, crd); err != nil { - return fmt.Errorf("error converting yaml bytes to CRD: %v", err) - } - _, err = apiextensionsClientSet.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) - - if apierrors.IsAlreadyExists(err) { - fmt.Println("CRD already exists") - continue - } - - if err != nil { - return fmt.Errorf("error creating custom resource definition: %v", err) - } - } - return nil -} - -func marshalCRDFromYAMLBytes(bytes []byte, crd *apiextensionsv1beta1.CustomResourceDefinition) error { - jsonBytes, err := yaml.YAMLToJSON(bytes) - if err != nil { - return err - } - return json.Unmarshal(jsonBytes, &crd) -} - -func allCrds(deployDir string) ([]string, error) { - crdDir := path.Join(deployDir, "crds") - var crdFilePaths []string - err := filepath.Walk(crdDir, func(path string, info os.FileInfo, err error) error { - if info != nil && strings.HasSuffix(info.Name(), "_crd.yaml") { - fmt.Printf("Found CRD: %s\n", info.Name()) - crdFilePaths = append(crdFilePaths, path) - } - return nil - }) - - if err != nil { - return nil, err - } - - return crdFilePaths, nil -} diff --git a/cmd/testrunner/main.go b/cmd/testrunner/main.go deleted file mode 100644 index 3425d8a35..000000000 --- a/cmd/testrunner/main.go +++ /dev/null @@ -1,272 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "os" - "path" - "time" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod" - "k8s.io/client-go/kubernetes" - - "github.com/mongodb/mongodb-kubernetes-operator/cmd/testrunner/crds" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - 
"k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - k8sClient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/yaml" -) - -type flags struct { - deployDir string - namespace string - operatorImage string - testImage string - test string -} - -func parseFlags() flags { - var namespace, deployDir, operatorImage, testImage, test *string - namespace = flag.String("namespace", "default", "the namespace the operator and tests should be deployed in") - deployDir = flag.String("deployDir", "deploy/", "the path to the directory which contains the yaml deployment files") - operatorImage = flag.String("operatorImage", "quay.io/mongodb/community-operator-dev:latest", "the image which should be used for the operator deployment") - testImage = flag.String("testImage", "quay.io/mongodb/community-operator-e2e:latest", "the image which should be used for the operator e2e tests") - test = flag.String("test", "", "test e2e test that should be run. (name of folder containing the test)") - flag.Parse() - - return flags{ - deployDir: *deployDir, - namespace: *namespace, - operatorImage: *operatorImage, - testImage: *testImage, - test: *test, - } -} - -func main() { - if err := runCmd(parseFlags()); err != nil { - panic(err) - } -} - -func runCmd(f flags) error { - config, err := rest.InClusterConfig() - if err != nil { - return fmt.Errorf("error retreiving kubernetes config: %v", err) - } - - k8s, err := k8sClient.New(config, k8sClient.Options{}) - if err != nil { - return fmt.Errorf("error creating kubernetes client %v", err) - } - - c := client.NewClient(k8s) - - if err := ensureNamespace(f.namespace, c); err != nil { - return fmt.Errorf("error ensuring namespace: %v", err) - } - - fmt.Printf("Ensured namespace: %s\n", f.namespace) - - if err := crds.EnsureCreation(config, f.deployDir); err != nil { - return fmt.Errorf("error ensuring CRDs: %v", err) - } - - fmt.Println("Ensured CRDs") - if err := deployOperator(f, c); err != nil { - return fmt.Errorf("error 
deploying operator: %v", err) - } - fmt.Println("Successfully deployed the operator") - - testToRun := "test/operator-sdk-test.yaml" - if err := buildKubernetesResourceFromYamlFile(c, testToRun, &corev1.Pod{}, withNamespace(f.namespace), withTestImage(f.testImage), withTest(f.test)); err != nil { - return fmt.Errorf("error deploying test: %v", err) - } - - nsName := types.NamespacedName{Name: "operator-sdk-test", Namespace: f.namespace} - - fmt.Println("Waiting for pod to be ready...") - testPod, err := pod.WaitForPhase(c, nsName, time.Second*5, time.Minute*5, corev1.PodRunning) - if err != nil { - return fmt.Errorf("error waiting for test pod to be created: %v", err) - } - - fmt.Println("Tailing pod logs...") - if err := tailPodLogs(config, testPod); err != nil { - return err - } - - _, err = pod.WaitForPhase(c, nsName, time.Second*5, time.Minute, corev1.PodSucceeded) - if err != nil { - return fmt.Errorf("error waiting for test to finish: %v", err) - } - - fmt.Println("Test passed!") - return nil -} - -func tailPodLogs(config *rest.Config, testPod corev1.Pod) error { - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("error getting clientset: %v", err) - } - - if err := pod.GetLogs(os.Stdout, pod.CoreV1FollowStreamer(testPod, clientset.CoreV1())); err != nil { - return fmt.Errorf("error tailing logs: %+v", err) - } - return nil -} - -func ensureNamespace(ns string, client client.Client) error { - err := client.Get(context.TODO(), types.NamespacedName{Name: ns}, &corev1.Namespace{}) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("error creating namespace: %v", err) - } else if err == nil { - fmt.Printf("Namespace %s already exists!\n", ns) - return nil - } - - newNamespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: ns, - }, - } - if err := client.Create(context.TODO(), &newNamespace); err != nil { - return fmt.Errorf("error creating namespace: %s", err) - } - return nil -} - -func 
deployOperator(f flags, c client.Client) error { - if err := buildKubernetesResourceFromYamlFile(c, path.Join(f.deployDir, "role.yaml"), &rbacv1.Role{}, withNamespace(f.namespace)); err != nil { - return fmt.Errorf("error building operator role: %v", err) - } - fmt.Println("Successfully created the operator Role") - - if err := buildKubernetesResourceFromYamlFile(c, path.Join(f.deployDir, "service_account.yaml"), &corev1.ServiceAccount{}, withNamespace(f.namespace)); err != nil { - return fmt.Errorf("error building operator service account: %v", err) - } - fmt.Println("Successfully created the operator Service Account") - - if err := buildKubernetesResourceFromYamlFile(c, path.Join(f.deployDir, "role_binding.yaml"), &rbacv1.RoleBinding{}, withNamespace(f.namespace)); err != nil { - return fmt.Errorf("error building operator role binding: %v", err) - } - fmt.Println("Successfully created the operator Role Binding") - if err := buildKubernetesResourceFromYamlFile(c, path.Join(f.deployDir, "operator.yaml"), &appsv1.Deployment{}, withNamespace(f.namespace), withOperatorImage(f.operatorImage)); err != nil { - return fmt.Errorf("error building operator deployment: %v", err) - } - fmt.Println("Successfully created the operator Deployment") - return nil -} - -// withNamespace returns a function which will assign the namespace -// of the underlying type to the value specified. We can -// add new types here as required. 
-func withNamespace(ns string) func(runtime.Object) { - return func(obj runtime.Object) { - switch v := obj.(type) { - case *rbacv1.Role: - v.Namespace = ns - case *corev1.ServiceAccount: - v.Namespace = ns - case *rbacv1.RoleBinding: - v.Namespace = ns - case *corev1.Pod: - v.Namespace = ns - case *appsv1.Deployment: - v.Namespace = ns - } - } -} - -// withTestImage assumes that the type being created is a corev1.Pod -// and will have no effect when used with other types -func withTestImage(image string) func(obj runtime.Object) { - return func(obj runtime.Object) { - if testPod, ok := obj.(*corev1.Pod); ok { - testPod.Spec.Containers[0].Image = image - } - } -} - -// withOperatorImage assumes that the underlying type is an appsv1.Deployment -// which has the operator container as the first container. There will be -// no effect when used with a non-deployment type -func withOperatorImage(image string) func(runtime.Object) { - return func(obj runtime.Object) { - if dep, ok := obj.(*appsv1.Deployment); ok { - dep.Spec.Template.Spec.Containers[0].Image = image - } - } -} - -// withTest configures the test Pod to launch with the correct -// command which will target the given test -func withTest(test string) func(obj runtime.Object) { - return func(obj runtime.Object) { - if testPod, ok := obj.(*corev1.Pod); ok { - testPod.Spec.Containers[0].Command = []string{ - "/bin/operator-sdk", - "test", - "local", - fmt.Sprintf("./test/e2e/%s", test), - "--operator-namespace", - testPod.Namespace, - "--verbose", - "--kubeconfig", - "/etc/config/kubeconfig", - } - } - } -} - -// buildKubernetesResourceFromYamlFile will create the kubernetes resource defined in yamlFilePath. All of the functional options -// provided will be applied before creation. 
-func buildKubernetesResourceFromYamlFile(c client.Client, yamlFilePath string, obj runtime.Object, options ...func(obj runtime.Object)) error { - data, err := ioutil.ReadFile(yamlFilePath) - if err != nil { - return fmt.Errorf("error reading file: %v", err) - } - - if err := marshalRuntimeObjectFromYAMLBytes(data, obj); err != nil { - return fmt.Errorf("error converting yaml bytes to service account: %v", err) - } - - for _, opt := range options { - opt(obj) - } - - return createOrUpdate(c, obj) -} - -// marshalRuntimeObjectFromYAMLBytes accepts the bytes of a yaml resource -// and unmarshals them into the provided runtime Object -func marshalRuntimeObjectFromYAMLBytes(bytes []byte, obj runtime.Object) error { - jsonBytes, err := yaml.YAMLToJSON(bytes) - if err != nil { - return err - } - return json.Unmarshal(jsonBytes, &obj) -} - -func createOrUpdate(c client.Client, obj runtime.Object) error { - if err := c.Create(context.TODO(), obj); err != nil { - if apierrors.IsAlreadyExists(err) { - return c.Update(context.TODO(), obj) - } - return fmt.Errorf("error creating %s in kubernetes: %v", obj.GetObjectKind(), err) - } - return nil -} diff --git a/cmd/versionhook/main.go b/cmd/versionhook/main.go new file mode 100644 index 000000000..6e0d02f95 --- /dev/null +++ b/cmd/versionhook/main.go @@ -0,0 +1,244 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/agent" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + agentStatusFilePathEnv = "AGENT_STATUS_FILEPATH" + + defaultNamespace = "default" + + pollingInterval time.Duration = 1 * time.Second + pollingDuration time.Duration = 60 * time.Second +) + +func main() { + ctx := context.Background() + logger := setupLogger() + + logger.Info("Running version change post-start hook") + + if 
statusPath := os.Getenv(agentStatusFilePathEnv); statusPath == "" { + logger.Fatalf(`Required environment variable "%s" not set`, agentStatusFilePathEnv) + return + } + + logger.Info("Waiting for agent health status...") + health, err := waitForAgentHealthStatus() + if err != nil { + // If the pod has just restarted then the status file will not exist. + // In that case we return and let mongod start again. + if os.IsNotExist(err) { + logger.Info("Agent health status file not found, mongod will start") + } else { + logger.Errorf("Error getting the agent health file: %s", err) + } + + return + } + + shouldDelete, err := shouldDeletePod(health) + if err != nil { + logger.Errorf("Error checking if pod should be deleted: %s", err) + } + + if shouldDelete { + logger.Infof("Pod should be deleted") + if err := deletePod(ctx); err != nil { + // We should not raise an error if the Pod could not be deleted. It can have even + // worse consequences: Pod being restarted with the same version, and the agent + // killing it immediately after. + logger.Errorf("Could not manually trigger restart of this Pod because of: %s", err) + logger.Errorf("Make sure the Pod is restarted in order for the upgrade process to continue") + } + + // If the Pod needs to be killed, we'll wait until the Pod + // is killed by Kubernetes, bringing the new container image + // into play. + var quit = make(chan struct{}) + logger.Info("Pod killed itself, waiting...") + <-quit + } else { + logger.Info("Pod should not be deleted, mongod started") + } +} + +func setupLogger() *zap.SugaredLogger { + log, err := zap.NewDevelopment() + if err != nil { + zap.S().Errorf("Error building logger config: %s", err) + os.Exit(1) + } + + return log.Sugar() +} + +// waitForAgentHealthStatus will poll the health status file and wait for it to be updated. +// The agent doesn't write the plan to the file right away and hence we need to wait for the +// latest plan to be written. 
+func waitForAgentHealthStatus() (agent.Health, error) { + ticker := time.NewTicker(pollingInterval) + defer ticker.Stop() + + totalTime := time.Duration(0) + for range ticker.C { + if totalTime > pollingDuration { + break + } + totalTime += pollingInterval + + health, err := getAgentHealthStatus() + if err != nil { + return agent.Health{}, err + } + + status, ok := health.Healthiness[getHostname()] + if !ok { + return agent.Health{}, fmt.Errorf("couldn't find status for hostname %s", getHostname()) + } + + // We determine if the file has been updated by checking if the process is not in goal state. + // As the agent is currently executing a plan, the process should not be in goal state. + if !status.IsInGoalState { + return health, nil + } + } + return agent.Health{}, fmt.Errorf("agent health status not ready after waiting %s", pollingDuration.String()) + +} + +// getAgentHealthStatus returns an instance of agent.Health read +// from the health file on disk +func getAgentHealthStatus() (agent.Health, error) { + f, err := os.Open(os.Getenv(agentStatusFilePathEnv)) + if err != nil { + return agent.Health{}, err + } + defer f.Close() + + h, err := readAgentHealthStatus(f) + if err != nil { + return agent.Health{}, fmt.Errorf("could not read health status file: %s", err) + } + return h, err +} + +// readAgentHealthStatus reads an instance of health.Health from the provided +// io.Reader +func readAgentHealthStatus(reader io.Reader) (agent.Health, error) { + var h agent.Health + data, err := io.ReadAll(reader) + if err != nil { + return h, err + } + err = json.Unmarshal(data, &h) + return h, err +} + +func getHostname() string { + return os.Getenv("HOSTNAME") +} + +// shouldDeletePod returns a boolean value indicating if this pod should be deleted +// this would be the case if the agent is currently trying to upgrade the version +// of mongodb. 
+func shouldDeletePod(health agent.Health) (bool, error) { + status, ok := health.ProcessPlans[getHostname()] + if !ok { + return false, fmt.Errorf("hostname %s was not in the process plans", getHostname()) + } + return isWaitingToBeDeleted(status), nil +} + +// isWaitingToBeDeleted determines if the agent is currently waiting +// on the mongod pod to be restarted. In order to do this, we need to check the agent +// status file and determine if the mongod has been stopped and if we are in the process +// of a version change. +func isWaitingToBeDeleted(healthStatus agent.MmsDirectorStatus) bool { + if len(healthStatus.Plans) == 0 { + return false + } + lastPlan := healthStatus.Plans[len(healthStatus.Plans)-1] + for _, m := range lastPlan.Moves { + // When changing version the plan will contain a "ChangeVersion" step + if m.Move == "ChangeVersion" { + return true + } + } + return false +} + +// deletePod attempts to delete the pod this mongod is running in +func deletePod(ctx context.Context) error { + thisPod, err := getThisPod() + if err != nil { + return fmt.Errorf("could not get pod: %s", err) + } + k8sClient, err := inClusterClient() + if err != nil { + return fmt.Errorf("could not get client: %s", err) + } + + if err := k8sClient.Delete(ctx, &thisPod); err != nil { + return fmt.Errorf("could not delete pod: %s", err) + } + return nil +} + +// getThisPod returns an instance of corev1.Pod that points to the current pod +func getThisPod() (corev1.Pod, error) { + podName := getHostname() + if podName == "" { + return corev1.Pod{}, fmt.Errorf("environment variable HOSTNAME was not present") + } + + ns, err := getNamespace() + if err != nil { + return corev1.Pod{}, fmt.Errorf("could not read namespace: %s", err) + } + + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: ns, + }, + }, nil +} + +func inClusterClient() (client.Client, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, fmt.Errorf("could not get 
cluster config: %s", err) + } + + k8sClient, err := client.New(config, client.Options{}) + if err != nil { + return nil, fmt.Errorf("could not create client: %s", err) + } + return k8sClient, nil +} + +func getNamespace() (string, error) { + data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "", err + } + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + return ns, nil + } + return defaultNamespace, nil +} diff --git a/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml new file mode 100644 index 000000000..12207a6bd --- /dev/null +++ b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml @@ -0,0 +1,677 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + service.binding: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret + service.binding/connectionString: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=connectionString.standardSrv + service.binding/password: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=password + service.binding/provider: community + service.binding/type: mongodb + service.binding/username: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=username + name: mongodbcommunity.mongodbcommunity.mongodb.com +spec: + group: mongodbcommunity.mongodb.com + names: + kind: MongoDBCommunity + listKind: MongoDBCommunityList + plural: mongodbcommunity + shortNames: + - mdbc + singular: mongodbcommunity + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Current state of the MongoDB deployment + jsonPath: .status.phase + name: Phase + type: string + - description: Version of MongoDB server + jsonPath: 
.status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + description: MongoDBCommunity is the Schema for the mongodbs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongoDBCommunitySpec defines the desired state of MongoDB + properties: + additionalConnectionStringConfig: + description: Additional options to be appended to the connection string. + These options apply to the entire resource and to each user. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + additionalMongodConfig: + description: |- + AdditionalMongodConfig is additional configuration that can be passed to + each data-bearing mongod at runtime. Uses the same structure as the mongod + configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/ + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + agent: + description: AgentConfiguration sets options for the MongoDB automation + agent + properties: + auditLogRotate: + description: AuditLogRotate if enabled, will enable AuditLogRotate + for all processes. 
+ properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 + type: string + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. + type: string + timeThresholdHrs: + description: maximum hours for an individual log file before + rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object + logFile: + type: string + logLevel: + type: string + logRotate: + description: LogRotate if enabled, will enable LogRotate for all + processes. + properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 + type: string + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. 
+ type: string + timeThresholdHrs: + description: maximum hours for an individual log file before + rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object + maxLogFileDurationHours: + type: integer + systemLog: + description: SystemLog configures system log of mongod + properties: + destination: + type: string + logAppend: + type: boolean + path: + type: string + required: + - destination + - logAppend + - path + type: object + type: object + arbiters: + description: |- + Arbiters is the number of arbiters to add to the Replica Set. + It is not recommended to have more than one arbiter per Replica Set. + More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/ + type: integer + automationConfig: + description: |- + AutomationConfigOverride is merged on top of the operator created automation config. Processes are merged + by name. Currently Only the process.disabled field is supported. + properties: + processes: + items: + description: OverrideProcess contains fields that we can override + on the AutomationConfig processes. + properties: + disabled: + type: boolean + logRotate: + description: CrdLogRotate is the crd definition of LogRotate + including fields in strings while the agent supports them + as float64 + properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 + type: string + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. 
+ The string needs to be able to be converted to float64. + Fractional values of MB are supported. + type: string + timeThresholdHrs: + description: maximum hours for an individual log file + before rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object + name: + type: string + required: + - disabled + - name + type: object + type: array + replicaSet: + properties: + id: + description: |- + Id can be used together with additionalMongodConfig.replication.replSetName + to manage clusters where replSetName differs from the MongoDBCommunity resource name + type: string + settings: + description: |- + MapWrapper is a wrapper for a map to be used by other structs. + The CRD generator does not support map[string]interface{} + on the top level and hence we need to work around this with + a wrapping struct. + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + featureCompatibilityVersion: + description: |- + FeatureCompatibilityVersion configures the feature compatibility version that will + be set for the deployment + type: string + memberConfig: + description: MemberConfig + items: + properties: + priority: + type: string + tags: + additionalProperties: + type: string + type: object + votes: + type: integer + type: object + type: array + members: + description: Members is the number of members in the replica set + type: integer + prometheus: + description: Prometheus configurations. + properties: + metricsPath: + description: Indicates path to the metrics endpoint. + pattern: ^\/[a-z0-9]+$ + type: string + passwordSecretRef: + description: Name of a Secret containing a HTTP Basic Auth Password. + properties: + key: + description: Key is the key in the secret storing this password. 
+ Defaults to "password" + type: string + name: + description: Name is the name of the secret storing this user's + password + type: string + required: + - name + type: object + port: + description: Port where metrics endpoint will bind to. Defaults + to 9216. + type: integer + tlsSecretKeyRef: + description: |- + Name of a Secret (type kubernetes.io/tls) holding the certificates to use in the + Prometheus endpoint. + properties: + key: + description: Key is the key in the secret storing this password. + Defaults to "password" + type: string + name: + description: Name is the name of the secret storing this user's + password + type: string + required: + - name + type: object + username: + description: HTTP Basic Auth Username for metrics endpoint. + type: string + required: + - passwordSecretRef + - username + type: object + replicaSetHorizons: + description: |- + ReplicaSetHorizons Add this parameter and values if you need your database + to be accessed outside of Kubernetes. This setting allows you to + provide different DNS settings within the Kubernetes cluster and + to the Kubernetes cluster. The Kubernetes Operator uses split horizon + DNS for replica set members. This feature allows communication both + within the Kubernetes cluster and from outside Kubernetes. + items: + additionalProperties: + type: string + type: object + type: array + security: + description: Security configures security features, such as TLS, and + authentication settings for a deployment + properties: + authentication: + properties: + agentCertificateSecretRef: + description: |- + AgentCertificateSecret is a reference to a Secret containing the certificate and the key for the automation agent + The secret needs to have available: + - certificate under key: "tls.crt" + - private key under key: "tls.key" + If additionally, tls.pem is present, then it needs to be equal to the concatenation of tls.crt and tls.key + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + agentMode: + description: AgentMode contains the authentication mode used + by the automation agent. + enum: + - SCRAM + - SCRAM-SHA-256 + - SCRAM-SHA-1 + - X509 + type: string + ignoreUnknownUsers: + default: true + nullable: true + type: boolean + modes: + description: Modes is an array specifying which authentication + methods should be enabled. + items: + enum: + - SCRAM + - SCRAM-SHA-256 + - SCRAM-SHA-1 + - X509 + type: string + type: array + required: + - modes + type: object + roles: + description: User-specified custom MongoDB roles that should be + configured in the deployment. + items: + description: CustomRole defines a custom MongoDB role. + properties: + authenticationRestrictions: + description: The authentication restrictions the server + enforces on the role. + items: + description: |- + AuthenticationRestriction specifies a list of IP addresses and CIDR ranges users + are allowed to connect to or from. + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + required: + - clientSource + - serverAddress + type: object + type: array + db: + description: The database of the role. + type: string + privileges: + description: The privileges to grant the role. + items: + description: Privilege defines the actions a role is allowed + to perform on a given resource. 
+ properties: + actions: + items: + type: string + type: array + resource: + description: |- + Resource specifies specifies the resources upon which a privilege permits actions. + See https://www.mongodb.com/docs/manual/reference/resource-document for more. + properties: + anyResource: + type: boolean + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + - resource + type: object + type: array + role: + description: The name of the role. + type: string + roles: + description: An array of roles from which this role inherits + privileges. + items: + description: Role is the database role this user should + have + properties: + db: + description: DB is the database the role can act on + type: string + name: + description: Name is the name of the role + type: string + required: + - db + - name + type: object + type: array + required: + - db + - privileges + - role + type: object + type: array + tls: + description: TLS configuration for both client-server and server-server + communication + properties: + caCertificateSecretRef: + description: |- + CaCertificateSecret is a reference to a Secret containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
+ type: string + type: object + x-kubernetes-map-type: atomic + caConfigMapRef: + description: |- + CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" + This field is ignored when CaCertificateSecretRef is configured + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + certificateKeySecretRef: + description: |- + CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS. + The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt". + This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required. + Alternatively, an entry tls.pem, containing the concatenation of cert and key, can be provided. + If all of tls.pem, tls.crt and tls.key are present, the tls.pem one needs to be equal to the concatenation of tls.crt and tls.key + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + enabled: + type: boolean + optional: + description: Optional configures if TLS should be required + or optional for connections + type: boolean + required: + - enabled + type: object + type: object + statefulSet: + description: |- + StatefulSetConfiguration holds the optional custom StatefulSet + that should be merged into the operator created one. + properties: + metadata: + description: StatefulSetMetadataWrapper is a wrapper around Labels + and Annotations + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + type: + description: Type defines which type of MongoDB deployment the resource + should create + enum: + - ReplicaSet + type: string + users: + description: Users specifies the MongoDB users that should be configured + in your deployment + items: + properties: + additionalConnectionStringConfig: + description: |- + Additional options to be appended to the connection string. + These options apply only to this user and will override any existing options in the resource. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + connectionStringSecretName: + description: |- + ConnectionStringSecretName is the name of the secret object created by the operator which exposes the connection strings for the user. + If provided, this secret must be different for each user in a deployment. 
+ type: string + connectionStringSecretNamespace: + description: ConnectionStringSecretNamespace is the namespace + of the secret object created by the operator which exposes + the connection strings for the user. + type: string + db: + default: admin + description: DB is the database the user is stored in. Defaults + to "admin" + type: string + name: + description: Name is the username of the user + type: string + passwordSecretRef: + description: PasswordSecretRef is a reference to the secret + containing this user's password + properties: + key: + description: Key is the key in the secret storing this password. + Defaults to "password" + type: string + name: + description: Name is the name of the secret storing this + user's password + type: string + required: + - name + type: object + roles: + description: Roles is an array of roles assigned to this user + items: + description: Role is the database role this user should have + properties: + db: + description: DB is the database the role can act on + type: string + name: + description: Name is the name of the role + type: string + required: + - db + - name + type: object + type: array + scramCredentialsSecretName: + description: |- + ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials + These secrets names must be different for each user in a deployment. 
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + - roles + type: object + type: array + version: + description: Version defines which version of MongoDB will be used + type: string + required: + - security + - type + - users + type: object + status: + description: MongoDBCommunityStatus defines the observed state of MongoDB + properties: + currentMongoDBArbiters: + type: integer + currentMongoDBMembers: + type: integer + currentStatefulSetArbitersReplicas: + type: integer + currentStatefulSetReplicas: + type: integer + message: + type: string + mongoUri: + type: string + phase: + type: string + version: + type: string + required: + - currentMongoDBMembers + - currentStatefulSetReplicas + - mongoUri + - phase + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 000000000..25eecc05f --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,10 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml new file mode 100644 index 000000000..ec5c150a9 --- /dev/null +++ b/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml new file mode 100644 index 000000000..bd972fd91 --- /dev/null +++ b/config/default/kustomization.yaml @@ -0,0 +1,6 @@ +namePrefix: "" + +resources: + - ../crd + - ../rbac + - ../manager diff --git a/config/local_run/kustomization.yaml b/config/local_run/kustomization.yaml new file mode 100644 index 000000000..0a33b94d2 --- /dev/null +++ b/config/local_run/kustomization.yaml @@ -0,0 +1,6 @@ +# used to run the operator locally +namePrefix: "" + +resources: + - ../crd + - ../rbac diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml new file mode 100644 index 000000000..cb74a8d0e --- /dev/null +++ b/config/manager/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- manager.yaml + +generatorOptions: + disableNameSuffixHash: true + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: mongodb-kubernetes-operator + newName: quay.io/mongodb/mongodb-kubernetes-operator:0.5.0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml new file mode 100644 index 000000000..0705e7eae --- /dev/null +++ b/config/manager/manager.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + annotations: + email: support@mongodb.com + labels: + owner: mongodb + name: mongodb-kubernetes-operator +spec: + replicas: 1 + selector: + matchLabels: + name: mongodb-kubernetes-operator + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: mongodb-kubernetes-operator + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + values: + - mongodb-kubernetes-operator + topologyKey: kubernetes.io/hostname + containers: + - command: + - /usr/local/bin/entrypoint + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: mongodb-kubernetes-operator + - name: AGENT_IMAGE + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 + - name: VERSION_UPGRADE_HOOK_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 + - name: READINESS_PROBE_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 + - name: MONGODB_IMAGE + value: mongodb-community-server + - name: MONGODB_REPO_URL + value: quay.io/mongodb + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 + imagePullPolicy: Always + name: mongodb-kubernetes-operator + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + securityContext: + readOnlyRootFilesystem: true + runAsUser: 2000 + allowPrivilegeEscalation: false + securityContext: + seccompProfile: + type: RuntimeDefault + serviceAccountName: mongodb-kubernetes-operator diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml new file mode 100644 index 000000000..f1fe88a33 --- /dev/null +++ b/config/rbac/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- role.yaml +- role_binding.yaml +- service_account.yaml +- service_account_database.yaml +- 
role_binding_database.yaml +- role_database.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 000000000..6a9c42070 --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: mongodb-kubernetes-operator +rules: +- apiGroups: + - "" + resources: + - pods + - services + - configmaps + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mongodbcommunity.mongodb.com + resources: + - mongodbcommunity + - mongodbcommunity/status + - mongodbcommunity/spec + - mongodbcommunity/finalizers + verbs: + - get + - patch + - list + - update + - watch diff --git a/deploy/role_binding.yaml b/config/rbac/role_binding.yaml similarity index 100% rename from deploy/role_binding.yaml rename to config/rbac/role_binding.yaml diff --git a/deploy/testrunner/role_binding.yaml b/config/rbac/role_binding_database.yaml similarity index 68% rename from deploy/testrunner/role_binding.yaml rename to config/rbac/role_binding_database.yaml index ab4d3ee22..b02a52db3 100644 --- a/deploy/testrunner/role_binding.yaml +++ b/config/rbac/role_binding_database.yaml @@ -1,11 +1,11 @@ kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: test-runner + name: mongodb-database subjects: - kind: ServiceAccount - name: test-runner + name: mongodb-database roleRef: kind: Role - name: test-runner + name: mongodb-database apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/role_database.yaml b/config/rbac/role_database.yaml new file mode 100644 index 000000000..eaeef740b --- /dev/null +++ b/config/rbac/role_database.yaml @@ -0,0 +1,19 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-database +rules: + - apiGroups: + - "" + resources: + - secrets + 
verbs: + - get + - apiGroups: + - "" + resources: + - pods + verbs: + - patch + - delete + - get diff --git a/deploy/service_account.yaml b/config/rbac/service_account.yaml similarity index 100% rename from deploy/service_account.yaml rename to config/rbac/service_account.yaml diff --git a/deploy/testrunner/service_account.yaml b/config/rbac/service_account_database.yaml similarity index 64% rename from deploy/testrunner/service_account.yaml rename to config/rbac/service_account_database.yaml index f96e4d686..b24ae9d58 100644 --- a/deploy/testrunner/service_account.yaml +++ b/config/rbac/service_account_database.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: test-runner + name: mongodb-database diff --git a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml new file mode 100644 index 000000000..89e8dbf7a --- /dev/null +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml @@ -0,0 +1,49 @@ +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + + statefulSet: + spec: + # Name for the service object created by the operator + serviceName: example-openshift-mongodb-svc + selector: {} + # Specifies a size for the data volume different from the default 10Gi + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: [ "ReadWriteOnce", "ReadWriteMany" ] + resources: + requests: + storage: 50Gi + + template: + # Adds a custom volume to the pods + spec: + volumes: + - name: custom-volume + emptyDir: {} 
+ containers: + - name: mongodb-agent + volumeMounts: + - name: custom-volume + mountPath: /my-custom-version/mount-path diff --git a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml new file mode 100644 index 000000000..45b811f10 --- /dev/null +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml @@ -0,0 +1,245 @@ +# This example deploys a 3 members ReplicaSet with HostPath volumes +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mdb0 +spec: + members: 3 + security: + authentication: + modes: + - SCRAM + statefulSet: + spec: + template: + spec: + # Hostpath volumes are owned by root + # but MongoDB containers run as non root + # so we use an init container to change the owner of + # the directory (init containers run as root) + initContainers: + - command: + - chown + - -R + - "2000" + - /data + image: busybox + volumeMounts: + - mountPath: /data + name: data-volume + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + name: change-dir-permissions + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8G + selector: + matchLabels: + # We set this labels when creating the volume + # (see below) + type: data + storageClassName: default + - metadata: + name: logs-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8G + selector: + matchLabels: + type: logs + storageClassName: default + type: ReplicaSet + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + version: 6.0.5 +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
my-user-password +type: Opaque +stringData: + password: +--- +# Here we create 6 PVs: two for each ReplicaSet member +# (one for data, one for logs) +apiVersion: v1 +items: +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-0 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-0 + type: "" + nodeAffinity: + required: + # This is just an example for matchexpression + # This field is required depends on the specific + # of the environment the resource is deployed in + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-1 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-1 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem + status: + phase: Available +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-2 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-2 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-0 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-0 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: 
kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-1 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-1 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-2 + + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-2 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +kind: List +--- diff --git a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml new file mode 100644 index 000000000..91227aa24 --- /dev/null +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml @@ -0,0 +1,59 @@ +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mdb0 +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: [ "SCRAM" ] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + + statefulSet: + 
metadata: + annotations: + statefulSetAnnotationTest: testValue + labels: + statefulSetLabelTest: testValue + spec: + selector: + matchLabels: + podTemplateLabelTest: testValue + + template: + metadata: + annotations: + podTemplateAnnotationTest: testValue + labels: + podTemplateLabelTest: testValue + + volumeClaimTemplates: + - metadata: + name: data-volume + annotations: + pvcTemplateAnnotationTest: testValue + labels: + pvcTemplateLabelTest: testValue + +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/external_access/agent-certificate.yaml b/config/samples/external_access/agent-certificate.yaml new file mode 100644 index 000000000..c47c82a55 --- /dev/null +++ b/config/samples/external_access/agent-certificate.yaml @@ -0,0 +1,28 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: agent-certs +spec: + commonName: mms-automation-agent + dnsNames: + - automation + duration: 240h0m0s + issuerRef: # should point to your issuer + name: ca-issuer + renewBefore: 120h0m0s + secretName: agent-certs # should be equal to agentCertificateSecretRef from the MDBC resource + subject: + countries: + - US + localities: + - NY + organizationalUnits: + - a-1635241837-m5yb81lfnrz + organizations: + - cluster.local-agent + provinces: + - NY + usages: + - digital signature + - key encipherment + - client auth \ No newline at end of file diff --git a/config/samples/external_access/cert-manager-certificate.yaml b/config/samples/external_access/cert-manager-certificate.yaml new file mode 100644 index 000000000..6551bcda3 --- /dev/null +++ b/config/samples/external_access/cert-manager-certificate.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: cert-manager-certificate +spec: + secretName: mongodb-tls + issuerRef: + name: ca-issuer + kind: Issuer + commonName: "*.-svc..svc.cluster.local" + dnsNames: + - 
"*.-svc..svc.cluster.local" + - + - + - diff --git a/config/samples/external_access/cert-manager-issuer.yaml b/config/samples/external_access/cert-manager-issuer.yaml new file mode 100644 index 000000000..578c343b0 --- /dev/null +++ b/config/samples/external_access/cert-manager-issuer.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: ca-issuer +spec: + ca: + secretName: ca-key-pair diff --git a/config/samples/external_access/cert-x509.yaml b/config/samples/external_access/cert-x509.yaml new file mode 100644 index 000000000..0f2eb0906 --- /dev/null +++ b/config/samples/external_access/cert-x509.yaml @@ -0,0 +1,20 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: x509-user-cert + spec: + commonName: my-x509-authenticated-user + duration: 240h0m0s + issuerRef: + name: ca-issuer + renewBefore: 120h0m0s + secretName: x509-client-cert + subject: + organizationalUnits: + - organizationalunit + organizations: + - organization + usages: + - digital signature + - client auth + \ No newline at end of file diff --git a/config/samples/external_access/external_services.yaml b/config/samples/external_access/external_services.yaml new file mode 100644 index 000000000..b14f1a673 --- /dev/null +++ b/config/samples/external_access/external_services.yaml @@ -0,0 +1,53 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: external-mongo-service-0 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -0 + ports: + - protocol: TCP + nodePort: 31181 + port: 31181 + targetPort: 27017 + + +--- +kind: Service +apiVersion: v1 +metadata: + name: external-mongo-service-1 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -1 + ports: + - nodePort: 31182 + port: 31182 + targetPort: 27017 + + +--- +kind: Service +apiVersion: v1 
+metadata: + name: external-mongo-service-2 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -2 + ports: + - nodePort: 31183 + port: 31183 + targetPort: 27017 diff --git a/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml b/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml new file mode 100644 index 000000000..4ca651ee4 --- /dev/null +++ b/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + replicaSetHorizons: + - horizon: :31181 + - horizon: :31182 + - horizon: :31183 + security: + tls: + enabled: true + certificateKeySecretRef: + name: mongodb-tls + caConfigMapRef: + name: ca-config-map + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: + diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml new file mode 100644 index 000000000..9023cdcab --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + 
type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalConnectionStringConfig: + readPreference: secondary + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + additionalConnectionStringConfig: + readPreference: primary + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml new file mode 100644 index 000000000..c6b21f546 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + # the additional config passed to the mongod process can be specified + # either in nested or dot notation + storage.wiredTiger.engineConfig.journalCompressor: zlib + net: + port: 40333 + + +# the user credentials will be generated from this secret +# once the credentials are generated, this 
secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml new file mode 100644 index 000000000..47e55aaae --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + connectionStringSecretNamespace: other-namespace + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml new file mode 100644 index 000000000..89fe86096 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's 
password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml new file mode 100644 index 000000000..8d7a274a4 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: +# NOTE: Overwriting the "app" labelSelectors via the sts wrapper is not supported since this labelselector is not +# getting propagated to the service. 
You can add others like defined below + spec: + selector: + matchLabels: + app.kubernetes.io/name: mongodb + template: + metadata: + # label the pod which is used by the "labelSelector" in podAntiAffinty + # you can label it witch some other labels as well -- make sure it change the podAntiAffinity labelselector accordingly + labels: + app.kubernetes.io/name: mongodb + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - mongodb + topologyKey: kubernetes.io/hostname + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml new file mode 100644 index 000000000..4d89bfd9a --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: custom-role-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + roles: # custom roles are defined here + - role: testRole + db: admin + privileges: + - resource: + db: "test" + collection: "" # an empty string indicates any collection + actions: + - find + roles: [] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + - name: testRole # apply the custom role to the user + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated 
from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml new file mode 100644 index 000000000..bb8788c36 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + automationConfig: + processes: + - name: example-mongodb-1 + disabled: true + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml new file mode 100644 index 000000000..b131f95df --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: ignore-unkown-users-example +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + ignoreUnknownUsers: true # users can be added to the deployment through other sources. 
(not through the CRD) and will not be removed by the agent. + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml new file mode 100644 index 000000000..3310c67fc --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-openshift-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: + spec: + serviceName: example-openshift-mongodb-svc + selector: {} + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml new file mode 100644 index 000000000..0a8a1566a --- /dev/null +++ 
b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + # to override ReplicaSet Configuration settings: + # https://www.mongodb.com/docs/manual/reference/replica-configuration/#replica-set-configuration-document-example + automationConfig: + replicaSet: + settings: + electionTimeoutMillis: 20 + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml new file mode 100644 index 000000000..d813ce0cf --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-prometheus +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + + # You can expose metrics for Prometheus polling using the + # `prometheus` entry. 
+ prometheus: + # Metrics endpoint HTTP Basic Auth username + username: + + # Metrics endpoint HTTP Basic Auth password + passwordSecretRef: + name: metrics-endpoint-password + + # Optional, defaults to `/metrics` + # metricsPath: /metrics + + # Optional defaults to 9216 + # port: 9216 + + # Prometheus endpoint can be configured to use HTTPS + # tlsSecretKeyRef: + # name: "" + + security: + authentication: + modes: ["SCRAM"] + + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: + +# Secret holding the prometheus metrics endpoint HTTP Password. +--- +apiVersion: v1 +kind: Secret +metadata: + name: metrics-endpoint-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml new file mode 100644 index 000000000..e07a434e1 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mongodb-specify-readiness-probe-values +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: + spec: + template: + spec: + containers: + - name: 
mongodb-agent + readinessProbe: + failureThreshold: 50 + initialDelaySeconds: 10 + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml new file mode 100644 index 000000000..84f8e66af --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml @@ -0,0 +1,65 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mongodb-specify-pod-resources +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: + spec: + template: + spec: + # resources can be specified by applying an override + # per container name. 
+ containers: + - name: mongod + resources: + limits: + cpu: "0.2" + memory: 250M + requests: + cpu: "0.2" + memory: 200M + - name: mongodb-agent + resources: + limits: + cpu: "0.2" + memory: 250M + requests: + cpu: "0.2" + memory: 200M + initContainers: + - name: mongodb-agent-readinessprobe + resources: + limits: + cpu: "2" + memory: 200M + requests: + cpu: "1" + memory: 100M +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml new file mode 100644 index 000000000..d4fbf7bc9 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + tls: + enabled: true + certificateKeySecretRef: + name: tls-secret-name + caConfigMapRef: + name: tls-ca-configmap-name + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml new file mode 100644 index 000000000..ad4a99c2a --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: 
mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + tls: + enabled: true + certificateKeySecretRef: + name: mongodb-tls + caConfigMapRef: + name: ca-issuer + authentication: + modes: ["X509", "SCRAM"] + agentMode: "X509" + agentCertificateSecretRef: + name: my-agent-certificate + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + - db: admin + name: readWriteAnyDatabase + scramCredentialsSecretName: my-scram + - name: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization" + db: "$external" + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + - db: admin + name: readWriteAnyDatabase + + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: password + diff --git a/controllers/construct/build_statefulset_test.go b/controllers/construct/build_statefulset_test.go new file mode 100644 index 000000000..791fa5a8b --- /dev/null +++ b/controllers/construct/build_statefulset_test.go @@ -0,0 +1,178 @@ +package construct + +import ( + "reflect" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + + corev1 "k8s.io/api/core/v1" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + 
"github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func newTestReplicaSet() mdbv1.MongoDBCommunity { + return mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-rs", + Namespace: "my-ns", + Annotations: map[string]string{}, + }, + Spec: mdbv1.MongoDBCommunitySpec{ + Members: 3, + Version: "6.0.5", + }, + } +} + +func TestMultipleCalls_DoNotCauseSideEffects(t *testing.T) { + mdb := newTestReplicaSet() + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) + sts := &appsv1.StatefulSet{} + + t.Run("1st Call", func(t *testing.T) { + stsFunc(sts) + assertStatefulSetIsBuiltCorrectly(t, mdb, sts) + }) + t.Run("2nd Call", func(t *testing.T) { + stsFunc(sts) + assertStatefulSetIsBuiltCorrectly(t, mdb, sts) + }) + t.Run("3rd Call", func(t *testing.T) { + stsFunc(sts) + assertStatefulSetIsBuiltCorrectly(t, mdb, sts) + }) +} + +func TestManagedSecurityContext(t *testing.T) { + t.Setenv(podtemplatespec.ManagedSecurityContextEnv, "true") + + mdb := newTestReplicaSet() + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) + + sts := &appsv1.StatefulSet{} + stsFunc(sts) + + assertStatefulSetIsBuiltCorrectly(t, mdb, sts) +} + +func TestMongod_Container(t *testing.T) { + const mongodbImageMock = "fake-mongodbImage" + c := container.New(mongodbContainer(mongodbImageMock, []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration())) + + t.Run("Has correct Env vars", func(t *testing.T) { + assert.Len(t, c.Env, 1) + assert.Equal(t, agentHealthStatusFilePathEnv, c.Env[0].Name) + assert.Equal(t, "/healthstatus/agent-health-status.json", c.Env[0].Value) + }) + + t.Run("Image is correct", func(t *testing.T) { + assert.Equal(t, mongodbImageMock, c.Image) 
+ }) + + t.Run("Resource requirements are correct", func(t *testing.T) { + assert.Equal(t, resourcerequirements.Defaults(), c.Resources) + }) +} + +func TestMongoDBAgentCommand(t *testing.T) { + cmd := AutomationAgentCommand(false, mdbv1.LogLevelInfo, "testfile", 24) + baseCmd := MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logFile testfile -logLevel INFO -maxLogFileDurationHrs 24") + + cmd = AutomationAgentCommand(false, mdbv1.LogLevelInfo, "/dev/stdout", 24) + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logLevel INFO") +} + +func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, sts *appsv1.StatefulSet) { + assert.Len(t, sts.Spec.Template.Spec.Containers, 2) + assert.Len(t, sts.Spec.Template.Spec.InitContainers, 2) + assert.Equal(t, mdb.ServiceName(), sts.Spec.ServiceName) + assert.Equal(t, mdb.Name, sts.Name) + assert.Equal(t, mdb.Namespace, sts.Namespace) + assert.Equal(t, mongodbDatabaseServiceAccountName, sts.Spec.Template.Spec.ServiceAccountName) + assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 4) + assert.Len(t, sts.Spec.Template.Spec.Containers[1].Env, 1) + + managedSecurityContext := envvar.ReadBool(podtemplatespec.ManagedSecurityContextEnv) // nolint:forbidigo + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.SecurityContext) + assert.Equal(t, podtemplatespec.DefaultPodSecurityContext(), *sts.Spec.Template.Spec.SecurityContext) + } else { + assert.Nil(t, sts.Spec.Template.Spec.SecurityContext) + } + + agentContainer := sts.Spec.Template.Spec.Containers[0] + assert.Equal(t, "fake-agentImage", agentContainer.Image) + probe := agentContainer.ReadinessProbe + assert.True(t, reflect.DeepEqual(probes.New(DefaultReadiness()), *probe)) + 
assert.Equal(t, probes.New(DefaultReadiness()).FailureThreshold, probe.FailureThreshold) + assert.Len(t, agentContainer.VolumeMounts, 7) + assert.NotNil(t, agentContainer.ReadinessProbe) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.Containers[0].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.Containers[0].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } + + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "agent-scripts") + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "automation-config") + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "data-volume") + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "healthstatus") + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "logs-volume") + assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "my-rs-keyfile") + + mongodContainer := sts.Spec.Template.Spec.Containers[1] + assert.Equal(t, "fake-mongodbImage", mongodContainer.Image) + assert.Len(t, mongodContainer.VolumeMounts, 6) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.Containers[1].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.Containers[1].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } + + assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "data-volume") + assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "healthstatus") + assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "hooks") + assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "logs-volume") + assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "my-rs-keyfile") + + initContainer := sts.Spec.Template.Spec.InitContainers[0] + assert.Equal(t, versionUpgradeHookName, initContainer.Name) + assert.Equal(t, 
"fake-versionUpgradeHookImage", initContainer.Image) + assert.Len(t, initContainer.VolumeMounts, 1) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.InitContainers[0].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.InitContainers[0].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } +} + +func assertContainsVolumeMountWithName(t *testing.T, mounts []corev1.VolumeMount, name string) { + found := false + for _, m := range mounts { + if m.Name == name { + found = true + break + } + } + assert.True(t, found, "Mounts should have contained a mount with name %s, but didn't. Actual mounts: %v", name, mounts) +} diff --git a/controllers/construct/mongodbstatefulset.go b/controllers/construct/mongodbstatefulset.go new file mode 100644 index 000000000..ec94a6eac --- /dev/null +++ b/controllers/construct/mongodbstatefulset.go @@ -0,0 +1,436 @@ +package construct + +import ( + "fmt" + "os" + "strconv" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/persistentvolumeclaim" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + corev1 "k8s.io/api/core/v1" +) + +var ( + OfficialMongodbRepoUrls = []string{"docker.io/mongodb", "quay.io/mongodb"} +) + +// Environment variables used to configure the MongoDB StatefulSet. 
const (
	MongodbRepoUrlEnv          = "MONGODB_REPO_URL"
	MongodbImageEnv            = "MONGODB_IMAGE"
	MongoDBImageTypeEnv        = "MDB_IMAGE_TYPE"
	AgentImageEnv              = "AGENT_IMAGE"
	VersionUpgradeHookImageEnv = "VERSION_UPGRADE_HOOK_IMAGE"
	ReadinessProbeImageEnv     = "READINESS_PROBE_IMAGE"
)

const (
	// Container names inside the generated pod.
	AgentName   = "mongodb-agent"
	MongodbName = "mongod"

	DefaultImageType = "ubi8"

	versionUpgradeHookName      = "mongod-posthook"
	ReadinessProbeContainerName = "mongodb-agent-readinessprobe"
	// Path inside the agent container where the readiness probe binary is
	// copied by the readiness init container.
	readinessProbePath           = "/opt/scripts/readinessprobe"
	agentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH"
	// Automation config (cluster config) location mounted from the secret.
	clusterFilePath                   = "/var/lib/automation/config/cluster-config.json"
	mongodbDatabaseServiceAccountName = "mongodb-database"
	agentHealthStatusFilePathValue    = "/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json"

	OfficialMongodbEnterpriseServerImageName = "mongodb-enterprise-server"

	headlessAgentEnv           = "HEADLESS_AGENT"
	podNamespaceEnv            = "POD_NAMESPACE"
	automationConfigEnv        = "AUTOMATION_CONFIG_MAP"
	MongoDBAssumeEnterpriseEnv = "MDB_ASSUME_ENTERPRISE"

	automationMongodConfFileName = "automation-mongod.conf"
	keyfileFilePath              = "/var/lib/mongodb-mms-automation/authentication/keyfile"

	automationAgentOptions = " -skipMongoStart -noDaemonize -useLocalMongoDbTools"

	// NSS-wrapper prelude run before the agent: if the current UID is not in
	// /etc/passwd (arbitrary UID, e.g. OpenShift), synthesize a passwd entry
	// so tooling that resolves the user keeps working.
	MongodbUserCommand = `current_uid=$(id -u)
declare -r current_uid
if ! grep -q "${current_uid}" /etc/passwd ; then
sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd
echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd
export NSS_WRAPPER_PASSWD=/tmp/passwd
export LD_PRELOAD=libnss_wrapper.so
export NSS_WRAPPER_GROUP=/etc/group
fi
`
	//nolint:gosec //The credentials path is hardcoded in the container.
	// Same prelude as MongodbUserCommand, additionally exporting the agent API
	// key read from the mounted secret (used to register the agent with OM).
	MongodbUserCommandWithAPIKeyExport = `current_uid=$(id -u)
AGENT_API_KEY="$(cat /mongodb-automation/agent-api-key/agentApiKey)"
declare -r current_uid
if ! grep -q "${current_uid}" /etc/passwd ; then
sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd
echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd
export NSS_WRAPPER_PASSWD=/tmp/passwd
export LD_PRELOAD=libnss_wrapper.so
export NSS_WRAPPER_GROUP=/etc/group
fi
`
)

// MongoDBStatefulSetOwner is an interface which any resource which generates a MongoDB StatefulSet should implement.
type MongoDBStatefulSetOwner interface {
	// ServiceName returns the name of the K8S service the operator will create.
	ServiceName() string
	// GetName returns the name of the resource.
	GetName() string
	// GetNamespace returns the namespace the resource is defined in.
	GetNamespace() string
	// GetMongoDBVersion returns the version of MongoDB to be used for this resource.
	GetMongoDBVersion() string
	// AutomationConfigSecretName returns the name of the secret which will contain the automation config.
	AutomationConfigSecretName() string
	// GetUpdateStrategyType returns the UpdateStrategyType of the statefulset.
	GetUpdateStrategyType() appsv1.StatefulSetUpdateStrategyType
	// HasSeparateDataAndLogsVolumes returns whether or not the volumes for data and logs would need to be different.
	HasSeparateDataAndLogsVolumes() bool
	// GetAgentKeyfileSecretNamespacedName returns the NamespacedName of the secret which stores the keyfile for the agent.
	GetAgentKeyfileSecretNamespacedName() types.NamespacedName
	// DataVolumeName returns the name that the data volume should have.
	DataVolumeName() string
	// LogsVolumeName returns the name that the logs volume should have.
	LogsVolumeName() string
	// GetAgentLogLevel returns the log level for the MongoDB automation agent.
	GetAgentLogLevel() mdbv1.LogLevel
	// GetAgentLogFile returns the log file for the MongoDB automation agent.
	GetAgentLogFile() string
	// GetAgentMaxLogFileDurationHours returns the number of hours after which the log file should be rolled.
	GetAgentMaxLogFileDurationHours() int

	// GetMongodConfiguration returns the MongoDB configuration for each member.
	GetMongodConfiguration() mdbv1.MongodConfiguration

	// NeedsAutomationConfigVolume returns whether the statefulset needs to have a volume for the automationconfig.
	NeedsAutomationConfigVolume() bool
}

// BuildMongoDBReplicaSetStatefulSetModificationFunction builds the parts of the replica set that are common between every resource that implements
// MongoDBStatefulSetOwner.
// It doesn't configure TLS or additional containers/env vars that the statefulset might need.
func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string, withInitContainers bool) statefulset.Modification {
	labels := map[string]string{
		"app": mdb.ServiceName(),
	}

	// the health status volume is required in both agent and mongod pods.
	// the mongod requires it to determine if an upgrade is happening and needs to kill the pod
	// to prevent agent deadlock
	healthStatusVolume := statefulset.CreateVolumeFromEmptyDir("healthstatus")
	agentHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/var/log/mongodb-mms-automation/healthstatus")
	mongodHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/healthstatus")

	// Placeholders; only populated below when withInitContainers is true.
	hooksVolume := corev1.Volume{}
	scriptsVolume := corev1.Volume{}
	upgradeInitContainer := podtemplatespec.NOOP()
	readinessInitContainer := podtemplatespec.NOOP()

	// tmp volume is required by the mongodb-agent and mongod
	tmpVolume := statefulset.CreateVolumeFromEmptyDir("tmp")
	tmpVolumeMount := statefulset.CreateVolumeMount(tmpVolume.Name, "/tmp", statefulset.WithReadOnly(false))

	keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName()
	keyFileVolume := statefulset.CreateVolumeFromEmptyDir(keyFileNsName.Name)
	keyFileVolumeVolumeMount := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false))
	keyFileVolumeVolumeMountMongod := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false))

	mongodbAgentVolumeMounts := []corev1.VolumeMount{agentHealthStatusVolumeMount, keyFileVolumeVolumeMount, tmpVolumeMount}

	automationConfigVolumeFunc := podtemplatespec.NOOP()
	if mdb.NeedsAutomationConfigVolume() {
		automationConfigVolume := statefulset.CreateVolumeFromSecret("automation-config", mdb.AutomationConfigSecretName())
		automationConfigVolumeFunc = podtemplatespec.WithVolume(automationConfigVolume)
		automationConfigVolumeMount := statefulset.CreateVolumeMount(automationConfigVolume.Name, "/var/lib/automation/config", statefulset.WithReadOnly(true))
		mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, automationConfigVolumeMount)
	}
	mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, keyFileVolumeVolumeMountMongod, tmpVolumeMount}

	hooksVolumeMod := podtemplatespec.NOOP()
	scriptsVolumeMod := podtemplatespec.NOOP()

	// This is temporary code;
	// once we make the operator fully deploy static workloads, we will remove those init containers.
	if withInitContainers {
		// hooks volume is only required on the mongod pod.
		hooksVolume = statefulset.CreateVolumeFromEmptyDir("hooks")
		hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false))

		// scripts volume is only required on the mongodb-agent pod.
		scriptsVolume = statefulset.CreateVolumeFromEmptyDir("agent-scripts")
		scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false))

		upgradeInitContainer = podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount}, versionUpgradeHookImage))
		readinessInitContainer = podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount}, readinessProbeImage))
		scriptsVolumeMod = podtemplatespec.WithVolume(scriptsVolume)
		hooksVolumeMod = podtemplatespec.WithVolume(hooksVolume)

		mongodVolumeMounts = append(mongodVolumeMounts, hooksVolumeMount)
		mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, scriptsVolumeMount)
	}

	// Persistent storage: either separate data/logs PVCs, or a single PVC with
	// "data" and "logs" subpaths.
	dataVolumeClaim := statefulset.NOOP()
	logVolumeClaim := statefulset.NOOP()
	singleModeVolumeClaim := func(s *appsv1.StatefulSet) {}
	if mdb.HasSeparateDataAndLogsVolumes() {
		logVolumeMount := statefulset.CreateVolumeMount(mdb.LogsVolumeName(), automationconfig.DefaultAgentLogPath)
		dataVolumeMount := statefulset.CreateVolumeMount(mdb.DataVolumeName(), mdb.GetMongodConfiguration().GetDBDataDir())
		dataVolumeClaim = statefulset.WithVolumeClaim(mdb.DataVolumeName(), dataPvc(mdb.DataVolumeName()))
		logVolumeClaim = statefulset.WithVolumeClaim(mdb.LogsVolumeName(), logsPvc(mdb.LogsVolumeName()))
		mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, dataVolumeMount, logVolumeMount)
		mongodVolumeMounts = append(mongodVolumeMounts, dataVolumeMount, logVolumeMount)
	} else {
		mounts := []corev1.VolumeMount{
			statefulset.CreateVolumeMount(mdb.DataVolumeName(), mdb.GetMongodConfiguration().GetDBDataDir(), statefulset.WithSubPath("data")),
			statefulset.CreateVolumeMount(mdb.DataVolumeName(), automationconfig.DefaultAgentLogPath, statefulset.WithSubPath("logs")),
		}
		mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, mounts...)
		mongodVolumeMounts = append(mongodVolumeMounts, mounts...)
		singleModeVolumeClaim = statefulset.WithVolumeClaim(mdb.DataVolumeName(), dataPvc(mdb.DataVolumeName()))
	}

	podSecurityContext, _ := podtemplatespec.WithDefaultSecurityContextsModifications()

	// Agent logging settings fall back to automation-config defaults when the
	// resource leaves them unset (empty string / zero).
	agentLogLevel := mdbv1.LogLevelInfo
	if mdb.GetAgentLogLevel() != "" {
		agentLogLevel = mdb.GetAgentLogLevel()
	}

	agentLogFile := automationconfig.DefaultAgentLogFile
	if mdb.GetAgentLogFile() != "" {
		agentLogFile = mdb.GetAgentLogFile()
	}

	agentMaxLogFileDurationHours := automationconfig.DefaultAgentMaxLogFileDurationHours
	if mdb.GetAgentMaxLogFileDurationHours() != 0 {
		agentMaxLogFileDurationHours = mdb.GetAgentMaxLogFileDurationHours()
	}

	return statefulset.Apply(
		statefulset.WithName(mdb.GetName()),
		statefulset.WithNamespace(mdb.GetNamespace()),
		statefulset.WithServiceName(mdb.ServiceName()),
		statefulset.WithLabels(labels),
		statefulset.WithMatchLabels(labels),
		statefulset.WithReplicas(scale.ReplicasThisReconciliation(scaler)),
		statefulset.WithUpdateStrategyType(mdb.GetUpdateStrategyType()),
		dataVolumeClaim,
		logVolumeClaim,
		singleModeVolumeClaim,
		statefulset.WithPodSpecTemplate(
			podtemplatespec.Apply(
				podSecurityContext,
				podtemplatespec.WithPodLabels(labels),
				podtemplatespec.WithVolume(healthStatusVolume),
				automationConfigVolumeFunc,
				hooksVolumeMod,
				scriptsVolumeMod,
				podtemplatespec.WithVolume(tmpVolume),
				podtemplatespec.WithVolume(keyFileVolume),
				podtemplatespec.WithServiceAccount(mongodbDatabaseServiceAccountName),
				podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts, agentLogLevel, agentLogFile, agentMaxLogFileDurationHours, agentImage)),
				podtemplatespec.WithContainer(MongodbName, mongodbContainer(mongodbImage, mongodVolumeMounts, mdb.GetMongodConfiguration())),
				upgradeInitContainer,
				readinessInitContainer,
			),
		))
}

// BaseAgentCommand returns the invocation of the automation agent binary shared
// by all agent command variants (health-check file plus status port 5000).
func BaseAgentCommand() string {
	return "agent/mongodb-agent -healthCheckFilePath=" + agentHealthStatusFilePathValue + " -serveStatusPort=5000"
}

// AutomationAgentCommand withAgentAPIKeyExport detects whether we want to deploy this agent with the agent api key exported
// it can be used to register the agent with OM.
func AutomationAgentCommand(withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int) []string {
	// This is somewhat undocumented at https://www.mongodb.com/docs/ops-manager/current/reference/mongodb-agent-settings/
	// Not setting the -logFile option make the mongodb-agent log to stdout. Setting -logFile /dev/stdout will result in
	// an error by the agent trying to open /dev/stdout-verbose and still trying to do log rotation.
	// To keep consistent with old behavior not setting the logFile in the config does not log to stdout but keeps
	// the default logFile as defined by DefaultAgentLogFile. Setting the logFile explicitly to "/dev/stdout" will log to stdout.
	agentLogOptions := ""
	if logFile == "/dev/stdout" {
		agentLogOptions += " -logLevel " + string(logLevel)
	} else {
		agentLogOptions += " -logFile " + logFile + " -logLevel " + string(logLevel) + " -maxLogFileDurationHrs " + strconv.Itoa(maxLogFileDurationHours)
	}

	if withAgentAPIKeyExport {
		return []string{"/bin/bash", "-c", MongodbUserCommandWithAPIKeyExport + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions}
	}
	return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions}
}

// mongodbAgentContainer builds the mongodb-agent container: image, readiness
// probe, default resources, agent command, and the env vars the headless agent
// needs (namespace, automation config secret name, health status file path).
func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int, agentImage string) container.Modification {
	_, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications()
	return container.Apply(
		container.WithName(AgentName),
		container.WithImage(agentImage),
		container.WithImagePullPolicy(corev1.PullAlways),
		container.WithReadinessProbe(DefaultReadiness()),
		container.WithResourceRequirements(resourcerequirements.Defaults()),
		container.WithVolumeMounts(volumeMounts),
		container.WithCommand(AutomationAgentCommand(false, logLevel, logFile, maxLogFileDurationHours)),
		containerSecurityContext,
		container.WithEnvs(
			corev1.EnvVar{
				Name:  headlessAgentEnv,
				Value: "true",
			},
			corev1.EnvVar{
				Name: podNamespaceEnv,
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{
						APIVersion: "v1",
						FieldPath:  "metadata.namespace",
					},
				},
			},
			corev1.EnvVar{
				Name:  automationConfigEnv,
				Value: automationConfigSecretName,
			},
			corev1.EnvVar{
				Name:  agentHealthStatusFilePathEnv,
				Value: agentHealthStatusFilePathValue,
			},
		),
	)
}

// versionUpgradeHookInit builds the init container that copies the
// version-upgrade-hook binary into the shared /hooks volume.
func versionUpgradeHookInit(volumeMount []corev1.VolumeMount, versionUpgradeHookImage string) container.Modification {
	_, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications()
	return container.Apply(
		container.WithName(versionUpgradeHookName),
		container.WithCommand([]string{"cp", "version-upgrade-hook", "/hooks/version-upgrade"}),
		container.WithImage(versionUpgradeHookImage),
		container.WithResourceRequirements(resourcerequirements.Defaults()),
		container.WithImagePullPolicy(corev1.PullAlways),
		container.WithVolumeMounts(volumeMount),
		containerSecurityContext,
	)
}

// DefaultReadiness is the agent readiness probe: exec the readinessprobe
// binary, 5s initial delay, 40 allowed failures.
func DefaultReadiness() probes.Modification {
	return probes.Apply(
		probes.WithExecCommand([]string{readinessProbePath}),
		probes.WithFailureThreshold(40),
		probes.WithInitialDelaySeconds(5),
	)
}

// dataPvc builds the PVC template for the data volume (default storage size).
func dataPvc(dataVolumeName string) persistentvolumeclaim.Modification {
	return persistentvolumeclaim.Apply(
		persistentvolumeclaim.WithName(dataVolumeName),
		persistentvolumeclaim.WithAccessModes(corev1.ReadWriteOnce),
		persistentvolumeclaim.WithResourceRequests(resourcerequirements.BuildDefaultStorageRequirements()),
	)
}

// logsPvc builds the PVC template for the logs volume (2G request).
func logsPvc(logsVolumeName string) persistentvolumeclaim.Modification {
	return persistentvolumeclaim.Apply(
		persistentvolumeclaim.WithName(logsVolumeName),
		persistentvolumeclaim.WithAccessModes(corev1.ReadWriteOnce),
		persistentvolumeclaim.WithResourceRequests(resourcerequirements.BuildStorageRequirements("2G")),
	)
}

// readinessProbeInit returns a modification function which will add the readiness probe container.
// this container will copy the readiness probe binary into the /opt/scripts directory.
func readinessProbeInit(volumeMount []corev1.VolumeMount, readinessProbeImage string) container.Modification {
	_, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications()
	return container.Apply(
		container.WithName(ReadinessProbeContainerName),
		container.WithCommand([]string{"cp", "/probes/readinessprobe", "/opt/scripts/readinessprobe"}),
		container.WithImage(readinessProbeImage),
		container.WithImagePullPolicy(corev1.PullAlways),
		container.WithVolumeMounts(volumeMount),
		container.WithResourceRequirements(resourcerequirements.Defaults()),
		containerSecurityContext,
	)
}

// mongodbContainer builds the mongod container. Its command runs the optional
// version-upgrade hook, waits for the agent to write the automation-generated
// mongod config and keyfile, then execs mongod with that config.
func mongodbContainer(mongodbImage string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration) container.Modification {
	filePath := additionalMongoDBConfig.GetDBDataDir() + "/" + automationMongodConfFileName
	mongoDbCommand := fmt.Sprintf(`
if [ -e "/hooks/version-upgrade" ]; then
	#run post-start hook to handle version changes (if exists)
	/hooks/version-upgrade
fi

# wait for config and keyfile to be created by the agent
while ! [ -f %s -a -f %s ]; do sleep 3 ; done ; sleep 2 ;

# start mongod with this configuration
exec mongod -f %s;

`, filePath, keyfileFilePath, filePath)

	containerCommand := []string{
		"/bin/sh",
		"-c",
		mongoDbCommand,
	}

	_, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications()

	return container.Apply(
		container.WithName(MongodbName),
		container.WithImage(mongodbImage),
		container.WithResourceRequirements(resourcerequirements.Defaults()),
		container.WithCommand(containerCommand),
		// The official image provides both CMD and ENTRYPOINT. We're reusing the former and need to replace
		// the latter with an empty string.
		container.WithArgs([]string{""}),
		containerSecurityContext,
		container.WithEnvs(
			collectEnvVars()...,
		),
		container.WithVolumeMounts(volumeMounts),
	)
}

// Function to collect and return the environment variables to be used in the
// MongoDB container.
func collectEnvVars() []corev1.EnvVar {
	var envVars []corev1.EnvVar

	// Always present: where the agent writes its health status inside the pod.
	envVars = append(envVars, corev1.EnvVar{
		Name:  agentHealthStatusFilePathEnv,
		Value: "/healthstatus/agent-health-status.json",
	})

	// Forward selected readiness-probe settings from the operator's own
	// environment only when they are set.
	addEnvVarIfSet := func(name string) {
		value := os.Getenv(name) // nolint:forbidigo
		if value != "" {
			envVars = append(envVars, corev1.EnvVar{
				Name:  name,
				Value: value,
			})
		}
	}

	addEnvVarIfSet(config.ReadinessProbeLoggerBackups)
	addEnvVarIfSet(config.ReadinessProbeLoggerMaxSize)
	addEnvVarIfSet(config.ReadinessProbeLoggerMaxAge)
	addEnvVarIfSet(config.ReadinessProbeLoggerCompress)
	addEnvVarIfSet(config.WithAgentFileLogging)

	return envVars
}

// ---- controllers/construct/mongodbstatefulset_test.go ----

package construct

import (
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	"testing"
)

// TestCollectEnvVars checks that collectEnvVars forwards exactly the
// readiness-probe env vars that are set in the process environment, in a
// stable order, always preceded by the health-status file path var.
func TestCollectEnvVars(t *testing.T) {
	tests := []struct {
		name        string
		envSetup    map[string]string
		expectedEnv []corev1.EnvVar
	}{
		{
			name: "Basic env vars set",
			envSetup: map[string]string{
				config.ReadinessProbeLoggerBackups: "3",
				config.ReadinessProbeLoggerMaxSize: "10M",
				config.ReadinessProbeLoggerMaxAge:  "7",
				config.WithAgentFileLogging:        "enabled",
			},
			expectedEnv: []corev1.EnvVar{
				{
					Name:  config.AgentHealthStatusFilePathEnv,
					Value: "/healthstatus/agent-health-status.json",
				},
				{
					Name:  config.ReadinessProbeLoggerBackups,
					Value: "3",
				},
				{
					Name:  config.ReadinessProbeLoggerMaxSize,
					Value: "10M",
				},
				{
					Name:  config.ReadinessProbeLoggerMaxAge,
					Value: "7",
				},
				{
					Name:  config.WithAgentFileLogging,
					Value: "enabled",
				},
			},
		},
		{
			name: "Additional env var set",
			envSetup: map[string]string{
				config.ReadinessProbeLoggerBackups:  "3",
				config.ReadinessProbeLoggerMaxSize:  "10M",
				config.ReadinessProbeLoggerMaxAge:   "7",
				config.ReadinessProbeLoggerCompress: "true",
				config.WithAgentFileLogging:         "enabled",
			},
			expectedEnv: []corev1.EnvVar{
				{
					Name:  config.AgentHealthStatusFilePathEnv,
					Value: "/healthstatus/agent-health-status.json",
				},
				{
					Name:  config.ReadinessProbeLoggerBackups,
					Value: "3",
				},
				{
					Name:  config.ReadinessProbeLoggerMaxSize,
					Value: "10M",
				},
				{
					Name:  config.ReadinessProbeLoggerMaxAge,
					Value: "7",
				},
				{
					Name:  config.ReadinessProbeLoggerCompress,
					Value: "true",
				},
				{
					Name:  config.WithAgentFileLogging,
					Value: "enabled",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Setup environment variables (t.Setenv restores them after the subtest)
			for key, value := range tt.envSetup {
				t.Setenv(key, value)
			}

			actualEnvVars := collectEnvVars()

			assert.EqualValues(t, tt.expectedEnv, actualEnvVars)
		})
	}
}

// ---- controllers/mongodb_cleanup.go ----

package controllers

import (
	"context"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"

	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants"
)

// cleanupPemSecret cleans up the old pem secret generated for the agent certificate.
+func (r *ReplicaSetReconciler) cleanupPemSecret(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + if currentMDBSpec.GetAgentAuthMode() == lastAppliedMDBSpec.GetAgentAuthMode() { + return + } + + if !currentMDBSpec.IsAgentX509() && lastAppliedMDBSpec.IsAgentX509() { + agentCertSecret := lastAppliedMDBSpec.GetAgentCertificateRef() + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Namespace: namespace, + Name: agentCertSecret + "-pem", + }); err != nil { + if apiErrors.IsNotFound(err) { + r.log.Debugf("Agent pem file secret %s-pem was already deleted", agentCertSecret) + } else { + r.log.Warnf("Could not cleanup old agent pem file %s-pem: %s", agentCertSecret, err) + } + } + } +} + +// cleanupScramSecrets cleans up old scram secrets based on the last successful applied mongodb spec. +func (r *ReplicaSetReconciler) cleanupScramSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + secretsToDelete := getScramSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec) + + for _, s := range secretsToDelete { + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Name: s, + Namespace: namespace, + }); err != nil { + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) + } else { + r.log.Debugf("Sucessfully cleaned up secret: %s", s) + } + } +} + +// cleanupConnectionStringSecrets cleans up old scram secrets based on the last successful applied mongodb spec. 
+func (r *ReplicaSetReconciler) cleanupConnectionStringSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string, resourceName string) { + secretsToDelete := getConnectionStringSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec, resourceName) + + for _, s := range secretsToDelete { + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Name: s, + Namespace: namespace, + }); err != nil { + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) + } else { + r.log.Debugf("Sucessfully cleaned up secret: %s", s) + } + } +} + +func getScramSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec) []string { + type user struct { + db string + name string + } + m := map[user]string{} + var secretsToDelete []string + + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetScramCredentialsSecretName() + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + currentScramSecretName, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { // not used anymore + secretsToDelete = append(secretsToDelete, mongoDBUser.GetScramCredentialsSecretName()) + } else if currentScramSecretName != mongoDBUser.GetScramCredentialsSecretName() { // have changed + secretsToDelete = append(secretsToDelete, mongoDBUser.GetScramCredentialsSecretName()) + } + } + return secretsToDelete +} + +func getConnectionStringSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, resourceName string) []string { + type user struct { + db string + name string + } + m := map[user]string{} + var secretsToDelete []string + + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + 
continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetConnectionStringSecretName(resourceName) + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + currentConnectionStringSecretName, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { // user was removed + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } else if currentConnectionStringSecretName != mongoDBUser.GetConnectionStringSecretName(resourceName) { + // this happens when a new ConnectionStringSecretName was set for the old user + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } + } + return secretsToDelete +} diff --git a/controllers/mongodb_cleanup_test.go b/controllers/mongodb_cleanup_test.go new file mode 100644 index 000000000..0123f63ee --- /dev/null +++ b/controllers/mongodb_cleanup_test.go @@ -0,0 +1,242 @@ +package controllers + +import ( + "context" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + kubeClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { + lastApplied := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials", + }) + + t.Run("no change same resource", func(t *testing.T) { + actual := getScramSecretsToDelete(lastApplied.Spec, lastApplied.Spec) + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("new user new secret", func(t *testing.T) { + current := newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", 
				},
				ScramCredentialsSecretName: "scram-credentials",
			},
			mdbv1.MongoDBUser{
				Name: "newUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ScramCredentialsSecretName: "scram-credentials-2",
			},
		)

		actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec)

		assert.Equal(t, []string(nil), actual)
	})

	t.Run("old user new secret", func(t *testing.T) {
		current := newScramReplicaSet(mdbv1.MongoDBUser{
			Name: "testUser",
			PasswordSecretRef: mdbv1.SecretKeyReference{
				Name: "password-secret-name",
			},
			ScramCredentialsSecretName: "scram-credentials-2",
		})

		// Expected names carry the scram-credentials suffix applied by
		// GetScramCredentialsSecretName.
		expected := []string{"scram-credentials-scram-credentials"}
		actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec)

		assert.Equal(t, expected, actual)
	})

	t.Run("removed one user and changed secret of the other", func(t *testing.T) {
		lastApplied = newScramReplicaSet(
			mdbv1.MongoDBUser{
				Name: "testUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ScramCredentialsSecretName: "scram-credentials",
			},
			mdbv1.MongoDBUser{
				Name: "anotherUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ScramCredentialsSecretName: "another-scram-credentials",
			},
		)

		current := newScramReplicaSet(mdbv1.MongoDBUser{
			Name: "testUser",
			PasswordSecretRef: mdbv1.SecretKeyReference{
				Name: "password-secret-name",
			},
			ScramCredentialsSecretName: "scram-credentials-2",
		})

		expected := []string{"scram-credentials-scram-credentials", "another-scram-credentials-scram-credentials"}
		actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec)

		assert.Equal(t, expected, actual)
	})

}

// TestReplicaSetReconcilerCleanupPemSecret verifies the agent "-pem" secret is
// deleted when the resource switches agent auth from X509 to SCRAM.
func TestReplicaSetReconcilerCleanupPemSecret(t *testing.T) {
	ctx := context.Background()
	lastAppliedSpec := mdbv1.MongoDBCommunitySpec{
		Security: mdbv1.Security{
			Authentication: mdbv1.Authentication{
				Modes: []mdbv1.AuthMode{"X509"},
			},
		},
	}
	mdb := mdbv1.MongoDBCommunity{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "my-rs",
			Namespace:   "my-ns",
			Annotations: map[string]string{},
		},
		Spec: mdbv1.MongoDBCommunitySpec{
			Members: 3,
			Version: "4.2.2",
			Security: mdbv1.Security{
				Authentication: mdbv1.Authentication{
					Modes: []mdbv1.AuthMode{"SCRAM"},
				},
				TLS: mdbv1.TLS{
					Enabled: true,
					CaConfigMap: &corev1.LocalObjectReference{
						Name: "caConfigMap",
					},
					CaCertificateSecret: &corev1.LocalObjectReference{
						Name: "certificateKeySecret",
					},
					CertificateKeySecret: corev1.LocalObjectReference{
						Name: "certificateKeySecret",
					},
				},
			},
		},
	}

	mgr := kubeClient.NewManager(ctx, &mdb)

	client := kubeClient.NewClient(mgr.GetClient())
	err := createAgentCertPemSecret(ctx, client, mdb, "CERT", "KEY", "")
	assert.NoError(t, err)

	r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage")

	// Sanity check: the pem secret exists before cleanup runs.
	secret, err := r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName())
	assert.NoError(t, err)
	assert.Equal(t, "CERT", string(secret.Data["tls.crt"]))
	assert.Equal(t, "KEY", string(secret.Data["tls.key"]))

	r.cleanupPemSecret(ctx, mdb.Spec, lastAppliedSpec, "my-ns")

	// After cleanup the secret must be gone.
	_, err = r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName())
	assert.Error(t, err)
}

func TestReplicaSetReconcilerCleanupConnectionStringSecrets(t *testing.T) {
	lastApplied := newScramReplicaSet(mdbv1.MongoDBUser{
		Name: "testUser",
		PasswordSecretRef: mdbv1.SecretKeyReference{
			Name: "password-secret-name",
		},
		ConnectionStringSecretName: "connection-string-secret",
	})

	t.Run("no change same resource", func(t *testing.T) {
		actual := getConnectionStringSecretsToDelete(lastApplied.Spec, lastApplied.Spec, "my-rs")

		assert.Equal(t, []string(nil), actual)
	})

	t.Run("new user does not require existing user cleanup", func(t *testing.T) {
		current := newScramReplicaSet(
			mdbv1.MongoDBUser{
				Name: "testUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ConnectionStringSecretName: "connection-string-secret",
			},
			mdbv1.MongoDBUser{
				Name: "newUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ConnectionStringSecretName: "connection-string-secret-2",
			},
		)

		actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs")

		assert.Equal(t, []string(nil), actual)
	})

	t.Run("old user new secret", func(t *testing.T) {
		current := newScramReplicaSet(mdbv1.MongoDBUser{
			Name: "testUser",
			PasswordSecretRef: mdbv1.SecretKeyReference{
				Name: "password-secret-name",
			},
			ConnectionStringSecretName: "connection-string-secret-2",
		})

		expected := []string{"connection-string-secret"}
		actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs")

		assert.Equal(t, expected, actual)
	})

	t.Run("removed one user and changed secret of the other", func(t *testing.T) {
		lastApplied = newScramReplicaSet(
			mdbv1.MongoDBUser{
				Name: "testUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ConnectionStringSecretName: "connection-string-secret",
			},
			mdbv1.MongoDBUser{
				Name: "anotherUser",
				PasswordSecretRef: mdbv1.SecretKeyReference{
					Name: "password-secret-name",
				},
				ConnectionStringSecretName: "connection-string-secret-2",
			},
		)

		current := newScramReplicaSet(mdbv1.MongoDBUser{
			Name: "testUser",
			PasswordSecretRef: mdbv1.SecretKeyReference{
				Name: "password-secret-name",
			},
			ConnectionStringSecretName: "connection-string-secret-1",
		})

		expected := []string{"connection-string-secret", "connection-string-secret-2"}
		actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs")

		assert.Equal(t, expected, actual)
	})

}

// ---- controllers/mongodb_status_options.go ----
b/controllers/mongodb_status_options.go new file mode 100644 index 000000000..5961bdcbd --- /dev/null +++ b/controllers/mongodb_status_options.go @@ -0,0 +1,243 @@ +package controllers + +import ( + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/apierrors" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "go.uber.org/zap" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// severity indicates the severity level +// at which the message should be logged +type severity string + +const ( + Info severity = "INFO" + Debug severity = "DEBUG" + Warn severity = "WARN" + Error severity = "ERROR" + None severity = "NONE" +) + +// optionBuilder is in charge of constructing a slice of options that +// will be applied on top of the MongoDB resource that has been provided +type optionBuilder struct { + options []status.Option +} + +// GetOptions implements the OptionBuilder interface +func (o *optionBuilder) GetOptions() []status.Option { + return o.options +} + +// statusOptions returns an initialized optionBuilder +func statusOptions() *optionBuilder { + return &optionBuilder{ + options: []status.Option{}, + } +} + +func (o *optionBuilder) withMongoURI(uri string) *optionBuilder { + o.options = append(o.options, + mongoUriOption{ + mongoUri: uri, + }) + return o +} + +type mongoUriOption struct { + mongoUri string +} + +func (m mongoUriOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.MongoURI = m.mongoUri +} + +func (m mongoUriOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +func (o *optionBuilder) withVersion(version string) *optionBuilder { + o.options = append(o.options, + versionOption{ + version: version, + }) + return o +} + +type versionOption struct { + version string +} + +func (v versionOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.Version = 
v.version +} + +func (v versionOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { + o.options = append(o.options, + phaseOption{ + phase: phase, + retryAfter: retryAfter, + }) + return o +} + +type message struct { + messageString string + severityLevel severity +} + +type messageOption struct { + message message +} + +func (m messageOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.Message = m.message.messageString + if m.message.severityLevel == Error { + zap.S().Error(m.message.messageString) + } + if m.message.severityLevel == Warn { + zap.S().Warn(m.message.messageString) + } + if m.message.severityLevel == Info { + zap.S().Info(m.message.messageString) + } + if m.message.severityLevel == Debug { + zap.S().Debug(m.message.messageString) + } +} + +func (m messageOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +func (o *optionBuilder) withMongoDBMembers(members int) *optionBuilder { + o.options = append(o.options, mongoDBReplicasOption{ + mongoDBMembers: members, + }) + return o +} + +func (o *optionBuilder) withStatefulSetReplicas(members int) *optionBuilder { + o.options = append(o.options, statefulSetReplicasOption{ + replicas: members, + }) + return o +} + +func (o *optionBuilder) withMongoDBArbiters(arbiters int) *optionBuilder { + o.options = append(o.options, mongoDBArbitersOption{ + mongoDBArbiters: arbiters, + }) + return o +} + +func (o *optionBuilder) withStatefulSetArbiters(arbiters int) *optionBuilder { + o.options = append(o.options, statefulSetArbitersOption{ + arbiters: arbiters, + }) + return o +} + +func (o *optionBuilder) withMessage(severityLevel severity, msg string) *optionBuilder { + if apierrors.IsTransientMessage(msg) { + severityLevel = Debug + msg = "" + } + o.options = append(o.options, messageOption{ + message: message{ + messageString: msg, + severityLevel: severityLevel, + }, + }) + 
return o +} + +func (o *optionBuilder) withFailedPhase() *optionBuilder { + return o.withPhase(mdbv1.Failed, 0) +} + +func (o *optionBuilder) withPendingPhase(retryAfter int) *optionBuilder { + return o.withPhase(mdbv1.Pending, retryAfter) +} + +func (o *optionBuilder) withRunningPhase() *optionBuilder { + return o.withPhase(mdbv1.Running, -1) +} + +type phaseOption struct { + phase mdbv1.Phase + retryAfter int +} + +func (p phaseOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.Phase = p.phase +} + +func (p phaseOption) GetResult() (reconcile.Result, error) { + if p.phase == mdbv1.Running { + return result.OK() + } + if p.phase == mdbv1.Pending { + return result.Retry(p.retryAfter) + } + if p.phase == mdbv1.Failed { + return result.Failed() + } + return result.OK() +} + +type mongoDBReplicasOption struct { + mongoDBMembers int +} + +func (a mongoDBReplicasOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentMongoDBMembers = a.mongoDBMembers +} + +func (a mongoDBReplicasOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +type statefulSetReplicasOption struct { + replicas int +} + +func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentStatefulSetReplicas = s.replicas +} + +func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +type mongoDBArbitersOption struct { + mongoDBArbiters int +} + +func (a mongoDBArbitersOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentMongoDBArbiters = a.mongoDBArbiters +} + +func (a mongoDBArbitersOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +type statefulSetArbitersOption struct { + arbiters int +} + +func (s statefulSetArbitersOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentStatefulSetArbitersReplicas = s.arbiters +} + +func (s statefulSetArbitersOption) GetResult() (reconcile.Result, error) { + return result.OK() +} diff 
--git a/controllers/mongodb_status_options_test.go b/controllers/mongodb_status_options_test.go new file mode 100644 index 000000000..9041c8d99 --- /dev/null +++ b/controllers/mongodb_status_options_test.go @@ -0,0 +1,74 @@ +package controllers + +import ( + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stretchr/testify/assert" +) + +const testVersion string = "4.2.6" + +func TestMongoUriOption_ApplyOption(t *testing.T) { + + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + opt := mongoUriOption{ + mongoUri: "my-uri", + } + + opt.ApplyOption(&mdb) + + assert.Equal(t, "my-uri", mdb.Status.MongoURI, "Status should be updated") +} + +func TestOptionBuilder_RunningPhase(t *testing.T) { + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + statusOptions().withRunningPhase().GetOptions()[0].ApplyOption(&mdb) + + assert.Equal(t, mdbv1.Running, mdb.Status.Phase) +} + +func TestOptionBuilder_PendingPhase(t *testing.T) { + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + statusOptions().withPendingPhase(10).GetOptions()[0].ApplyOption(&mdb) + + assert.Equal(t, mdbv1.Pending, mdb.Status.Phase) +} + +func TestOptionBuilder_FailedPhase(t *testing.T) { + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + statusOptions().withFailedPhase().GetOptions()[0].ApplyOption(&mdb) + + assert.Equal(t, mdbv1.Failed, mdb.Status.Phase) +} + +func TestVersion_ApplyOption(t *testing.T) { + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + opt := versionOption{ + version: testVersion, + } + opt.ApplyOption(&mdb) + + assert.Equal(t, testVersion, mdb.Status.Version, "Status should be updated") +} + +func newReplicaSet(members int, version string, name, namespace string) mdbv1.MongoDBCommunity { + return mdbv1.MongoDBCommunity{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: 
mdbv1.MongoDBCommunitySpec{ + Members: members, + Version: version, + }, + } +} diff --git a/controllers/mongodb_tls.go b/controllers/mongodb_tls.go new file mode 100644 index 000000000..56c67642d --- /dev/null +++ b/controllers/mongodb_tls.go @@ -0,0 +1,379 @@ +package controllers + +import ( + "context" + "crypto/sha256" + "fmt" + "strings" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" +) + +const ( + tlsCAMountPath = "/var/lib/tls/ca/" + tlsCACertName = "ca.crt" + tlsOperatorSecretMountPath = "/var/lib/tls/server/" //nolint + tlsPrometheusSecretMountPath = "/var/lib/tls/prometheus/" //nolint + tlsSecretCertName = "tls.crt" + tlsSecretKeyName = "tls.key" + tlsSecretPemName = "tls.pem" + automationAgentPemMountPath = "/var/lib/mongodb-mms-automation/agent-certs" +) + +// validateTLSConfig will check that the configured ConfigMap and Secret exist and that they have the correct fields. 
+func (r *ReplicaSetReconciler) validateTLSConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { + if !mdb.Spec.Security.TLS.Enabled { + return true, nil + } + + r.log.Info("Ensuring TLS is correctly configured") + + // Ensure CA cert is configured + _, err := getCaCrt(ctx, r.client, r.client, mdb) + + if err != nil { + if apiErrors.IsNotFound(err) { + r.log.Warnf("CA resource not found: %s", err) + return false, nil + } + + return false, err + } + + // Ensure Secret exists + _, err = secret.ReadStringData(ctx, r.client, mdb.TLSSecretNamespacedName()) + if err != nil { + if apiErrors.IsNotFound(err) { + r.log.Warnf(`Secret "%s" not found`, mdb.TLSSecretNamespacedName()) + return false, nil + } + + return false, err + } + + // validate whether the secret contains "tls.crt" and "tls.key", or it contains "tls.pem" + // if it contains all three, then the pem entry should be equal to the concatenation of crt and key + _, err = getPemOrConcatenatedCrtAndKey(ctx, r.client, mdb.TLSSecretNamespacedName()) + if err != nil { + r.log.Warnf(err.Error()) + return false, nil + } + + // Watch certificate-key secret to handle rotations + r.secretWatcher.Watch(ctx, mdb.TLSSecretNamespacedName(), mdb.NamespacedName()) + + // Watch CA certificate changes + if mdb.Spec.Security.TLS.CaCertificateSecret != nil { + r.secretWatcher.Watch(ctx, mdb.TLSCaCertificateSecretNamespacedName(), mdb.NamespacedName()) + } else { + r.configMapWatcher.Watch(ctx, mdb.TLSConfigMapNamespacedName(), mdb.NamespacedName()) + } + + r.log.Infof("Successfully validated TLS config") + return true, nil +} + +// getTLSConfigModification creates a modification function which enables TLS in the automation config. +// It will also ensure that the combined cert-key secret is created. 
+func getTLSConfigModification(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { + if !mdb.Spec.Security.TLS.Enabled { + return automationconfig.NOOP(), nil + } + + caCert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb) + if err != nil { + return automationconfig.NOOP(), err + } + + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, secretGetter, mdb.TLSSecretNamespacedName()) + if err != nil { + return automationconfig.NOOP(), err + } + + return tlsConfigModification(mdb, certKey, caCert), nil +} + +// getCertAndKey will fetch the certificate and key from the user-provided Secret. +func getCertAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + cert, err := secret.ReadKey(ctx, getter, tlsSecretCertName, secretName) + if err != nil { + return "" + } + + key, err := secret.ReadKey(ctx, getter, tlsSecretKeyName, secretName) + if err != nil { + return "" + } + + return combineCertificateAndKey(cert, key) +} + +// getPem will fetch the pem from the user-provided secret +func getPem(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + pem, err := secret.ReadKey(ctx, getter, tlsSecretPemName, secretName) + if err != nil { + return "" + } + return pem +} + +func combineCertificateAndKey(cert, key string) string { + trimmedCert := strings.TrimRight(cert, "\n") + trimmedKey := strings.TrimRight(key, "\n") + return fmt.Sprintf("%s\n%s", trimmedCert, trimmedKey) +} + +// getPemOrConcatenatedCrtAndKey will get the final PEM to write to the secret. +// This is either the tls.pem entry in the given secret, or the concatenation +// of tls.crt and tls.key +// It performs a basic validation on the entries. 
+func getPemOrConcatenatedCrtAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) (string, error) { + certKey := getCertAndKey(ctx, getter, secretName) + pem := getPem(ctx, getter, secretName) + if certKey == "" && pem == "" { + return "", fmt.Errorf(`neither "%s" nor the pair "%s"/"%s" were present in the TLS secret`, tlsSecretPemName, tlsSecretCertName, tlsSecretKeyName) + } + if certKey == "" { + return pem, nil + } + if pem == "" { + return certKey, nil + } + if certKey != pem { + return "", fmt.Errorf(`if all of "%s", "%s" and "%s" are present in the secret, the entry for "%s" must be equal to the concatenation of "%s" with "%s"`, tlsSecretCertName, tlsSecretKeyName, tlsSecretPemName, tlsSecretPemName, tlsSecretCertName, tlsSecretKeyName) + } + return certKey, nil +} + +func getCaCrt(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (string, error) { + var caResourceName types.NamespacedName + var caData map[string]string + var err error + if mdb.Spec.Security.TLS.CaCertificateSecret != nil { + caResourceName = mdb.TLSCaCertificateSecretNamespacedName() + caData, err = secret.ReadStringData(ctx, secretGetter, caResourceName) + } else if mdb.Spec.Security.TLS.CaConfigMap != nil { + caResourceName = mdb.TLSConfigMapNamespacedName() + caData, err = configmap.ReadData(ctx, cmGetter, caResourceName) + } + + if err != nil { + return "", err + } + + if caData == nil { + return "", fmt.Errorf("TLS field requires a reference to the CA certificate which signed the server certificates. 
Neither secret (field caCertificateSecretRef) not configMap (field CaConfigMap) reference present") + } + + if cert, ok := caData[tlsCACertName]; !ok || cert == "" { + return "", fmt.Errorf(`CA certificate resource "%s" should have a CA certificate in field "%s"`, caResourceName, tlsCACertName) + } else { + return cert, nil + } +} + +// ensureCASecret will create or update the operator managed Secret containing +// the CA certficate from the user provided Secret or ConfigMap. +func ensureCASecret(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + cert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb) + if err != nil { + return err + } + + caFileName := tlsOperatorSecretFileName(cert) + + operatorSecret := secret.Builder(). + SetName(mdb.TLSOperatorCASecretNamespacedName().Name). + SetNamespace(mdb.TLSOperatorCASecretNamespacedName().Namespace). + SetField(caFileName, cert). + SetOwnerReferences(mdb.GetOwnerReferences()). + Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) +} + +// ensureTLSSecret will create or update the operator-managed Secret containing +// the concatenated certificate and key from the user-provided Secret. +func ensureTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.TLSSecretNamespacedName()) + if err != nil { + return err + } + // Calculate file name from certificate and key + fileName := tlsOperatorSecretFileName(certKey) + + operatorSecret := secret.Builder(). + SetName(mdb.TLSOperatorSecretNamespacedName().Name). + SetNamespace(mdb.TLSOperatorSecretNamespacedName().Namespace). + SetField(fileName, certKey). + SetOwnerReferences(mdb.GetOwnerReferences()). 
+ Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) +} + +func ensureAgentCertSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.GetAgentAuthMode() != "X509" { + return nil + } + + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.AgentCertificateSecretNamespacedName()) + if err != nil { + return err + } + + agentCertSecret := secret.Builder(). + SetName(mdb.AgentCertificatePemSecretNamespacedName().Name). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(mdb.AgentCertificatePemSecretNamespacedName().Name, certKey). + SetOwnerReferences(mdb.GetOwnerReferences()). + Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, agentCertSecret) +} + +// ensurePrometheusTLSSecret will create or update the operator-managed Secret containing +// the concatenated certificate and key from the user-provided Secret. +func ensurePrometheusTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.DeepCopy().PrometheusTLSSecretNamespacedName()) + if err != nil { + return err + } + // Calculate file name from certificate and key + fileName := tlsOperatorSecretFileName(certKey) + + operatorSecret := secret.Builder(). + SetName(mdb.PrometheusTLSOperatorSecretNamespacedName().Name). + SetNamespace(mdb.PrometheusTLSOperatorSecretNamespacedName().Namespace). + SetField(fileName, certKey). + SetOwnerReferences(mdb.GetOwnerReferences()). + Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) +} + +// tlsOperatorSecretFileName calculates the file name to use for the mounted +// certificate-key file. The name is based on the hash of the combined cert and key. +// If the certificate or key changes, the file path changes as well which will trigger +// the agent to perform a restart. 
+// The user-provided secret is being watched and will trigger a reconciliation +// on changes. This enables the operator to automatically handle cert rotations. +func tlsOperatorSecretFileName(certKey string) string { + hash := sha256.Sum256([]byte(certKey)) + return fmt.Sprintf("%x.pem", hash) +} + +// tlsConfigModification will enable TLS in the automation config. +func tlsConfigModification(mdb mdbv1.MongoDBCommunity, certKey, caCert string) automationconfig.Modification { + caCertificatePath := tlsCAMountPath + tlsOperatorSecretFileName(caCert) + certificateKeyPath := tlsOperatorSecretMountPath + tlsOperatorSecretFileName(certKey) + + mode := automationconfig.TLSModeRequired + if mdb.Spec.Security.TLS.Optional { + // TLSModePreferred requires server-server connections to use TLS but makes it optional for clients. + mode = automationconfig.TLSModePreferred + } + + automationAgentPemFilePath := "" + if mdb.Spec.IsAgentX509() { + automationAgentPemFilePath = automationAgentPemMountPath + "/" + mdb.AgentCertificatePemSecretNamespacedName().Name + } + + return func(config *automationconfig.AutomationConfig) { + // Configure CA certificate for agent + config.TLSConfig.CAFilePath = caCertificatePath + config.TLSConfig.AutoPEMKeyFilePath = automationAgentPemFilePath + + for i := range config.Processes { + args := config.Processes[i].Args26 + + args.Set("net.tls.mode", mode) + args.Set("net.tls.CAFile", caCertificatePath) + args.Set("net.tls.certificateKeyFile", certificateKeyPath) + args.Set("net.tls.allowConnectionsWithoutCertificates", true) + } + } +} + +// buildTLSPodSpecModification will add the TLS init container and volumes to the pod template if TLS is enabled. 
+func buildTLSPodSpecModification(mdb mdbv1.MongoDBCommunity) podtemplatespec.Modification { + if !mdb.Spec.Security.TLS.Enabled { + return podtemplatespec.NOOP() + } + + // Configure a volume which mounts the CA certificate from either a Secret or a ConfigMap + // The certificate is used by both mongod and the agent + caVolume := statefulset.CreateVolumeFromSecret("tls-ca", mdb.TLSOperatorCASecretNamespacedName().Name) + caVolumeMount := statefulset.CreateVolumeMount(caVolume.Name, tlsCAMountPath, statefulset.WithReadOnly(true)) + + // Configure a volume which mounts the secret holding the server key and certificate + // The same key-certificate pair is used for all servers + tlsSecretVolume := statefulset.CreateVolumeFromSecret("tls-secret", mdb.TLSOperatorSecretNamespacedName().Name) + tlsSecretVolumeMount := statefulset.CreateVolumeMount(tlsSecretVolume.Name, tlsOperatorSecretMountPath, statefulset.WithReadOnly(true)) + + // MongoDB expects both key and certificate to be provided in a single PEM file + // We are using a secret format where they are stored in separate fields, tls.crt and tls.key + // Because of this we need to use an init container which reads the two files mounted from the secret and combines them into one + return podtemplatespec.Apply( + podtemplatespec.WithVolume(caVolume), + podtemplatespec.WithVolume(tlsSecretVolume), + podtemplatespec.WithVolumeMounts(construct.AgentName, tlsSecretVolumeMount, caVolumeMount), + podtemplatespec.WithVolumeMounts(construct.MongodbName, tlsSecretVolumeMount, caVolumeMount), + ) +} + +// buildTLSPrometheus adds the TLS mounts for Prometheus. 
+func buildTLSPrometheus(mdb mdbv1.MongoDBCommunity) podtemplatespec.Modification { + if mdb.Spec.Prometheus == nil || mdb.Spec.Prometheus.TLSSecretRef.Name == "" { + return podtemplatespec.NOOP() + } + + // Configure a volume which mounts the secret holding the server key and certificate + // The same key-certificate pair is used for all servers + tlsSecretVolume := statefulset.CreateVolumeFromSecret("prom-tls-secret", mdb.PrometheusTLSOperatorSecretNamespacedName().Name) + + tlsSecretVolumeMount := statefulset.CreateVolumeMount(tlsSecretVolume.Name, tlsPrometheusSecretMountPath, statefulset.WithReadOnly(true)) + + // MongoDB expects both key and certificate to be provided in a single PEM file + // We are using a secret format where they are stored in separate fields, tls.crt and tls.key + // Because of this we need to use an init container which reads the two files mounted from the secret and combines them into one + return podtemplatespec.Apply( + // podtemplatespec.WithVolume(caVolume), + podtemplatespec.WithVolume(tlsSecretVolume), + podtemplatespec.WithVolumeMounts(construct.AgentName, tlsSecretVolumeMount), + podtemplatespec.WithVolumeMounts(construct.MongodbName, tlsSecretVolumeMount), + ) +} + +func buildAgentX509(mdb mdbv1.MongoDBCommunity) podtemplatespec.Modification { + if mdb.Spec.GetAgentAuthMode() != "X509" { + return podtemplatespec.Apply( + podtemplatespec.RemoveVolume(constants.AgentPemFile), + podtemplatespec.RemoveVolumeMount(construct.AgentName, constants.AgentPemFile), + ) + } + + agentCertVolume := statefulset.CreateVolumeFromSecret(constants.AgentPemFile, mdb.AgentCertificatePemSecretNamespacedName().Name) + agentCertVolumeMount := statefulset.CreateVolumeMount(agentCertVolume.Name, automationAgentPemMountPath, statefulset.WithReadOnly(true)) + + return podtemplatespec.Apply( + podtemplatespec.WithVolume(agentCertVolume), + podtemplatespec.WithVolumeMounts(construct.AgentName, agentCertVolumeMount), + ) + +} diff --git 
a/controllers/mongodb_tls_test.go b/controllers/mongodb_tls_test.go new file mode 100644 index 000000000..b4e832778 --- /dev/null +++ b/controllers/mongodb_tls_test.go @@ -0,0 +1,618 @@ +package controllers + +import ( + "context" + "errors" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + + corev1 "k8s.io/api/core/v1" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + kubeClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/stretchr/testify/assert" +) + +func TestStatefulSetIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mgr := kubeClient.NewManager(ctx, &mdb) + + client := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, 
mdb.TLSOperatorSecretNamespacedName().Name, "", "") +} + +func TestStatefulSetIsCorrectlyConfiguredWithTLSAndX509(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} + mgr := kubeClient.NewManager(ctx, &mdb) + + client := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) + assert.NoError(t, err) + crt, key, err := x509.CreateAgentCertificate() + assert.NoError(t, err) + err = createAgentCertSecret(ctx, client, mdb, crt, key, "") + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + // Check that the pem secret has been created + s := corev1.Secret{} + err = mgr.GetClient().Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), &s) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", mdb.AgentCertificatePemSecretNamespacedName().Name) + + // If we deactivate X509 for the agent, we expect the certificates to be unmounted. 
+ mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"SCRAM"} + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts = appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", "") +} + +func assertStatefulSetVolumesAndVolumeMounts(t *testing.T, sts appsv1.StatefulSet, expectedTLSCASecretName string, expectedTLSOperatorSecretName string, expectedPromTLSSecretName string, expectedAgentCertSecretName string) { + prometheusTLSEnabled := expectedPromTLSSecretName != "" + agentX509Enabled := expectedAgentCertSecretName != "" + + permission := int32(416) + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "tls-ca", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedTLSCASecretName, + DefaultMode: &permission, + }, + }, + }) + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "tls-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedTLSOperatorSecretName, + DefaultMode: &permission, + }, + }, + }) + if prometheusTLSEnabled { + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "prom-tls-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedPromTLSSecretName, + DefaultMode: &permission, + }, + }, + }) + } + if agentX509Enabled { + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "agent-certs-pem", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: 
expectedAgentCertSecretName, + DefaultMode: &permission, + }, + }, + }) + } else { + assert.NotContains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "agent-certs-pem", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedAgentCertSecretName, + DefaultMode: &permission, + }, + }, + }) + } + + tlsSecretVolumeMount := corev1.VolumeMount{ + Name: "tls-secret", + ReadOnly: true, + MountPath: tlsOperatorSecretMountPath, + } + tlsCAVolumeMount := corev1.VolumeMount{ + Name: "tls-ca", + ReadOnly: true, + MountPath: tlsCAMountPath, + } + tlsPrometheusSecretVolumeMount := corev1.VolumeMount{ + Name: "prom-tls-secret", + ReadOnly: true, + MountPath: tlsPrometheusSecretMountPath, + } + agentCertSecretVolumeMount := corev1.VolumeMount{ + Name: "agent-certs-pem", + ReadOnly: true, + MountPath: automationAgentPemMountPath, + } + + assert.Len(t, sts.Spec.Template.Spec.InitContainers, 2) + + var agentContainer corev1.Container + var mongodbContainer corev1.Container + + for i, container := range sts.Spec.Template.Spec.Containers { + if container.Name == construct.AgentName { + agentContainer = sts.Spec.Template.Spec.Containers[i] + } else if container.Name == construct.MongodbName { + mongodbContainer = sts.Spec.Template.Spec.Containers[i] + } + } + + assert.Contains(t, agentContainer.VolumeMounts, tlsSecretVolumeMount) + assert.Contains(t, agentContainer.VolumeMounts, tlsCAVolumeMount) + if prometheusTLSEnabled { + assert.Contains(t, agentContainer.VolumeMounts, tlsPrometheusSecretVolumeMount) + } + if agentX509Enabled { + assert.Contains(t, agentContainer.VolumeMounts, agentCertSecretVolumeMount) + } else { + assert.NotContains(t, agentContainer.VolumeMounts, agentCertSecretVolumeMount) + } + + assert.Contains(t, mongodbContainer.VolumeMounts, tlsSecretVolumeMount) + assert.Contains(t, mongodbContainer.VolumeMounts, tlsCAVolumeMount) + if prometheusTLSEnabled { + assert.Contains(t, mongodbContainer.VolumeMounts, 
tlsPrometheusSecretVolumeMount) + } +} + +func TestStatefulSetIsCorrectlyConfiguredWithPrometheusTLS(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "prom-password-secret", + }, + Port: 4321, + TLSSecretRef: mdbv1.SecretKeyReference{ + Name: "prom-secret-cert", + }, + } + + mgr := kubeClient.NewManager(ctx, &mdb) + cli := kubeClient.NewClient(mgr.GetClient()) + + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("prom-password-secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). + Build()) + assert.NoError(t, err) + err = createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createPrometheusTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + + err = createTLSConfigMap(ctx, cli, mdb) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, mdb.PrometheusTLSOperatorSecretNamespacedName().Name, "") +} + +func TestStatefulSetIsCorrectlyConfiguredWithTLSAfterChangingExistingVolumes(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mgr := kubeClient.NewManager(ctx, &mdb) + + cli := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + 
assert.NoError(t, err) + + tlsCAVolumeSecretName := mdb.TLSOperatorCASecretNamespacedName().Name + changedTLSCAVolumeSecretName := tlsCAVolumeSecretName + "-old" + + err = createTLSSecretWithNamespaceAndName(ctx, cli, mdb.Namespace, changedTLSCAVolumeSecretName, "CERT", "KEY", "") + assert.NoError(t, err) + + err = createTLSConfigMap(ctx, cli, mdb) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") + + // updating sts tls-ca volume directly to simulate changing of underlying volume's secret + for i := range sts.Spec.Template.Spec.Volumes { + if sts.Spec.Template.Spec.Volumes[i].Name == "tls-ca" { + sts.Spec.Template.Spec.Volumes[i].VolumeSource.Secret.SecretName = changedTLSCAVolumeSecretName + } + } + + err = mgr.GetClient().Update(ctx, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, changedTLSCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts = appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") +} + +func 
TestAutomationConfigIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() + createAC := func(mdb mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { + client := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) + assert.NoError(t, err) + + tlsModification, err := getTLSConfigModification(ctx, client, client, mdb) + assert.NoError(t, err) + ac, err := buildAutomationConfig(mdb, false, automationconfig.Auth{}, automationconfig.AutomationConfig{}, tlsModification) + assert.NoError(t, err) + + return ac + } + + t.Run("With TLS disabled", func(t *testing.T) { + mdb := newTestReplicaSet() + ac := createAC(mdb) + + assert.Equal(t, &automationconfig.TLS{ + CAFilePath: "", + ClientCertificateMode: automationconfig.ClientCertificateModeOptional, + }, ac.TLSConfig) + + for _, process := range ac.Processes { + assert.False(t, process.Args26.Has("net.tls")) + } + }) + + t.Run("With logRotate and SystemLog enabled", func(t *testing.T) { + mdb := newTestReplicaSetWithSystemLogAndLogRotate() + ac := createAC(mdb) + + for _, process := range ac.Processes { + assert.Equal(t, "/tmp/test", process.Args26.Get("systemLog.path").String()) + assert.Equal(t, "file", process.Args26.Get("systemLog.destination").String()) + assert.Equal(t, process.LogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.LogRotate)) + assert.Equal(t, process.AuditLogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.AuditLogRotate)) + } + }) + + t.Run("With TLS enabled and required, rollout completed", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + ac := createAC(mdb) + + assert.Equal(t, &automationconfig.TLS{ + CAFilePath: tlsCAMountPath + tlsOperatorSecretFileName("CERT"), + ClientCertificateMode: automationconfig.ClientCertificateModeOptional, + }, ac.TLSConfig) + + for _, 
process := range ac.Processes { + operatorSecretFileName := tlsOperatorSecretFileName("CERT\nKEY") + + assert.Equal(t, automationconfig.TLSModeRequired, process.Args26.Get("net.tls.mode").Data()) + assert.Equal(t, tlsOperatorSecretMountPath+operatorSecretFileName, process.Args26.Get("net.tls.certificateKeyFile").Data()) + assert.Equal(t, tlsCAMountPath+tlsOperatorSecretFileName("CERT"), process.Args26.Get("net.tls.CAFile").Data()) + assert.True(t, process.Args26.Get("net.tls.allowConnectionsWithoutCertificates").MustBool()) + } + }) + + t.Run("With TLS enabled and optional, rollout completed", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.TLS.Optional = true + ac := createAC(mdb) + + assert.Equal(t, &automationconfig.TLS{ + CAFilePath: tlsCAMountPath + tlsOperatorSecretFileName("CERT"), + ClientCertificateMode: automationconfig.ClientCertificateModeOptional, + }, ac.TLSConfig) + + for _, process := range ac.Processes { + operatorSecretFileName := tlsOperatorSecretFileName("CERT\nKEY") + + assert.Equal(t, automationconfig.TLSModePreferred, process.Args26.Get("net.tls.mode").Data()) + assert.Equal(t, tlsOperatorSecretMountPath+operatorSecretFileName, process.Args26.Get("net.tls.certificateKeyFile").Data()) + assert.Equal(t, tlsCAMountPath+tlsOperatorSecretFileName("CERT"), process.Args26.Get("net.tls.CAFile").Data()) + assert.True(t, process.Args26.Get("net.tls.allowConnectionsWithoutCertificates").MustBool()) + } + }) +} + +func TestTLSOperatorSecret(t *testing.T) { + ctx := context.Background() + t.Run("Secret is created if it doesn't exist", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", 
"fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been created and contains the + // concatenated certificate and key. + expectedCertificateKey := "CERT\nKEY" + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + assert.NoError(t, err) + assert.Equal(t, expectedCertificateKey, certificateKey) + }) + + t.Run("Secret is updated if it already exists", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + k8sclient := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, k8sclient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, k8sclient, mdb) + assert.NoError(t, err) + + // Create operator-managed secret + s := secret.Builder(). + SetName(mdb.TLSOperatorSecretNamespacedName().Name). + SetNamespace(mdb.TLSOperatorSecretNamespacedName().Namespace). + SetField(tlsOperatorSecretFileName(""), ""). + Build() + err = k8sclient.CreateSecret(ctx, s) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(k8sclient), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been updated with the concatenated + // certificate and key. 
+ expectedCertificateKey := "CERT\nKEY" + certificateKey, err := secret.ReadKey(ctx, k8sclient, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + assert.NoError(t, err) + assert.Equal(t, expectedCertificateKey, certificateKey) + }) +} + +func TestCombineCertificateAndKey(t *testing.T) { + tests := []struct { + Cert string + Key string + Expected string + }{ + {"CERT", "KEY", "CERT\nKEY"}, + {"CERT\n", "KEY", "CERT\nKEY"}, + {"CERT", "KEY\n", "CERT\nKEY"}, + {"CERT\n", "KEY\n", "CERT\nKEY"}, + {"CERT\n\n\n", "KEY\n\n\n", "CERT\nKEY"}, + } + + for _, test := range tests { + combined := combineCertificateAndKey(test.Cert, test.Key) + assert.Equal(t, test.Expected, combined) + } +} + +func TestPemSupport(t *testing.T) { + ctx := context.Background() + t.Run("Success if only pem is provided", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "", "", "CERT\nKEY") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been created and contains the + // concatenated certificate and key. 
+ expectedCertificateKey := "CERT\nKEY" + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + assert.NoError(t, err) + assert.Equal(t, expectedCertificateKey, certificateKey) + }) + t.Run("Success if pem is equal to cert+key", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "CERT\nKEY") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been created and contains the + // concatenated certificate and key. + expectedCertificateKey := "CERT\nKEY" + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + assert.NoError(t, err) + assert.Equal(t, expectedCertificateKey, certificateKey) + }) + t.Run("Failure if pem is different from cert+key", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT1", "KEY1", "CERT\nKEY") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.Error(t, err) + assert.Contains(t, err.Error(), `if all of "tls.crt", "tls.key" and "tls.pem" are present in the secret, the entry for "tls.pem" must be equal to the concatenation of 
"tls.crt" with "tls.key"`) + }) +} + +func TestTLSConfigReferencesToCACertAreValidated(t *testing.T) { + ctx := context.Background() + type args struct { + caConfigMap *corev1.LocalObjectReference + caCertificateSecret *corev1.LocalObjectReference + expectedError error + } + tests := map[string]args{ + "Success if reference to CA cert provided via secret": { + caConfigMap: &corev1.LocalObjectReference{ + Name: "certificateKeySecret"}, + caCertificateSecret: nil, + }, + "Success if reference to CA cert provided via config map": { + caConfigMap: nil, + caCertificateSecret: &corev1.LocalObjectReference{ + Name: "caConfigMap"}, + }, + "Succes if reference to CA cert provided both via secret and configMap": { + caConfigMap: &corev1.LocalObjectReference{ + Name: "certificateKeySecret"}, + caCertificateSecret: &corev1.LocalObjectReference{ + Name: "caConfigMap"}, + }, + "Failure if reference to CA cert is missing": { + caConfigMap: nil, + caCertificateSecret: nil, + expectedError: errors.New("TLS field requires a reference to the CA certificate which signed the server certificates. 
Neither secret (field caCertificateSecretRef) not configMap (field CaConfigMap) reference present"), + }, + } + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + mdb := newTestReplicaSetWithTLSCaCertificateReferences(tc.caConfigMap, tc.caCertificateSecret) + + mgr := kubeClient.NewManager(ctx, &mdb) + cli := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, cli, mdb, "cert", "key", "pem") + + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + _, err = r.validateTLSConfig(ctx, mdb) + if tc.expectedError != nil { + assert.EqualError(t, err, tc.expectedError.Error()) + } else { + assert.NoError(t, err) + } + }) + } + +} + +func createTLSConfigMap(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error { + if !mdb.Spec.Security.TLS.Enabled { + return nil + } + + configMap := configmap.Builder(). + SetName(mdb.Spec.Security.TLS.CaConfigMap.Name). + SetNamespace(mdb.Namespace). + SetDataField("ca.crt", "CERT"). + Build() + + return c.Create(ctx, &configMap) +} + +func createTLSSecretWithNamespaceAndName(ctx context.Context, c k8sClient.Client, namespace string, name string, crt string, key string, pem string) error { + sBuilder := secret.Builder(). + SetName(name). + SetNamespace(namespace). 
+ SetField(tlsCACertName, "CERT") + + if crt != "" { + sBuilder.SetField(tlsSecretCertName, crt) + } + if key != "" { + sBuilder.SetField(tlsSecretKeyName, key) + } + if pem != "" { + sBuilder.SetField(tlsSecretPemName, pem) + } + + s := sBuilder.Build() + return c.Create(ctx, &s) +} + +func createTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Security.TLS.CertificateKeySecret.Name, crt, key, pem) +} + +func createAgentCertSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificateSecretNamespacedName().Name, crt, key, pem) +} + +func createAgentCertPemSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificatePemSecretNamespacedName().Name, crt, key, pem) +} + +func createPrometheusTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Prometheus.TLSSecretRef.Name, crt, key, pem) +} + +func createUserPasswordSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, userPasswordSecretName string, password string) error { + sBuilder := secret.Builder(). + SetName(userPasswordSecretName). + SetNamespace(mdb.Namespace). 
	SetField("password", password)

	s := sBuilder.Build()
	return c.Create(ctx, &s)
}
diff --git a/controllers/mongodb_users.go b/controllers/mongodb_users.go
new file mode 100644
index 000000000..cd99734ba
--- /dev/null
+++ b/controllers/mongodb_users.go
@@ -0,0 +1,91 @@
package controllers

import (
	"context"
	"fmt"

	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret"
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants"
	apiErrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
)

// ensureUserResources checks that the configured user password secrets can be found
// and starts monitoring them so that the reconcile process is triggered every time
// these secrets are updated. Users whose Database is constants.ExternalDB are skipped
// entirely (no password secret is expected for them).
//
// Returns an error only when neither the password secret nor the SCRAM credentials
// secret for a user can be found, or when reading the password secret fails for a
// reason other than NotFound.
func (r ReplicaSetReconciler) ensureUserResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error {
	for _, user := range mdb.GetAuthUsers() {
		if user.Database != constants.ExternalDB {
			secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace}
			if _, err := secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName); err != nil {
				if apiErrors.IsNotFound(err) {
					// Password secret is missing: tolerate it as long as the SCRAM
					// credentials secret already exists (the user was provisioned before).
					// check for SCRAM secret as well
					scramSecretName := types.NamespacedName{Name: user.ScramCredentialsSecretName, Namespace: mdb.Namespace}
					_, err = r.client.GetSecret(ctx, scramSecretName)
					if apiErrors.IsNotFound(err) {
						return fmt.Errorf(`user password secret: %s and scram secret: %s not found`, secretNamespacedName, scramSecretName)
					}
					// NOTE(review): at this point err is either nil (SCRAM secret found) or a
					// non-NotFound GetSecret error; both are logged and the user is skipped
					// without registering a watch — confirm the non-NotFound case is intended
					// to be best-effort rather than returned.
					r.log.Errorf(`user password secret "%s" not found: %s`, secretNamespacedName, err)
					continue
				}
				return err
			}
			// Re-reconcile whenever the password secret changes.
			r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName())
		}
	}

	return nil
}

// updateConnectionStringSecrets updates secrets where user specific connection strings are stored.
// The client applications can mount these secrets and connect to the mongodb cluster.
// One secret per configured auth user is created (or updated), in the user's
// ConnectionStringSecretNamespace when set, otherwise in the resource namespace.
// Secrets that already exist but are not owned by this MongoDBCommunity resource
// are treated as an error rather than overwritten.
func (r ReplicaSetReconciler) updateConnectionStringSecrets(ctx context.Context, mdb mdbv1.MongoDBCommunity, clusterDomain string) error {
	for _, user := range mdb.GetAuthUsers() {
		secretName := user.ConnectionStringSecretName

		secretNamespace := mdb.Namespace
		if user.ConnectionStringSecretNamespace != "" {
			secretNamespace = user.ConnectionStringSecretNamespace
		}

		existingSecret, err := r.client.GetSecret(ctx, types.NamespacedName{
			Name:      secretName,
			Namespace: secretNamespace,
		})
		if err != nil && !apiErrors.IsNotFound(err) {
			return err
		}
		// Refuse to clobber a pre-existing secret that the operator does not own.
		if err == nil && !secret.HasOwnerReferences(existingSecret, mdb.GetOwnerReferences()) {
			return fmt.Errorf("connection string secret %s already exists and is not managed by the operator", secretName)
		}

		// External (e.g. X.509) users have no password secret; the connection
		// string is built with an empty password in that case.
		pwd := ""

		if user.Database != constants.ExternalDB {
			secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace}
			pwd, err = secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName)
			if err != nil {
				return err
			}
		}

		connectionStringSecret := secret.Builder().
			SetName(secretName).
			SetNamespace(secretNamespace).
			SetField("connectionString.standard", mdb.MongoAuthUserURI(user, pwd, clusterDomain)).
			SetField("connectionString.standardSrv", mdb.MongoAuthUserSRVURI(user, pwd, clusterDomain)).
			SetField("username", user.Username).
			SetField("password", pwd).
			SetOwnerReferences(mdb.GetOwnerReferences()).
			Build()

		if err := secret.CreateOrUpdate(ctx, r.client, connectionStringSecret); err != nil {
			return err
		}

		// Watch the connection string secret itself so external edits trigger a reconcile.
		secretNamespacedName := types.NamespacedName{Name: connectionStringSecret.Name, Namespace: connectionStringSecret.Namespace}
		r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName())
	}

	return nil
}
diff --git a/pkg/controller/predicates/predicates.go b/controllers/predicates/predicates.go
similarity index 79%
rename from pkg/controller/predicates/predicates.go
rename to controllers/predicates/predicates.go
index 191b59225..9acc314b6 100644
--- a/pkg/controller/predicates/predicates.go
+++ b/controllers/predicates/predicates.go
@@ -3,7 +3,7 @@ package predicates
 import (
 	"reflect"

-	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1"
+	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 )
@@ -15,8 +15,8 @@ import (
 func OnlyOnSpecChange() predicate.Funcs {
 	return predicate.Funcs{
 		UpdateFunc: func(e event.UpdateEvent) bool {
-			oldResource := e.ObjectOld.(*mdbv1.MongoDB)
-			newResource := e.ObjectNew.(*mdbv1.MongoDB)
+			oldResource := e.ObjectOld.(*mdbv1.MongoDBCommunity)
+			newResource := e.ObjectNew.(*mdbv1.MongoDBCommunity)
 			specChanged := !reflect.DeepEqual(oldResource.Spec, newResource.Spec)
 			return specChanged
 		},
diff --git a/controllers/prometheus.go b/controllers/prometheus.go
new file mode 100644
index 000000000..cebe939fe
--- /dev/null
+++ b/controllers/prometheus.go
@@ -0,0 +1,79 @@
package controllers

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"

	mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig"
	"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret"

	"k8s.io/apimachinery/pkg/types"
)

const (
	// Keep in sync with api/v1/mongodbcommunity_types.go
	DefaultPrometheusPort = 9216
	// ListenAddress is the interface the Prometheus exporter binds to.
	ListenAddress = "0.0.0.0"
)

// getPrometheusModification returns an AutomationConfig modification that adds
// Prometheus exporter configuration, or a no-op modification (and nil error)
// when Spec.Prometheus is not set.
//
// The exporter password is read from the secret referenced by PasswordSecretRef.
// If TLSSecretRef is set, the exporter is configured for HTTPS using the
// operator-managed PEM file path; otherwise plain HTTP is used.
func getPrometheusModification(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) {
	// Prometheus not requested: leave the automation config untouched.
	if mdb.Spec.Prometheus == nil {
		return automationconfig.NOOP(), nil
	}

	secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace}
	password, err := secret.ReadKey(ctx, getUpdateCreator, mdb.Spec.Prometheus.GetPasswordKey(), secretNamespacedName)
	if err != nil {
		return automationconfig.NOOP(), fmt.Errorf("could not configure Prometheus modification: %s", err)
	}

	var certKey string
	var tlsPEMPath string
	var scheme string

	if mdb.Spec.Prometheus.TLSSecretRef.Name != "" {
		// TLS requested: resolve the PEM (or concatenated crt+key) content so the
		// mounted file name can be derived from its hash.
		certKey, err = getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.PrometheusTLSSecretNamespacedName())
		if err != nil {
			return automationconfig.NOOP(), err
		}
		tlsPEMPath = tlsPrometheusSecretMountPath + tlsOperatorSecretFileName(certKey)
		scheme = "https"
	} else {
		scheme = "http"
	}

	// The returned closure mutates the automation config at apply time.
	return func(config *automationconfig.AutomationConfig) {
		promConfig := automationconfig.NewDefaultPrometheus(mdb.Spec.Prometheus.Username)

		promConfig.TLSPemPath = tlsPEMPath
		promConfig.Scheme = scheme
		promConfig.Password = password

		// Only override the default listen address when an explicit port is set.
		if mdb.Spec.Prometheus.Port > 0 {
			promConfig.ListenAddress = fmt.Sprintf("%s:%d", ListenAddress, mdb.Spec.Prometheus.Port)
		}

		if mdb.Spec.Prometheus.MetricsPath != "" {
			promConfig.MetricsPath = mdb.Spec.Prometheus.MetricsPath
		}

		config.Prometheus = &promConfig
	}, nil
}

// prometheusPort returns a `corev1.ServicePort` to be configured in the StatefulSet
// for the Prometheus endpoint. This function will only return a new Port when
// Prometheus has been configured, and nil otherwise.
+func prometheusPort(mdb mdbv1.MongoDBCommunity) *corev1.ServicePort { + if mdb.Spec.Prometheus != nil { + return &corev1.ServicePort{ + Port: int32(mdb.Spec.Prometheus.GetPort()), + Name: "prometheus", + } + } + return nil +} diff --git a/controllers/replica_set_controller.go b/controllers/replica_set_controller.go new file mode 100644 index 000000000..cf3e9d526 --- /dev/null +++ b/controllers/replica_set_controller.go @@ -0,0 +1,799 @@ +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + + "github.com/imdario/mergo" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/predicates" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/validation" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/watch" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/agent" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" + kubernetesClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/functions" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + "github.com/stretchr/objx" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 
"k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + clusterDomain = "CLUSTER_DOMAIN" + + lastSuccessfulConfiguration = "mongodb.com/v1.lastSuccessfulConfiguration" + lastAppliedMongoDBVersion = "mongodb.com/v1.lastAppliedMongoDBVersion" +) + +func init() { + logger, err := zap.NewDevelopment() + if err != nil { + os.Exit(1) + } + zap.ReplaceGlobals(logger) +} + +func NewReconciler(mgr manager.Manager, mongodbRepoUrl, mongodbImage, mongodbImageType, agentImage, versionUpgradeHookImage, readinessProbeImage string) *ReplicaSetReconciler { + mgrClient := mgr.GetClient() + secretWatcher := watch.New() + configMapWatcher := watch.New() + return &ReplicaSetReconciler{ + client: kubernetesClient.NewClient(mgrClient), + scheme: mgr.GetScheme(), + log: zap.S(), + secretWatcher: &secretWatcher, + configMapWatcher: &configMapWatcher, + + mongodbRepoUrl: mongodbRepoUrl, + mongodbImage: mongodbImage, + mongodbImageType: mongodbImageType, + agentImage: agentImage, + versionUpgradeHookImage: versionUpgradeHookImage, + readinessProbeImage: readinessProbeImage, + } +} + +// SetupWithManager sets up the controller with the Manager and configures the necessary watches. +func (r *ReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: 3}). + For(&mdbv1.MongoDBCommunity{}, builder.WithPredicates(predicates.OnlyOnSpecChange())). + Watches(&corev1.Secret{}, r.secretWatcher). 
		Watches(&corev1.ConfigMap{}, r.configMapWatcher).
		Owns(&appsv1.StatefulSet{}).
		Complete(r)
}

// ReplicaSetReconciler reconciles a MongoDB ReplicaSet
type ReplicaSetReconciler struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client           kubernetesClient.Client
	scheme           *runtime.Scheme
	log              *zap.SugaredLogger
	secretWatcher    *watch.ResourceWatcher
	configMapWatcher *watch.ResourceWatcher

	// Container image coordinates used when building the StatefulSet
	// (see createOrUpdateStatefulSet) and when deciding whether the
	// deployment is an enterprise one (see guessEnterprise).
	mongodbRepoUrl          string
	mongodbImage            string
	mongodbImageType        string
	agentImage              string
	versionUpgradeHookImage string
	readinessProbeImage     string
}

// +kubebuilder:rbac:groups=mongodbcommunity.mongodb.com,resources=mongodbcommunity,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=mongodbcommunity.mongodb.com,resources=mongodbcommunity/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=mongodbcommunity.mongodb.com,resources=mongodbcommunity/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list

// Reconcile reads that state of the cluster for a MongoDB object and makes changes based on the state read
// and what is in the MongoDB.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {

	// TODO: generalize preparation for resource
	// Fetch the MongoDB instance
	mdb := mdbv1.MongoDBCommunity{}
	err := r.client.Get(ctx, request.NamespacedName, &mdb)
	if err != nil {
		if apiErrors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			return result.OK()
		}
		r.log.Errorf("Error reconciling MongoDB resource: %s", err)
		// Error reading the object - requeue the request.
		return result.Failed()
	}

	r.log = zap.S().With("ReplicaSet", request.NamespacedName)
	r.log.Infof("Reconciling MongoDB")

	r.log.Debug("Validating MongoDB.Spec")
	// lastAppliedSpec is the spec of the last successful configuration (nil on the
	// very first reconciliation); it is reused below for deploy and for cleanup of
	// secrets the new spec no longer needs.
	lastAppliedSpec, err := r.validateSpec(mdb)
	if err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("error validating new Spec: %s", err)).
			withFailedPhase())
	}

	r.log.Debug("Ensuring the service exists")
	if err := r.ensureService(ctx, mdb); err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error ensuring the service (members) exists: %s", err)).
			withFailedPhase())
	}

	isTLSValid, err := r.validateTLSConfig(ctx, mdb)
	if err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)).
			withFailedPhase())
	}

	if !isTLSValid {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds").
			withPendingPhase(10))
	}

	if err := r.ensureTLSResources(ctx, mdb); err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)).
			withFailedPhase())
	}

	if err := r.ensurePrometheusTLSResources(ctx, mdb); err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)).
			withFailedPhase())
	}

	if err := r.ensureUserResources(ctx, mdb); err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error ensuring User config: %s", err)).
			withFailedPhase())
	}

	ready, err := r.deployMongoDBReplicaSet(ctx, mdb, lastAppliedSpec)
	if err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error deploying MongoDB ReplicaSet: %s", err)).
			withFailedPhase())
	}

	if !ready {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Info, "ReplicaSet is not yet ready, retrying in 10 seconds").
			withPendingPhase(10))
	}

	r.log.Debug("Resetting StatefulSet UpdateStrategy to RollingUpdate")
	if err := statefulset.ResetUpdateStrategy(ctx, &mdb, r.client); err != nil {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)).
			withFailedPhase())
	}

	// scaling proceeds one member at a time, so keep requeueing until the
	// desired member/arbiter counts are reached.
	if mdb.IsStillScaling() {
		return status.Update(ctx, r.client.Status(), &mdb, statusOptions().
			withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()).
			withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d",
				mdb.CurrentReplicas(), mdb.DesiredReplicas())).
			withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()).
			withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()).
			withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()).
			withPendingPhase(10))
	}

	res, err := status.Update(ctx, r.client.Status(), &mdb, statusOptions().
		withMongoURI(mdb.MongoURI(os.Getenv(clusterDomain))). // nolint:forbidigo
		withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()).
		withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()).
		withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()).
		withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()).
		withMessage(None, "").
		withRunningPhase().
		withVersion(mdb.GetMongoDBVersion()))
	if err != nil {
		r.log.Errorf("Error updating the status of the MongoDB resource: %s", err)
		return res, err
	}

	// connection-string secret and annotation failures below are logged
	// but deliberately do not fail the reconciliation.
	if err := r.updateConnectionStringSecrets(ctx, mdb, os.Getenv(clusterDomain)); err != nil { // nolint:forbidigo
		r.log.Errorf("Could not update connection string secrets: %s", err)
	}

	if lastAppliedSpec != nil {
		r.cleanupScramSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace)
		r.cleanupPemSecret(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace)
		r.cleanupConnectionStringSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace, mdb.Name)
	}

	if err := r.updateLastSuccessfulConfiguration(ctx, mdb); err != nil {
		r.log.Errorf("Could not save current spec as an annotation: %s", err)
	}

	if res.RequeueAfter > 0 || res.Requeue {
		r.log.Info("Requeuing reconciliation")
		return res, nil
	}

	r.log.Infof("Successfully finished reconciliation, MongoDB.Spec: %+v, MongoDB.Status: %+v", mdb.Spec, mdb.Status)
	return res, err
}

// updateLastSuccessfulConfiguration annotates the MongoDBCommunity resource with the latest configuration
func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(ctx context.Context, mdb mdbv1.MongoDBCommunity) error {
	currentSpec, err := json.Marshal(mdb.Spec)
	if err != nil {
		return err
	}

	specAnnotations := map[string]string{
		lastSuccessfulConfiguration: string(currentSpec),
		// the last version will be duplicated in two annotations.
		// This is needed to reuse the update strategy logic in enterprise
		lastAppliedMongoDBVersion: mdb.Spec.Version,
	}
	return annotations.SetAnnotations(ctx, &mdb, specAnnotations, r.client)
}

// ensureTLSResources creates any required TLS resources that the MongoDBCommunity
// requires for TLS configuration.
+func (r *ReplicaSetReconciler) ensureTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + if !mdb.Spec.Security.TLS.Enabled { + return nil + } + // the TLS secret needs to be created beforehand, as both the StatefulSet and AutomationConfig + // require the contents. + if mdb.Spec.Security.TLS.Enabled { + r.log.Infof("TLS is enabled, creating/updating CA secret") + if err := ensureCASecret(ctx, r.client, r.client, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure CA secret: %s", err) + } + r.log.Infof("TLS is enabled, creating/updating TLS secret") + if err := ensureTLSSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure TLS secret: %s", err) + } + if mdb.Spec.IsAgentX509() { + r.log.Infof("Agent X509 authentication is enabled, creating/updating agent certificate secret") + if err := ensureAgentCertSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure Agent Certificate secret: %s", err) + } + } + } + return nil +} + +// ensurePrometheusTLSResources creates any required TLS resources that the MongoDBCommunity +// requires for TLS configuration. +func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.Prometheus == nil || mdb.Spec.Prometheus.TLSSecretRef.Name == "" { + return nil + } + + // the TLS secret needs to be created beforehand, as both the StatefulSet and AutomationConfig + // require the contents. + r.log.Infof("Prometheus TLS is enabled, creating/updating TLS secret") + if err := ensurePrometheusTLSSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure TLS secret: %s", err) + } + + return nil +} + +// deployStatefulSet deploys the backing StatefulSet of the MongoDBCommunity resource. +// +// When `Spec.Arbiters` > 0, a second StatefulSet will be created, with the amount +// of Pods corresponding to the amount of expected arbiters. 
+// +// The returned boolean indicates that the StatefulSet is ready. +func (r *ReplicaSetReconciler) deployStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { + r.log.Info("Creating/Updating StatefulSet") + if err := r.createOrUpdateStatefulSet(ctx, mdb, false); err != nil { + return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) + } + + r.log.Info("Creating/Updating StatefulSet for Arbiters") + if err := r.createOrUpdateStatefulSet(ctx, mdb, true); err != nil { + return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) + } + + currentSts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) + if err != nil { + return false, fmt.Errorf("error getting StatefulSet: %s", err) + } + + r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", mdb.GetUpdateStrategyType()) + + isReady := statefulset.IsReady(currentSts, mdb.StatefulSetReplicasThisReconciliation()) + + return isReady || currentSts.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType, nil +} + +// deployAutomationConfig deploys the AutomationConfig for the MongoDBCommunity resource. +// The returned boolean indicates whether or not that Agents have all reached goal state. +func (r *ReplicaSetReconciler) deployAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { + r.log.Infof("Creating/Updating AutomationConfig") + + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) + if err != nil && !apiErrors.IsNotFound(err) { + return false, fmt.Errorf("failed to get StatefulSet: %s", err) + } + + ac, err := r.ensureAutomationConfig(mdb, ctx, lastAppliedSpec) + if err != nil { + return false, fmt.Errorf("failed to ensure AutomationConfig: %s", err) + } + + // the StatefulSet has not yet been created, so the next stage of reconciliation will be + // creating the StatefulSet and ensuring it reaches the Running phase. 
+ if apiErrors.IsNotFound(err) { + return true, nil + } + + if isPreReadinessInitContainerStatefulSet(sts) { + r.log.Debugf("The existing StatefulSet did not have the readiness probe init container, skipping pod annotation check.") + return true, nil + } + + r.log.Debugf("Waiting for agents to reach version %d", ac.Version) + // Note: we pass in the expected number of replicas this reconciliation as we scale members one at a time. If we were + // to pass in the final member count, we would be waiting for agents that do not exist yet to be ready. + ready, err := agent.AllReachedGoalState(ctx, sts, r.client, mdb.StatefulSetReplicasThisReconciliation(), ac.Version, r.log) + if err != nil { + return false, fmt.Errorf("failed to ensure agents have reached goal state: %s", err) + } + + return ready, nil +} + +// shouldRunInOrder returns true if the order of execution of the AutomationConfig & StatefulSet +// functions should be sequential or not. A value of false indicates they will run in reversed order. +func (r *ReplicaSetReconciler) shouldRunInOrder(ctx context.Context, mdb mdbv1.MongoDBCommunity) bool { + // The only case when we push the StatefulSet first is when we are ensuring TLS for the already existing ReplicaSet + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) + if !statefulset.IsReady(sts, mdb.StatefulSetReplicasThisReconciliation()) && mdb.Spec.Security.TLS.Enabled { + r.log.Debug("Enabling TLS on a deployment with a StatefulSet that is not Ready, the Automation Config must be updated first") + return true + } + if err == nil && mdb.Spec.Security.TLS.Enabled { + r.log.Debug("Enabling TLS on an existing deployment, the StatefulSet must be updated first") + return false + } + + // if we are scaling up, we need to make sure the StatefulSet is scaled up first. 
+ if scale.IsScalingUp(&mdb) || mdb.CurrentArbiters() < mdb.DesiredArbiters() { + if scale.HasZeroReplicas(&mdb) { + r.log.Debug("Scaling up the ReplicaSet when there is no replicas, the Automation Config must be updated first") + return true + } + r.log.Debug("Scaling up the ReplicaSet, the StatefulSet must be updated first") + return false + } + + if scale.IsScalingDown(&mdb) { + r.log.Debug("Scaling down the ReplicaSet, the Automation Config must be updated first") + return true + } + + // when we change version, we need the StatefulSet images to be updated first, then the agent can get to goal + // state on the new version. + if mdb.IsChangingVersion() { + r.log.Debug("Version change in progress, the StatefulSet must be updated first") + return false + } + + return true +} + +// deployMongoDBReplicaSet will ensure that both the AutomationConfig secret and backing StatefulSet +// have been successfully created. A boolean is returned indicating if the process is complete +// and an error if there was one. +func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { + return functions.RunSequentially(r.shouldRunInOrder(ctx, mdb), + func() (bool, error) { + return r.deployAutomationConfig(ctx, mdb, lastAppliedSpec) + }, + func() (bool, error) { + return r.deployStatefulSet(ctx, mdb) + }) +} + +// ensureService creates a Service unless it already exists. +// +// The Service definition is built from the `mdb` resource. If `isArbiter` is set to true, the Service +// will be created for the arbiters Statefulset. 
+func (r *ReplicaSetReconciler) ensureService(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + processPortManager, err := r.createProcessPortManager(ctx, mdb) + if err != nil { + return err + } + + svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: mdb.ServiceName(), Namespace: mdb.Namespace}} + op, err := controllerutil.CreateOrUpdate(ctx, r.client, svc, func() error { + resourceVersion := svc.ResourceVersion // Save resourceVersion for later + *svc = r.buildService(mdb, processPortManager) + svc.ResourceVersion = resourceVersion + return nil + }) + if err != nil { + r.log.Errorf("Could not create or patch the service: %s", err) + return nil + } + + r.log.Infow("Create/Update operation succeeded", "operation", op) + + return err +} + +// createProcessPortManager is a helper method for creating new ReplicaSetPortManager. +// ReplicaSetPortManager needs current automation config and current pod state and the code for getting them +// was extracted here as it is used in ensureService and buildAutomationConfig. 
+func (r *ReplicaSetReconciler) createProcessPortManager(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*agent.ReplicaSetPortManager, error) { + currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + if err != nil { + return nil, fmt.Errorf("could not read existing automation config: %s", err) + } + + currentPodStates, err := agent.GetAllDesiredMembersAndArbitersPodState(ctx, mdb.NamespacedName(), r.client, mdb.StatefulSetReplicasThisReconciliation(), mdb.StatefulSetArbitersThisReconciliation(), currentAC.Version, r.log) + if err != nil { + return nil, fmt.Errorf("cannot get all pods goal state: %w", err) + } + + return agent.NewReplicaSetPortManager(r.log, mdb.Spec.AdditionalMongodConfig.GetDBPort(), currentPodStates, currentAC.Processes), nil +} + +func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, isArbiter bool) error { + set := appsv1.StatefulSet{} + + name := mdb.NamespacedName() + if isArbiter { + name = mdb.ArbiterNamespacedName() + } + + err := r.client.Get(ctx, name, &set) + err = k8sClient.IgnoreNotFound(err) + if err != nil { + return fmt.Errorf("error getting StatefulSet: %s", err) + } + + mongodbImage := getMongoDBImage(r.mongodbRepoUrl, r.mongodbImage, r.mongodbImageType, mdb.GetMongoDBVersion()) + buildStatefulSetModificationFunction(mdb, mongodbImage, r.agentImage, r.versionUpgradeHookImage, r.readinessProbeImage)(&set) + if isArbiter { + buildArbitersModificationFunction(mdb)(&set) + } + + if _, err = statefulset.CreateOrUpdate(ctx, r.client, set); err != nil { + return fmt.Errorf("error creating/updating StatefulSet: %s", err) + } + return nil +} + +// ensureAutomationConfig makes sure the AutomationConfig secret has been successfully created. The automation config +// that was updated/created is returned. 
func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDBCommunity, ctx context.Context, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) {
	ac, err := r.buildAutomationConfig(ctx, mdb, lastAppliedSpec)
	if err != nil {
		return automationconfig.AutomationConfig{}, fmt.Errorf("could not build automation config: %s", err)
	}

	return automationconfig.EnsureSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, mdb.GetOwnerReferences(), ac)
}

// buildAutomationConfig assembles the automation config for this reconciliation from
// the resource spec, the previous automation config, the authentication settings and
// any extra modifications supplied by the caller.
func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, isEnterprise bool, auth automationconfig.Auth, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) {
	domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo
	// NOTE(review): arbiterDomain is computed from the same service name as domain,
	// so both are identical — presumably arbiters share the members' service; confirm.
	arbiterDomain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo

	zap.S().Debugw("AutomationConfigMembersThisReconciliation", "mdb.AutomationConfigMembersThisReconciliation()", mdb.AutomationConfigMembersThisReconciliation())

	arbitersCount := mdb.AutomationConfigArbitersThisReconciliation()
	if mdb.AutomationConfigMembersThisReconciliation() < mdb.Spec.Members {
		// Have not reached desired amount of members yet, should not scale arbiters
		arbitersCount = mdb.Status.CurrentMongoDBArbiters
	}

	var acOverrideSettings map[string]interface{}
	var acReplicaSetId *string
	if mdb.Spec.AutomationConfigOverride != nil {
		acOverrideSettings = mdb.Spec.AutomationConfigOverride.ReplicaSet.Settings.Object
		acReplicaSetId = mdb.Spec.AutomationConfigOverride.ReplicaSet.Id
	}

	return automationconfig.NewBuilder().
		IsEnterprise(isEnterprise).
		SetTopology(automationconfig.ReplicaSetTopology).
		SetName(mdb.Name).
		SetDomain(domain).
		SetArbiterDomain(arbiterDomain).
		SetMembers(mdb.AutomationConfigMembersThisReconciliation()).
		SetArbiters(arbitersCount).
		SetReplicaSetHorizons(mdb.Spec.ReplicaSetHorizons).
		SetPreviousAutomationConfig(currentAc).
		SetMongoDBVersion(mdb.Spec.Version).
		SetFCV(mdb.Spec.FeatureCompatibilityVersion).
		SetOptions(automationconfig.Options{DownloadBase: "/var/lib/mongodb-mms-automation"}).
		SetAuth(auth).
		SetReplicaSetId(acReplicaSetId).
		SetSettings(acOverrideSettings).
		SetMemberOptions(mdb.Spec.MemberConfig).
		SetDataDir(mdb.GetMongodConfiguration().GetDBDataDir()).
		AddModifications(getMongodConfigModification(mdb)).
		AddModifications(modifications...).
		AddProcessModification(func(_ int, p *automationconfig.Process) {
			automationconfig.ConfigureAgentConfiguration(mdb.Spec.AgentConfiguration.SystemLog, mdb.Spec.AgentConfiguration.LogRotate, mdb.Spec.AgentConfiguration.AuditLogRotate, p)
		}).
		Build()
}

// guessEnterprise decides whether the deployment should be treated as MongoDB
// Enterprise: an explicit env-var override wins, then a mongod container image
// override in the StatefulSet spec, and finally the operator-configured image name.
func guessEnterprise(mdb mdbv1.MongoDBCommunity, mongodbImage string) bool {
	// explicit override via environment variable takes precedence when it parses as a bool
	overrideAssumption, err := strconv.ParseBool(os.Getenv(construct.MongoDBAssumeEnterpriseEnv)) // nolint:forbidigo
	if err == nil {
		return overrideAssumption
	}

	// otherwise look for an image override on the mongod container in the
	// user-supplied StatefulSet configuration (last matching container wins)
	var overriddenImage string
	containers := mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.Spec.Containers
	if len(containers) > 0 {
		for _, c := range containers {
			if c.Name == construct.MongodbName {
				if len(c.Image) > 0 {
					overriddenImage = c.Image
				}
			}
		}
	}
	if len(overriddenImage) > 0 {
		return strings.Contains(overriddenImage, construct.OfficialMongodbEnterpriseServerImageName)
	}
	return mongodbImage == construct.OfficialMongodbEnterpriseServerImageName
}

// buildService creates a Service that will be used for the Replica Set StatefulSet
// that allows all the members of the STS to see each other.
+func (r *ReplicaSetReconciler) buildService(mdb mdbv1.MongoDBCommunity, portManager *agent.ReplicaSetPortManager) corev1.Service { + label := make(map[string]string) + name := mdb.ServiceName() + + label["app"] = name + + serviceBuilder := service.Builder(). + SetName(name). + SetNamespace(mdb.Namespace). + SetSelector(label). + SetLabels(label). + SetServiceType(corev1.ServiceTypeClusterIP). + SetClusterIP("None"). + SetPublishNotReadyAddresses(true). + SetOwnerReferences(mdb.GetOwnerReferences()) + + for _, servicePort := range portManager.GetServicePorts() { + tmpServicePort := servicePort + serviceBuilder.AddPort(&tmpServicePort) + } + + serviceBuilder.AddPort(prometheusPort(mdb)) + + return serviceBuilder.Build() +} + +// validateSpec checks if the MongoDB resource Spec is valid. +// If there has not yet been a successful configuration, the function runs the initial Spec validations. Otherwise, +// it checks that the attempted Spec is valid in relation to the Spec that resulted from that last successful configuration. +// The validation also returns the lastSuccessFulConfiguration Spec as mdbv1.MongoDBCommunitySpec. 
func (r ReplicaSetReconciler) validateSpec(mdb mdbv1.MongoDBCommunity) (*mdbv1.MongoDBCommunitySpec, error) {
	lastSuccessfulConfigurationSaved, ok := mdb.Annotations[lastSuccessfulConfiguration]
	if !ok {
		// First version of Spec
		return nil, validation.ValidateInitialSpec(mdb, r.log)
	}

	lastSpec := mdbv1.MongoDBCommunitySpec{}
	err := json.Unmarshal([]byte(lastSuccessfulConfigurationSaved), &lastSpec)
	if err != nil {
		return &lastSpec, err
	}

	return &lastSpec, validation.ValidateUpdate(mdb, lastSpec, r.log)
}

// getCustomRolesModification returns an automation-config modification that installs
// the custom roles declared in the resource's security spec; a no-op when none are set.
func getCustomRolesModification(mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) {
	roles := mdb.Spec.Security.Roles
	if roles == nil {
		return automationconfig.NOOP(), nil
	}

	return func(config *automationconfig.AutomationConfig) {
		config.Roles = mdbv1.ConvertCustomRolesToAutomationConfigCustomRole(roles)
	}, nil
}

// buildAutomationConfig gathers everything the free buildAutomationConfig function
// needs — TLS/custom-role/Prometheus/port modifications, authentication settings and
// the previous automation config — builds the new config, and applies any
// spec-level AutomationConfigOverride on top.
func (r ReplicaSetReconciler) buildAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) {
	tlsModification, err := getTLSConfigModification(ctx, r.client, r.client, mdb)
	if err != nil {
		return automationconfig.AutomationConfig{}, fmt.Errorf("could not configure TLS modification: %s", err)
	}

	customRolesModification, err := getCustomRolesModification(mdb)
	if err != nil {
		return automationconfig.AutomationConfig{}, fmt.Errorf("could not configure custom roles: %s", err)
	}

	currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace})
	if err != nil {
		return automationconfig.AutomationConfig{}, fmt.Errorf("could not read existing automation config: %s", err)
	}

	auth := automationconfig.Auth{}
	if err := authentication.Enable(ctx, &auth, r.client, &mdb, mdb.AgentCertificateSecretNamespacedName()); err != nil {
		return automationconfig.AutomationConfig{}, err
	}

	// users removed since the last applied spec must be disabled in the new config
	if lastAppliedSpec != nil {
		authentication.AddRemovedUsers(&auth, mdb, lastAppliedSpec)
	}

	prometheusModification := automationconfig.NOOP()
	if mdb.Spec.Prometheus != nil {
		// watch the password secret so credential rotations trigger a reconcile
		secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace}
		r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName())

		prometheusModification, err = getPrometheusModification(ctx, r.client, mdb)
		if err != nil {
			return automationconfig.AutomationConfig{}, fmt.Errorf("could not enable TLS on Prometheus endpoint: %s", err)
		}
	}

	if mdb.Spec.IsAgentX509() {
		r.secretWatcher.Watch(ctx, mdb.AgentCertificateSecretNamespacedName(), mdb.NamespacedName())
		r.secretWatcher.Watch(ctx, mdb.AgentCertificatePemSecretNamespacedName(), mdb.NamespacedName())
	}

	processPortManager, err := r.createProcessPortManager(ctx, mdb)
	if err != nil {
		return automationconfig.AutomationConfig{}, err
	}

	automationConfig, err := buildAutomationConfig(
		mdb,
		guessEnterprise(mdb, r.mongodbImage),
		auth,
		currentAC,
		tlsModification,
		customRolesModification,
		prometheusModification,
		processPortManager.GetPortsModification(),
	)

	if err != nil {
		return automationconfig.AutomationConfig{}, fmt.Errorf("could not create an automation config: %s", err)
	}

	if mdb.Spec.AutomationConfigOverride != nil {
		automationConfig = merge.AutomationConfigs(automationConfig, OverrideToAutomationConfig(*mdb.Spec.AutomationConfigOverride))
	}

	return automationConfig, nil
}

// OverrideToAutomationConfig turns an automation config override from the resource spec into an automation config
// which can be used to merge.
+func OverrideToAutomationConfig(override mdbv1.AutomationConfigOverride) automationconfig.AutomationConfig { + var processes []automationconfig.Process + for _, o := range override.Processes { + p := automationconfig.Process{ + Name: o.Name, + Disabled: o.Disabled, + LogRotate: automationconfig.ConvertCrdLogRotateToAC(o.LogRotate), + } + processes = append(processes, p) + } + + return automationconfig.AutomationConfig{ + Processes: processes, + } +} + +// getMongodConfigModification will merge the additional configuration in the CRD +// into the configuration set up by the operator. +func getMongodConfigModification(mdb mdbv1.MongoDBCommunity) automationconfig.Modification { + return func(ac *automationconfig.AutomationConfig) { + for i := range ac.Processes { + // Mergo requires both objects to have the same type + // TODO: handle this error gracefully, we may need to add an error as second argument for all modification functions + _ = mergo.Merge(&ac.Processes[i].Args26, objx.New(mdb.Spec.AdditionalMongodConfig.Object), mergo.WithOverride) + } + } +} + +// buildStatefulSetModificationFunction takes a MongoDB resource and converts it into +// the corresponding stateful set +func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string) statefulset.Modification { + commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage, true) + return statefulset.Apply( + commonModification, + statefulset.WithOwnerReference(mdb.GetOwnerReferences()), + statefulset.WithPodSpecTemplate( + podtemplatespec.Apply( + buildTLSPodSpecModification(mdb), + buildTLSPrometheus(mdb), + buildAgentX509(mdb), + ), + ), + + statefulset.WithCustomSpecs(mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec), + statefulset.WithObjectMetadata( + mdb.Spec.StatefulSetConfiguration.MetadataWrapper.Labels, + 
mdb.Spec.StatefulSetConfiguration.MetadataWrapper.Annotations, + ), + ) +} + +func buildArbitersModificationFunction(mdb mdbv1.MongoDBCommunity) statefulset.Modification { + return statefulset.Apply( + statefulset.WithReplicas(mdb.StatefulSetArbitersThisReconciliation()), + statefulset.WithServiceName(mdb.ServiceName()), + statefulset.WithName(mdb.ArbiterNamespacedName().Name), + ) +} + +func getDomain(service, namespace, clusterName string) string { + if clusterName == "" { + clusterName = "cluster.local" + } + return fmt.Sprintf("%s.%s.svc.%s", service, namespace, clusterName) +} + +// isPreReadinessInitContainerStatefulSet determines if the existing StatefulSet has been configured with the readiness probe init container. +// if this is not the case, then we should ensure to skip past the annotation check otherwise the pods will remain in pending state forever. +func isPreReadinessInitContainerStatefulSet(sts appsv1.StatefulSet) bool { + return container.GetByName(construct.ReadinessProbeContainerName, sts.Spec.Template.Spec.InitContainers) == nil +} + +func getMongoDBImage(repoUrl, mongodbImage, mongodbImageType, version string) string { + if strings.HasSuffix(repoUrl, "/") { + repoUrl = strings.TrimRight(repoUrl, "/") + } + mongoImageName := mongodbImage + for _, officialUrl := range construct.OfficialMongodbRepoUrls { + if repoUrl == officialUrl { + return fmt.Sprintf("%s/%s:%s-%s", repoUrl, mongoImageName, version, mongodbImageType) + } + } + + // This is the old images backwards compatibility code path. 
+ return fmt.Sprintf("%s/%s:%s", repoUrl, mongoImageName, version) +} diff --git a/controllers/replicaset_controller_test.go b/controllers/replicaset_controller_test.go new file mode 100644 index 000000000..d7f2eb8da --- /dev/null +++ b/controllers/replicaset_controller_test.go @@ -0,0 +1,1513 @@ +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "os" + "reflect" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/stretchr/testify/require" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/yaml" + + "github.com/stretchr/objx" + + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + AgentImage = "fake-agentImage" +) + +func newTestReplicaSet() mdbv1.MongoDBCommunity { + return mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-rs", + Namespace: "my-ns", + 
			Annotations: map[string]string{},
		},
		Spec: mdbv1.MongoDBCommunitySpec{
			Members: 3,
			Version: "6.0.5",
			Security: mdbv1.Security{
				Authentication: mdbv1.Authentication{
					Modes: []mdbv1.AuthMode{"SCRAM"},
				},
			},
		},
	}
}

// newTestReplicaSetWithSystemLogAndLogRotate is the base fixture plus agent
// systemLog and (audit) logRotate configuration.
func newTestReplicaSetWithSystemLogAndLogRotate() mdbv1.MongoDBCommunity {
	return mdbv1.MongoDBCommunity{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "my-rs",
			Namespace:   "my-ns",
			Annotations: map[string]string{},
		},
		Spec: mdbv1.MongoDBCommunitySpec{
			Members: 3,
			Version: "6.0.5",
			Security: mdbv1.Security{
				Authentication: mdbv1.Authentication{
					Modes: []mdbv1.AuthMode{"SCRAM"},
				},
			},
			AgentConfiguration: mdbv1.AgentConfiguration{
				LogRotate: &automationconfig.CrdLogRotate{
					SizeThresholdMB: "1",
				},
				AuditLogRotate: &automationconfig.CrdLogRotate{
					SizeThresholdMB: "1",
				},
				SystemLog: &automationconfig.SystemLog{
					Destination: automationconfig.File,
					Path:        "/tmp/test",
				},
			},
		},
	}
}

// newScramReplicaSet builds a SCRAM-enabled fixture with the given users.
func newScramReplicaSet(users ...mdbv1.MongoDBUser) mdbv1.MongoDBCommunity {
	return mdbv1.MongoDBCommunity{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "my-rs",
			Namespace:   "my-ns",
			Annotations: map[string]string{},
		},
		Spec: mdbv1.MongoDBCommunitySpec{
			Users:   users,
			Members: 3,
			Version: "4.2.2",
			Security: mdbv1.Security{
				Authentication: mdbv1.Authentication{
					Modes: []mdbv1.AuthMode{"SCRAM"},
				},
			},
		},
	}
}

// newTestReplicaSetWithTLS builds a TLS-enabled fixture with default CA references.
func newTestReplicaSetWithTLS() mdbv1.MongoDBCommunity {
	return newTestReplicaSetWithTLSCaCertificateReferences(&corev1.LocalObjectReference{
		Name: "caConfigMap",
	},
		&corev1.LocalObjectReference{
			Name: "certificateKeySecret",
		})
}

// newTestReplicaSetWithTLSCaCertificateReferences builds a TLS-enabled fixture
// pointing at the given CA config map / CA certificate secret references.
func newTestReplicaSetWithTLSCaCertificateReferences(caConfigMap, caCertificateSecret *corev1.LocalObjectReference) mdbv1.MongoDBCommunity {
	return mdbv1.MongoDBCommunity{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "my-rs",
			Namespace:   "my-ns",
			Annotations: map[string]string{},
		},
		Spec: mdbv1.MongoDBCommunitySpec{
			Members: 3,
			Version: "4.2.2",
			Security: mdbv1.Security{
				Authentication: mdbv1.Authentication{
					Modes: []mdbv1.AuthMode{"SCRAM"},
				},
				TLS: mdbv1.TLS{
					Enabled:             true,
					CaConfigMap:         caConfigMap,
					CaCertificateSecret: caCertificateSecret,
					CertificateKeySecret: corev1.LocalObjectReference{
						Name: "certificateKeySecret",
					},
				},
			},
		},
	}
}

// TestKubernetesResources_AreCreated reconciles a fresh resource and verifies that
// the automation config secret is created with the expected name and payload.
func TestKubernetesResources_AreCreated(t *testing.T) {
	ctx := context.Background()
	// TODO: Create builder/yaml fixture of some type to construct MDB objects for unit tests
	mdb := newTestReplicaSet()

	mgr := client.NewManager(ctx, &mdb)
	r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")

	res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
	assertReconciliationSuccessful(t, res, err)

	s := corev1.Secret{}
	err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &s)
	assert.NoError(t, err)
	assert.Equal(t, mdb.Namespace, s.Namespace)
	assert.Equal(t, mdb.AutomationConfigSecretName(), s.Name)
	assert.Contains(t, s.Data, automationconfig.ConfigKey)
	assert.NotEmpty(t, s.Data[automationconfig.ConfigKey])
}

// TestStatefulSet_IsCorrectlyConfigured verifies the containers, images, probes,
// resources and automation-config volume of the reconciled StatefulSet.
func TestStatefulSet_IsCorrectlyConfigured(t *testing.T) {
	ctx := context.Background()

	mdb := newTestReplicaSet()
	mgr := client.NewManager(ctx, &mdb)
	r := NewReconciler(mgr, "docker.io/mongodb", "mongodb-community-server", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")
	res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}})
	assertReconciliationSuccessful(t, res, err)

	sts := appsv1.StatefulSet{}
	err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts)
	assert.NoError(t, err)

	assert.Len(t, sts.Spec.Template.Spec.Containers, 2)

	agentContainer := sts.Spec.Template.Spec.Containers[1]
	assert.Equal(t, construct.AgentName, agentContainer.Name)
	assert.Equal(t, AgentImage, agentContainer.Image)
	expectedProbe := probes.New(construct.DefaultReadiness())
	assert.True(t, reflect.DeepEqual(&expectedProbe, agentContainer.ReadinessProbe))

	mongodbContainer := sts.Spec.Template.Spec.Containers[0]
	assert.Equal(t, construct.MongodbName, mongodbContainer.Name)
	assert.Equal(t, "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", mongodbContainer.Image)

	assert.Equal(t, resourcerequirements.Defaults(), agentContainer.Resources)

	acVolume, err := getVolumeByName(sts, "automation-config")
	assert.NoError(t, err)
	assert.NotNil(t, acVolume.Secret, "automation config should be stored in a secret!")
	assert.Nil(t, acVolume.ConfigMap, "automation config should be stored in a secret, not a config map!")
}

// TestGuessEnterprise covers env-var overrides and image-name heuristics of guessEnterprise.
func TestGuessEnterprise(t *testing.T) {
	type testConfig struct {
		setArgs            func(t *testing.T)
		mdb                mdbv1.MongoDBCommunity
		mongodbImage       string
		expectedEnterprise bool
	}
	tests := map[string]testConfig{
		"No override and Community image": {
			setArgs:            func(t *testing.T) {},
			mdb:                mdbv1.MongoDBCommunity{},
			mongodbImage:       "mongodb-community-server",
			expectedEnterprise: false,
		},
		"No override and Enterprise image": {
			setArgs:            func(t *testing.T) {},
			mdb:                mdbv1.MongoDBCommunity{},
			mongodbImage:       "mongodb-enterprise-server",
			expectedEnterprise: true,
		},
		"Assuming enterprise manually": {
			setArgs: func(t *testing.T) {
				t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "true")
			},
			mdb:                mdbv1.MongoDBCommunity{},
			mongodbImage:       "mongodb-community-server",
			expectedEnterprise: true,
		},
		"Assuming community manually": {
			setArgs: func(t *testing.T) {
				t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "false")
			},
			mdb:                mdbv1.MongoDBCommunity{},
			mongodbImage:       "mongodb-enterprise-server",
			expectedEnterprise: false,
+ }, + // This one is a corner case. We don't expect users to fall here very often as there are + // dedicated variables to control this type of behavior. + "Enterprise with StatefulSet override": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{ + Spec: mdbv1.MongoDBCommunitySpec{ + StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ + SpecWrapper: mdbv1.StatefulSetSpecWrapper{ + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: construct.MongodbName, + Image: "another_repo.com/another_org/mongodb-enterprise-server", + }, + }, + }, + }, + }, + }, + }, + }, + }, + mongodbImage: "mongodb-community-server", + expectedEnterprise: true, + }, + "Enterprise with StatefulSet override to Community": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{ + Spec: mdbv1.MongoDBCommunitySpec{ + StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ + SpecWrapper: mdbv1.StatefulSetSpecWrapper{ + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: construct.MongodbName, + Image: "another_repo.com/another_org/mongodb-community-server", + }, + }, + }, + }, + }, + }, + }, + }, + }, + mongodbImage: "mongodb-enterprise-server", + expectedEnterprise: false, + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + testConfig.setArgs(t) + calculatedEnterprise := guessEnterprise(testConfig.mdb, testConfig.mongodbImage) + assert.Equal(t, testConfig.expectedEnterprise, calculatedEnterprise) + }) + } +} + +func getVolumeByName(sts appsv1.StatefulSet, volumeName string) (corev1.Volume, error) { + for _, v := range sts.Spec.Template.Spec.Volumes { + if v.Name == volumeName { + return v, nil + } + } + return corev1.Volume{}, fmt.Errorf("volume with name %s, not found", volumeName) +} + +func 
TestChangingVersion_ResultsInRollingUpdateStrategyType(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mgr := client.NewManager(ctx, &mdb) + mgrClient := mgr.GetClient() + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + assertReconciliationSuccessful(t, res, err) + + // fetch updated resource after first reconciliation + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &mdb) + + sts := appsv1.StatefulSet{} + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) + + mdbRef := &mdb + mdbRef.Spec.Version = "4.2.3" + + _ = mgrClient.Update(ctx, &mdb) + + // agents start the upgrade, they are not all ready + sts.Status.UpdatedReplicas = 1 + sts.Status.ReadyReplicas = 2 + err = mgrClient.Update(ctx, &sts) + assert.NoError(t, err) + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &sts) + + // reconcilliation is successful + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts = appsv1.StatefulSet{} + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type, + "The StatefulSet should have be re-configured to use RollingUpdates after it reached the ready state") +} + +func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { + t.Run("On No Version Change, Same Version", func(t *testing.T) { + mdb := newTestReplicaSet() + mdb.Spec.Version = "4.0.0" + mdb.Annotations[annotations.LastAppliedMongoDBVersion] = "4.0.0" 
+ sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) + assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) + }) + t.Run("On No Version Change, First Version", func(t *testing.T) { + mdb := newTestReplicaSet() + mdb.Spec.Version = "4.0.0" + delete(mdb.Annotations, annotations.LastAppliedMongoDBVersion) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) + assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) + }) + t.Run("On Version Change", func(t *testing.T) { + mdb := newTestReplicaSet() + + mdb.Spec.Version = "4.0.0" + + prevSpec := mdbv1.MongoDBCommunitySpec{ + Version: "4.2.0", + } + + bytes, err := json.Marshal(prevSpec) + assert.NoError(t, err) + + mdb.Annotations[annotations.LastAppliedMongoDBVersion] = string(bytes) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) + assert.Equal(t, appsv1.OnDeleteStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) + }) +} + +func TestService_isCorrectlyCreatedAndUpdated(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + assert.Equal(t, 
svc.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"}) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) +} + +func TestService_usesCustomMongodPortWhenSpecified(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mongodConfig := objx.New(map[string]interface{}{}) + mongodConfig.Set("net.port", 1000.) + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"}) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) +} + +func createOrUpdatePodsWithVersions(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versions []string) { + for i, version := range versions { + createPodWithAgentAnnotation(ctx, t, c, types.NamespacedName{ + Namespace: name.Namespace, + Name: fmt.Sprintf("%s-%d", name.Name, i), + }, version) + } +} + +func 
createPodWithAgentAnnotation(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versionStr string) { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.Name, + Namespace: name.Namespace, + Annotations: map[string]string{ + "agent.mongodb.com/version": versionStr, + }, + }, + } + + err := c.Create(ctx, &pod) + + if err != nil && apiErrors.IsAlreadyExists(err) { + err = c.Update(ctx, &pod) + assert.NoError(t, err) + } + + assert.NoError(t, err) +} + +func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { + ctx := context.Background() + mdb := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testuser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials", + }) + + namespacedName := mdb.NamespacedName() + arbiterNamespacedName := mdb.ArbiterNamespacedName() + + const oldPort = automationconfig.DefaultDBPort + const newPort = 8000 + + mgr := client.NewManager(ctx, &mdb) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + t.Run("Prepare cluster with arbiters and change port", func(t *testing.T) { + err := createUserPasswordSecret(ctx, mgr.Client, mdb, "password-secret-name", "pass") + assert.NoError(t, err) + + mdb.Spec.Arbiters = 1 + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assertReconciliationSuccessful(t, res, err) + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + }) + _ = assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 1) + + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"1", "1", "1"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, 
[]string{"1"}) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assertReconciliationSuccessful(t, res, err) + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + }) + _ = assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 1) + assertStatefulsetReady(ctx, t, mgr, namespacedName, 3) + assertStatefulsetReady(ctx, t, mgr, arbiterNamespacedName, 1) + + mdb.Spec.AdditionalMongodConfig = mdbv1.NewMongodConfiguration() + mdb.Spec.AdditionalMongodConfig.SetDBPort(newPort) + + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Port should be changed only in the process #0", func(t *testing.T) { + // port changes should be performed one at a time + // should set port #0 to new one + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 2) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in processes #0,#1", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"2", "2", "2"}) + createOrUpdatePodsWithVersions(ctx, t, 
mgr.GetClient(), arbiterNamespacedName, []string{"2"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 3) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in processes #0,#1,#2", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"3", "3", "3"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"3"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 4) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), 
mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in all processes", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"4", "4", "4"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"4"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assert.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) + + // all the ports are changed but there are still two service ports for old and new port until the next reconcile + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("At the end there should be only new port in the service", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"5", "5", "5"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"5"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assert.NoError(t, err) + // no need to requeue, port change is finished + assert.False(t, res.Requeue) + // there should not be any changes in config anymore + currentAc := 
assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) + + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + newPort: "mongodb", + }) + + // only at the end, when all pods are ready we have updated connection strings + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, newPort, oldPort) + }) +} + +// assertConnectionStringSecretPorts checks that connection string secret has expectedPort and does not have notExpectedPort. +func assertConnectionStringSecretPorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedPort int, notExpectedPort int) { + connectionStringSecret := corev1.Secret{} + scramUsers := mdb.GetAuthUsers() + require.Len(t, scramUsers, 1) + secretNamespacedName := types.NamespacedName{Name: scramUsers[0].ConnectionStringSecretName, Namespace: scramUsers[0].ConnectionStringSecretNamespace} + err := c.Get(ctx, secretNamespacedName, &connectionStringSecret) + require.NoError(t, err) + require.Contains(t, connectionStringSecret.Data, "connectionString.standard") + assert.Contains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", expectedPort)) + assert.NotContains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", notExpectedPort)) +} + +func assertServicePorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedServicePorts map[int]string) { + svc := corev1.Service{} + + err := c.Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + require.NoError(t, err) + assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) + assert.Equal(t, mdb.ServiceName(), 
svc.Spec.Selector["app"]) + assert.Len(t, svc.Spec.Ports, len(expectedServicePorts)) + + actualServicePorts := map[int]string{} + for _, servicePort := range svc.Spec.Ports { + actualServicePorts[int(servicePort.Port)] = servicePort.Name + } + + assert.Equal(t, expectedServicePorts, actualServicePorts) +} + +func assertAutomationConfigVersion(ctx context.Context, t *testing.T, c client.Client, mdb mdbv1.MongoDBCommunity, expectedVersion int) automationconfig.AutomationConfig { + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + require.NoError(t, err) + assert.Equal(t, expectedVersion, ac.Version) + return ac +} + +func assertStatefulsetReady(ctx context.Context, t *testing.T, mgr manager.Manager, name types.NamespacedName, expectedReplicas int) { + sts := appsv1.StatefulSet{} + err := mgr.GetClient().Get(ctx, name, &sts) + require.NoError(t, err) + assert.True(t, statefulset.IsReady(sts, expectedReplicas)) +} + +func TestService_configuresPrometheusCustomPorts(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "secret", + }, + Port: 4321, + } + + mongodConfig := objx.New(map[string]interface{}{}) + mongodConfig.Set("net.port", 1000.) + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). 
+ Build()) + + assert.NoError(t, err) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) + assert.Len(t, svc.Spec.Ports, 2) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"}) + assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 4321, Name: "prometheus"}) + + assert.Equal(t, svc.Labels["app"], mdb.ServiceName()) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) +} + +func TestService_configuresPrometheus(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "secret", + }, + } + + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). 
+ Build()) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + + assert.Len(t, svc.Spec.Ports, 2) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"}) + assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 9216, Name: "prometheus"}) +} + +func TestCustomNetPort_Configuration(t *testing.T) { + ctx := context.Background() + svc, _ := performReconciliationAndGetService(ctx, t, "specify_net_port.yaml") + assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, corev1.ServicePort{Port: 40333, Name: "mongodb"}, svc.Spec.Ports[0]) +} + +func TestAutomationConfig_versionIsBumpedOnChange(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Equal(t, 1, currentAc.Version) + + mdb.Spec.Members++ + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + _ = mgr.GetClient().Update(ctx, &mdb) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: 
types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Equal(t, 2, currentAc.Version) +} + +func TestAutomationConfig_versionIsNotBumpedWithNoChanges(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Equal(t, currentAc.Version, 1) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Equal(t, currentAc.Version, 1) +} + +func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Version = "4.2.2" + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, 
err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Len(t, currentAc.Processes, 3) + assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") + + // Upgrading minor version does not change the FCV on the automationConfig + mdbRef := &mdb + mdbRef.Spec.Version = "4.4.0" + _ = mgr.Client.Update(ctx, mdbRef) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.Len(t, currentAc.Processes, 3) + assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") + +} + +func TestAutomationConfig_CustomMongodConfig(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mongodConfig := objx.New(map[string]interface{}{}) + mongodConfig.Set("net.port", float64(1000)) + mongodConfig.Set("storage.other", "value") + mongodConfig.Set("arbitrary.config.path", "value") + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + + for _, p := range currentAc.Processes { + // Ensure port was overridden + assert.Equal(t, float64(1000), 
p.Args26.Get("net.port").Data()) + + // Ensure custom values were added + assert.Equal(t, "value", p.Args26.Get("arbitrary.config.path").Data()) + assert.Equal(t, "value", p.Args26.Get("storage.other").Data()) + + // Ensure default settings went unchanged + assert.Equal(t, automationconfig.DefaultMongoDBDataDir, p.Args26.Get("storage.dbPath").Data()) + assert.Equal(t, mdb.Name, p.Args26.Get("replication.replSetName").Data()) + } +} + +func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { + ctx := context.Background() + mdb := newScramReplicaSet() + mgr := client.NewManager(ctx, &mdb) + + c := mgr.Client + + keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName() + err := secret.CreateOrUpdate(ctx, c, secret.Builder(). + SetName(keyFileNsName.Name). + SetNamespace(keyFileNsName.Namespace). + SetField(constants.AgentKeyfileKey, "my-keyfile"). + Build()) + assert.NoError(t, err) + + passwordNsName := mdb.GetAgentPasswordSecretNamespacedName() + err = secret.CreateOrUpdate(ctx, c, secret.Builder(). + SetName(passwordNsName.Name). + SetNamespace(passwordNsName.Namespace). + SetField(constants.AgentPasswordKey, "my-pass"). 
+ Build()) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.False(t, currentAc.Auth.Disabled) + + assert.Equal(t, "my-keyfile", currentAc.Auth.Key) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.Equal(t, "my-pass", currentAc.Auth.AutoPwd) + +} + +func TestScramIsConfigured(t *testing.T) { + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newScramReplicaSet()) +} + +func TestScramIsConfiguredWhenNotSpecified(t *testing.T) { + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newTestReplicaSet()) +} + +func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Members = 5 + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + + assert.NoError(t, err) + assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) + + // scale members from five to three + mdb.Spec.Members = 3 + + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + res, 
err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + assert.NoError(t, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.Equal(t, true, res.Requeue) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + + assert.NoError(t, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) +} + +func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) + + // scale members from three to five + mdb.Spec.Members = 5 + + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + + assert.NoError(t, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + + assert.NoError(t, err) + assert.Equal(t, true, res.Requeue) + assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) + + res, err = r.Reconcile(ctx, 
reconcile.Request{NamespacedName: mdb.NamespacedName()}) + + assert.NoError(t, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.Equal(t, false, res.Requeue) + assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) +} + +func TestIgnoreUnknownUsers(t *testing.T) { + ctx := context.Background() + t.Run("Ignore Unknown Users set to true", func(t *testing.T) { + mdb := newTestReplicaSet() + ignoreUnknownUsers := true + mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers + + assertAuthoritativeSet(ctx, t, mdb, false) + }) + + t.Run("IgnoreUnknownUsers is not set", func(t *testing.T) { + mdb := newTestReplicaSet() + mdb.Spec.Security.Authentication.IgnoreUnknownUsers = nil + assertAuthoritativeSet(ctx, t, mdb, false) + }) + + t.Run("IgnoreUnknownUsers set to false", func(t *testing.T) { + mdb := newTestReplicaSet() + ignoreUnknownUsers := false + mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers + assertAuthoritativeSet(ctx, t, mdb, true) + }) +} + +func TestAnnotationsAreAppliedToResource(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.NotNil(t, mdb.Annotations) + assert.NotEmpty(t, mdb.Annotations[lastSuccessfulConfiguration], "last successful spec should have been saved as annotation but was not") + assert.Equal(t, mdb.Annotations[lastAppliedMongoDBVersion], mdb.Spec.Version, "last version should have been saved as an annotation but was not") +} + +// assertAuthoritativeSet asserts that a 
reconciliation of the given MongoDBCommunity resource +// results in the AuthoritativeSet of the created AutomationConfig to have the expectedValue provided. +func assertAuthoritativeSet(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, expectedValue bool) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + s, err := mgr.Client.GetSecret(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + + bytes := s.Data[automationconfig.ConfigKey] + ac, err := automationconfig.FromBytes(bytes) + assert.NoError(t, err) + + assert.Equal(t, expectedValue, ac.Auth.AuthoritativeSet) +} + +func assertReplicaSetIsConfiguredWithScram(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { + assert.NotEmpty(t, currentAc.Auth.Key) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.NotEmpty(t, currentAc.Auth.AutoPwd) + assert.False(t, currentAc.Auth.Disabled) + }) + t.Run("Secret with password was created", func(t *testing.T) { + secretNsName := 
mdb.GetAgentPasswordSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) + }) + + t.Run("Secret with keyfile was created", func(t *testing.T) { + secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + }) +} + +func assertReplicaSetIsConfiguredWithScramTLS(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + newClient := client.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, newClient, mdb) + assert.NoError(t, err) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { + assert.Empty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) + assert.NotEmpty(t, currentAc.Auth.Key) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.NotEmpty(t, currentAc.Auth.AutoPwd) + assert.False(t, currentAc.Auth.Disabled) + }) + t.Run("Secret with password was created", func(t *testing.T) { + secretNsName := mdb.GetAgentPasswordSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) + }) + + 
t.Run("Secret with keyfile was created", func(t *testing.T) { + secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + }) +} + +func assertReplicaSetIsConfiguredWithX509(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + newClient := client.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, newClient, mdb) + assert.NoError(t, err) + crt, key, err := x509.CreateAgentCertificate() + assert.NoError(t, err) + err = createAgentCertSecret(ctx, newClient, mdb, crt, key, "") + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + + t.Run("Automation Config is configured with X509", func(t *testing.T) { + assert.NotEmpty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) + assert.Equal(t, automationAgentPemMountPath+"/"+mdb.AgentCertificatePemSecretNamespacedName().Name, currentAc.TLSConfig.AutoPEMKeyFilePath) + assert.NotEmpty(t, currentAc.Auth.Key) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.Empty(t, currentAc.Auth.AutoPwd) + assert.False(t, currentAc.Auth.Disabled) + assert.Equal(t, "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", currentAc.Auth.AutoUser) + }) + t.Run("Secret with password was not created", func(t *testing.T) { + secretNsName := 
mdb.GetAgentPasswordSecretNamespacedName() + _, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.Error(t, err) + }) + t.Run("Secret with keyfile was created", func(t *testing.T) { + secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + }) +} + +func TestX509andSCRAMIsConfiguredWithX509Agent(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} + mdb.Spec.Security.Authentication.AgentMode = "X509" + + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) +} + +func TestX509andSCRAMIsConfiguredWithSCRAMAgent(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} + mdb.Spec.Security.Authentication.AgentMode = "SCRAM" + + assertReplicaSetIsConfiguredWithScramTLS(ctx, t, mdb) +} + +func TestX509IsConfigured(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} + + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) +} + +func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) +} + +func TestVolumeClaimTemplates_Configuration(t 
*testing.T) { + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "volume_claim_templates_mdb.yaml") + + assert.Len(t, sts.Spec.VolumeClaimTemplates, 3) + + pvcSpec := sts.Spec.VolumeClaimTemplates[2].Spec + + storage := pvcSpec.Resources.Requests[corev1.ResourceStorage] + storageRef := &storage + + assert.Equal(t, "1Gi", storageRef.String()) + assert.Len(t, pvcSpec.AccessModes, 1) + assert.Contains(t, pvcSpec.AccessModes, corev1.ReadWriteOnce) +} + +func TestChangeDataVolume_Configuration(t *testing.T) { + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "change_data_volume.yaml") + assert.Len(t, sts.Spec.VolumeClaimTemplates, 2) + + dataVolume := sts.Spec.VolumeClaimTemplates[0] + + storage := dataVolume.Spec.Resources.Requests[corev1.ResourceStorage] + storageRef := &storage + + assert.Equal(t, "data-volume", dataVolume.Name) + assert.Equal(t, "50Gi", storageRef.String()) +} + +func TestCustomStorageClass_Configuration(t *testing.T) { + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "custom_storage_class.yaml") + + dataVolume := sts.Spec.VolumeClaimTemplates[0] + + storage := dataVolume.Spec.Resources.Requests[corev1.ResourceStorage] + storageRef := &storage + + expectedStorageClass := "my-storage-class" + expectedStorageClassRef := &expectedStorageClass + + assert.Equal(t, "data-volume", dataVolume.Name) + assert.Equal(t, "1Gi", storageRef.String()) + assert.Equal(t, expectedStorageClassRef, dataVolume.Spec.StorageClassName) +} + +func TestCustomTaintsAndTolerations_Configuration(t *testing.T) { + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "tolerations_example.yaml") + + assert.Len(t, sts.Spec.Template.Spec.Tolerations, 2) + assert.Equal(t, "example-key", sts.Spec.Template.Spec.Tolerations[0].Key) + assert.Equal(t, corev1.TolerationOpExists, sts.Spec.Template.Spec.Tolerations[0].Operator) + 
assert.Equal(t, corev1.TaintEffectNoSchedule, sts.Spec.Template.Spec.Tolerations[0].Effect) + + assert.Equal(t, "example-key-2", sts.Spec.Template.Spec.Tolerations[1].Key) + assert.Equal(t, corev1.TolerationOpEqual, sts.Spec.Template.Spec.Tolerations[1].Operator) + assert.Equal(t, corev1.TaintEffectNoExecute, sts.Spec.Template.Spec.Tolerations[1].Effect) +} + +func TestCustomDataDir_Configuration(t *testing.T) { + ctx := context.Background() + sts, c := performReconciliationAndGetStatefulSet(ctx, t, "specify_data_dir.yaml") + + agentContainer := container.GetByName("mongodb-agent", sts.Spec.Template.Spec.Containers) + assert.NotNil(t, agentContainer) + assertVolumeMountPath(t, agentContainer.VolumeMounts, "data-volume", "/some/path/db") + + mongoContainer := container.GetByName("mongod", sts.Spec.Template.Spec.Containers) + assert.NotNil(t, mongoContainer) + + lastCommand := mongoContainer.Command[len(mongoContainer.Command)-1] + assert.Contains(t, lastCommand, "/some/path/db", "startup command should be using the newly specified path") + + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: "example-mongodb-config", Namespace: "test-ns"}) + assert.NoError(t, err) + + for _, p := range ac.Processes { + actualStoragePath := p.Args26.Get("storage.dbPath").String() + assert.Equal(t, "/some/path/db", actualStoragePath, "process dbPath should have been set") + } +} + +func TestInconsistentReplicas(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + stsReplicas := new(int32) + *stsReplicas = 3 + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Replicas = stsReplicas + mdb.Spec.Members = 4 + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + 
assert.NoError(t, err) +} + +func assertVolumeMountPath(t *testing.T, mounts []corev1.VolumeMount, name, path string) { + for _, v := range mounts { + if v.Name == name { + assert.Equal(t, path, v.MountPath) + return + } + } + t.Fatalf("volume with name %s was not present!", name) +} + +func performReconciliationAndGetStatefulSet(ctx context.Context, t *testing.T, filePath string) (appsv1.StatefulSet, client.Client) { + mdb, err := loadTestFixture(filePath) + assert.NoError(t, err) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + assertReconciliationSuccessful(t, res, err) + + sts, err := mgr.Client.GetStatefulSet(ctx, mdb.NamespacedName()) + assert.NoError(t, err) + return sts, mgr.Client +} + +func performReconciliationAndGetService(ctx context.Context, t *testing.T, filePath string) (corev1.Service, client.Client) { + mdb, err := loadTestFixture(filePath) + assert.NoError(t, err) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + assertReconciliationSuccessful(t, res, err) + svc, err := mgr.Client.GetService(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + return svc, mgr.Client +} + +func generatePasswordsForAllUsers(ctx context.Context, mdb mdbv1.MongoDBCommunity, c client.Client) error { + for _, user := range mdb.Spec.Users { + + key := "password" + if user.PasswordSecretRef.Key != "" { + key = 
user.PasswordSecretRef.Key + } + + passwordSecret := secret.Builder(). + SetName(user.PasswordSecretRef.Name). + SetNamespace(mdb.Namespace). + SetField(key, "GAGTQK2ccRRaxJFudI5y"). + Build() + + if err := c.CreateSecret(ctx, passwordSecret); err != nil { + return err + } + } + + return nil +} + +func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err error) { + assert.NoError(t, err) + assert.Equal(t, false, result.Requeue) + assert.Equal(t, time.Duration(0), result.RequeueAfter) +} + +// makeStatefulSetReady updates the StatefulSet corresponding to the +// provided MongoDB resource to mark it as ready for the case of `statefulset.IsReady` +func makeStatefulSetReady(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) { + setStatefulSetReadyReplicas(ctx, t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) +} + +func setStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { + sts := appsv1.StatefulSet{} + err := c.Get(ctx, mdb.NamespacedName(), &sts) + assert.NoError(t, err) + sts.Status.ReadyReplicas = int32(readyReplicas) + sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) + err = c.Update(ctx, &sts) + assert.NoError(t, err) +} + +func setArbiterStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { + sts := appsv1.StatefulSet{} + err := c.Get(ctx, mdb.ArbiterNamespacedName(), &sts) + assert.NoError(t, err) + sts.Status.ReadyReplicas = int32(readyReplicas) + sts.Status.UpdatedReplicas = int32(mdb.StatefulSetArbitersThisReconciliation()) + err = c.Update(ctx, &sts) + assert.NoError(t, err) +} + +// loadTestFixture will create a MongoDB resource from a given fixture +func loadTestFixture(yamlFileName string) (mdbv1.MongoDBCommunity, error) { + testPath := fmt.Sprintf("testdata/%s", yamlFileName) + mdb := mdbv1.MongoDBCommunity{} + 
data, err := os.ReadFile(testPath) + if err != nil { + return mdb, fmt.Errorf("error reading file: %s", err) + } + + if err := marshalRuntimeObjectFromYAMLBytes(data, &mdb); err != nil { + return mdb, fmt.Errorf("error converting yaml bytes to service account: %s", err) + } + + return mdb, nil +} + +// marshalRuntimeObjectFromYAMLBytes accepts the bytes of a yaml resource +// and unmarshals them into the provided runtime Object +func marshalRuntimeObjectFromYAMLBytes(bytes []byte, obj runtime.Object) error { + jsonBytes, err := yaml.YAMLToJSON(bytes) + if err != nil { + return err + } + return json.Unmarshal(jsonBytes, &obj) +} + +func TestGetMongoDBImage(t *testing.T) { + type testConfig struct { + mongodbRepoUrl string + mongodbImage string + mongodbImageType string + version string + expectedImage string + } + tests := map[string]testConfig{ + "Default UBI8 Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image from Quay": { + mongodbRepoUrl: "quay.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden Ubuntu Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubuntu2204", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubuntu2204", + }, + "Overridden UBI Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + 
mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Docker Inc images": { + mongodbRepoUrl: "docker.io", + mongodbImage: "mongo", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongo:6.0.5", + }, + "Deprecated AppDB images defined the old way": { + mongodbRepoUrl: "quay.io", + mongodbImage: "mongodb/mongodb-enterprise-appdb-database-ubi", + // In this example, we intentionally don't use the suffix from the env. variable and let users + // define it in the version instead. There are some known customers who do this. + // This is a backwards compatibility case. + mongodbImageType: "will-be-ignored", + version: "5.0.14-ent", + expectedImage: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.14-ent", + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + image := getMongoDBImage(testConfig.mongodbRepoUrl, testConfig.mongodbImage, testConfig.mongodbImageType, testConfig.version) + assert.Equal(t, testConfig.expectedImage, image) + }) + } +} diff --git a/controllers/testdata/change_data_volume.yaml b/controllers/testdata/change_data_volume.yaml new file mode 100644 index 000000000..0ab77019c --- /dev/null +++ b/controllers/testdata/change_data_volume.yaml @@ -0,0 +1,31 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: change-data-volume-mdb +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: [ "ReadWriteOnce", "ReadWriteMany" ] + resources: + requests: + storage: 50Gi diff --git a/controllers/testdata/custom_storage_class.yaml 
b/controllers/testdata/custom_storage_class.yaml new file mode 100644 index 000000000..9740ce4ec --- /dev/null +++ b/controllers/testdata/custom_storage_class.yaml @@ -0,0 +1,22 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: custom-storage-class-mdb +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi diff --git a/controllers/testdata/openshift_mdb.yaml b/controllers/testdata/openshift_mdb.yaml new file mode 100644 index 000000000..11a2e21b0 --- /dev/null +++ b/controllers/testdata/openshift_mdb.yaml @@ -0,0 +1,21 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-openshift-mongodb +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin diff --git a/controllers/testdata/specify_data_dir.yaml b/controllers/testdata/specify_data_dir.yaml new file mode 100644 index 000000000..d2b80012c --- /dev/null +++ b/controllers/testdata/specify_data_dir.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb + namespace: test-ns +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.dbPath: /some/path/db diff --git a/controllers/testdata/specify_net_port.yaml 
b/controllers/testdata/specify_net_port.yaml new file mode 100644 index 000000000..f57d367dd --- /dev/null +++ b/controllers/testdata/specify_net_port.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb + namespace: test-ns +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + net.port: 40333 diff --git a/controllers/testdata/tolerations_example.yaml b/controllers/testdata/tolerations_example.yaml new file mode 100644 index 000000000..b8c303ae7 --- /dev/null +++ b/controllers/testdata/tolerations_example.yaml @@ -0,0 +1,24 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: tolerations-mdb +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + statefulSet: + spec: + template: + spec: + containers: + - name: mongodb-agent + tolerations: + - key: "example-key" + operator: "Exists" + effect: "NoSchedule" + - key: "example-key-2" + operator: "Equal" + effect: "NoExecute" diff --git a/controllers/testdata/volume_claim_templates_mdb.yaml b/controllers/testdata/volume_claim_templates_mdb.yaml new file mode 100644 index 000000000..1e20915b9 --- /dev/null +++ b/controllers/testdata/volume_claim_templates_mdb.yaml @@ -0,0 +1,21 @@ +apiVersion: mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: volume-claim-templates-mdb +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: volume-claim-templates + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi diff --git 
a/controllers/validation/validation.go b/controllers/validation/validation.go new file mode 100644 index 000000000..3d84cc1c0 --- /dev/null +++ b/controllers/validation/validation.go @@ -0,0 +1,207 @@ +package validation + +import ( + "errors" + "fmt" + "strings" + + "go.uber.org/zap" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" +) + +// ValidateInitialSpec checks if the resource's initial Spec is valid. +func ValidateInitialSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + return validateSpec(mdb, log) +} + +// ValidateUpdate validates that the new Spec, corresponding to the existing one, is still valid. +func ValidateUpdate(mdb mdbv1.MongoDBCommunity, oldSpec mdbv1.MongoDBCommunitySpec, log *zap.SugaredLogger) error { + if oldSpec.Security.TLS.Enabled && !mdb.Spec.Security.TLS.Enabled { + return errors.New("TLS can't be set to disabled after it has been enabled") + } + return validateSpec(mdb, log) +} + +// validateSpec validates the specs of the given resource definition. 
+func validateSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + if err := validateUsers(mdb); err != nil { + return err + } + + if err := validateArbiterSpec(mdb); err != nil { + return err + } + + if err := validateAuthModeSpec(mdb, log); err != nil { + return err + } + + if err := validateAgentCertSecret(mdb, log); err != nil { + return err + } + + if err := validateStatefulSet(mdb); err != nil { + return err + } + + return nil +} + +// validateUsers checks if the users configuration is valid +func validateUsers(mdb mdbv1.MongoDBCommunity) error { + connectionStringSecretNameMap := map[string]authtypes.User{} + nameCollisions := []string{} + + scramSecretNameMap := map[string]authtypes.User{} + scramSecretNameCollisions := []string{} + expectedAuthMethods := map[string]struct{}{} + + if len(mdb.Spec.Security.Authentication.Modes) == 0 { + expectedAuthMethods[constants.Sha256] = struct{}{} + } + + for _, auth := range mdb.Spec.Security.Authentication.Modes { + expectedAuthMethods[mdbv1.ConvertAuthModeToAuthMechanism(auth)] = struct{}{} + } + + for _, user := range mdb.GetAuthUsers() { + + // Ensure no collisions in the connection string secret names + connectionStringSecretName := user.ConnectionStringSecretName + if previousUser, exists := connectionStringSecretNameMap[connectionStringSecretName]; exists { + nameCollisions = append(nameCollisions, + fmt.Sprintf(`[connection string secret name: "%s" for user: "%s", db: "%s" and user: "%s", db: "%s"]`, + connectionStringSecretName, + previousUser.Username, + previousUser.Database, + user.Username, + user.Database)) + } else { + connectionStringSecretNameMap[connectionStringSecretName] = user + } + + // Ensure no collisions in the secret holding scram credentials + scramSecretName := user.ScramCredentialsSecretName + if previousUser, exists := scramSecretNameMap[scramSecretName]; exists { + scramSecretNameCollisions = append(scramSecretNameCollisions, + fmt.Sprintf(`[scram secret name: "%s" for 
user: "%s" and user: "%s"]`, + scramSecretName, + previousUser.Username, + user.Username)) + } else { + scramSecretNameMap[scramSecretName] = user + } + + if user.Database == constants.ExternalDB { + if _, ok := expectedAuthMethods[constants.X509]; !ok { + return fmt.Errorf("X.509 user %s present but X.509 is not enabled", user.Username) + } + if user.PasswordSecretKey != "" { + return fmt.Errorf("X509 user %s should not have a password secret key", user.Username) + } + if user.PasswordSecretName != "" { + return fmt.Errorf("X509 user %s should not have a password secret name", user.Username) + } + if user.ScramCredentialsSecretName != "" { + return fmt.Errorf("X509 user %s should not have scram credentials secret name", user.Username) + } + } else { + _, sha1 := expectedAuthMethods[constants.Sha1] + _, sha256 := expectedAuthMethods[constants.Sha256] + if !sha1 && !sha256 { + return fmt.Errorf("SCRAM user %s present but SCRAM is not enabled", user.Username) + } + if user.PasswordSecretKey == "" { + return fmt.Errorf("SCRAM user %s is missing password secret key", user.Username) + } + if user.PasswordSecretName == "" { + return fmt.Errorf("SCRAM user %s is missing password secret name", user.Username) + } + if user.ScramCredentialsSecretName == "" { + return fmt.Errorf("SCRAM user %s is missing scram credentials secret name", user.Username) + } + } + } + if len(nameCollisions) > 0 { + return fmt.Errorf("connection string secret names collision, update at least one of the users so that the resulted secret names (--) are unique: %s", + strings.Join(nameCollisions, ", ")) + } + + if len(scramSecretNameCollisions) > 0 { + return fmt.Errorf("scram credential secret names collision, update at least one of the users: %s", + strings.Join(scramSecretNameCollisions, ", ")) + } + + return nil +} + +// validateArbiterSpec checks if the initial Member spec is valid. 
+func validateArbiterSpec(mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.Arbiters < 0 { + return fmt.Errorf("number of arbiters must be greater or equal than 0") + } + if mdb.Spec.Arbiters >= mdb.Spec.Members { + return fmt.Errorf("number of arbiters specified (%v) is greater or equal than the number of members in the replicaset (%v). At least one member must not be an arbiter", mdb.Spec.Arbiters, mdb.Spec.Members) + } + + return nil +} + +// validateAuthModeSpec checks that the list of modes does not contain duplicates. +func validateAuthModeSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + allModes := mdb.Spec.Security.Authentication.Modes + mapMechanisms := make(map[string]struct{}) + + // Issue warning if Modes array is empty + if len(allModes) == 0 { + mapMechanisms[constants.Sha256] = struct{}{} + log.Warnf("An empty Modes array has been provided. The default mode (SCRAM-SHA-256) will be used.") + } + + // Check that no auth is defined more than once + for _, mode := range allModes { + if value := mdbv1.ConvertAuthModeToAuthMechanism(mode); value == "" { + return fmt.Errorf("unexpected value (%q) defined for supported authentication modes", value) + } else if value == constants.X509 && !mdb.Spec.Security.TLS.Enabled { + return fmt.Errorf("TLS must be enabled when using X.509 authentication") + } + mapMechanisms[mdbv1.ConvertAuthModeToAuthMechanism(mode)] = struct{}{} + } + + if len(mapMechanisms) < len(allModes) { + return fmt.Errorf("some authentication modes are declared twice or more") + } + + agentMode := mdb.Spec.GetAgentAuthMode() + if agentMode == "" && len(allModes) > 1 { + return fmt.Errorf("If spec.security.authentication.modes contains different authentication modes, the agent mode must be specified ") + } + if _, present := mapMechanisms[mdbv1.ConvertAuthModeToAuthMechanism(agentMode)]; !present { + return fmt.Errorf("Agent authentication mode: %s must be part of the spec.security.authentication.modes", agentMode) + } + + 
return nil +} + +func validateAgentCertSecret(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + agentMode := mdb.Spec.GetAgentAuthMode() + if agentMode != "X509" && + mdb.Spec.Security.Authentication.AgentCertificateSecret != nil && + mdb.Spec.Security.Authentication.AgentCertificateSecret.Name != "" { + log.Warnf("Agent authentication is not X.509, but the agent certificate secret is configured, it will be ignored") + } + return nil +} + +func validateStatefulSet(mdb mdbv1.MongoDBCommunity) error { + stsReplicas := mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Replicas + + if stsReplicas != nil && *stsReplicas != int32(mdb.Spec.Members) { + return fmt.Errorf("spec.statefulset.spec.replicas has to be equal to spec.members") + } + + return nil +} diff --git a/controllers/watch/watch.go b/controllers/watch/watch.go new file mode 100644 index 000000000..9522c53c3 --- /dev/null +++ b/controllers/watch/watch.go @@ -0,0 +1,76 @@ +package watch + +import ( + "context" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ResourceWatcher implements handler.EventHandler and is used to trigger reconciliation when +// a watched object changes. It's designed to only be used for a single type of object. +// If multiple types should be watched, one ResourceWatcher for each type should be used. +type ResourceWatcher struct { + watched map[types.NamespacedName][]types.NamespacedName +} + +var _ handler.EventHandler = &ResourceWatcher{} + +// New will create a new ResourceWatcher with no watched objects. +func New() ResourceWatcher { + return ResourceWatcher{ + watched: make(map[types.NamespacedName][]types.NamespacedName), + } +} + +// Watch will add a new object to watch. 
+func (w ResourceWatcher) Watch(ctx context.Context, watchedName, dependentName types.NamespacedName) { + existing, hasExisting := w.watched[watchedName] + if !hasExisting { + existing = []types.NamespacedName{} + } + + // Check if resource is already being watched. + if contains.NamespacedName(existing, dependentName) { + return + } + + w.watched[watchedName] = append(existing, dependentName) +} + +func (w ResourceWatcher) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + w.handleEvent(event.Object, queue) +} + +func (w ResourceWatcher) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + w.handleEvent(event.ObjectOld, queue) +} + +func (w ResourceWatcher) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + w.handleEvent(event.Object, queue) +} + +func (w ResourceWatcher) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + w.handleEvent(event.Object, queue) +} + +// handleEvent is called when an event is received for an object. +// It will check if the object is being watched and trigger a reconciliation for +// the dependent object. +func (w ResourceWatcher) handleEvent(meta metav1.Object, queue workqueue.RateLimitingInterface) { + changedObjectName := types.NamespacedName{ + Name: meta.GetName(), + Namespace: meta.GetNamespace(), + } + + // Enqueue reconciliation for each dependent object. 
+ for _, reconciledObjectName := range w.watched[changedObjectName] { + queue.Add(reconcile.Request{ + NamespacedName: reconciledObjectName, + }) + } +} diff --git a/controllers/watch/watch_test.go b/controllers/watch/watch_test.go new file mode 100644 index 000000000..ab8c522be --- /dev/null +++ b/controllers/watch/watch_test.go @@ -0,0 +1,159 @@ +package watch + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/types" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + "github.com/stretchr/testify/assert" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/workqueue" + + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestWatcher(t *testing.T) { + ctx := context.Background() + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "namespace", + }, + } + objNsName := types.NamespacedName{Name: obj.Name, Namespace: obj.Namespace} + + mdb1 := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mdb1", + Namespace: "namespace", + }, + } + + mdb2 := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mdb2", + Namespace: "namespace", + }, + } + + t.Run("Non-watched object", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + + watcher.Create(ctx, event.CreateEvent{ + Object: obj, + }, &queue) + + // Ensure no reconciliation is queued if object is not watched. + assert.Equal(t, 0, queue.Len()) + }) + + t.Run("Multiple objects to reconcile", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb2.NamespacedName()) + + watcher.Create(ctx, event.CreateEvent{ + Object: obj, + }, &queue) + + // Ensure multiple reconciliations are enqueued. 
+ assert.Equal(t, 2, queue.Len()) + }) + + t.Run("Create event", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + + watcher.Create(ctx, event.CreateEvent{ + Object: obj, + }, &queue) + + assert.Equal(t, 1, queue.Len()) + }) + + t.Run("Update event", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + + watcher.Update(ctx, event.UpdateEvent{ + ObjectOld: obj, + ObjectNew: obj, + }, &queue) + + assert.Equal(t, 1, queue.Len()) + }) + + t.Run("Delete event", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + + watcher.Delete(ctx, event.DeleteEvent{ + Object: obj, + }, &queue) + + assert.Equal(t, 1, queue.Len()) + }) + + t.Run("Generic event", func(t *testing.T) { + watcher := New() + queue := controllertest.Queue{Interface: workqueue.New()} + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + + watcher.Generic(ctx, event.GenericEvent{ + Object: obj, + }, &queue) + + assert.Equal(t, 1, queue.Len()) + }) +} + +func TestWatcherAdd(t *testing.T) { + ctx := context.Background() + watcher := New() + assert.Empty(t, watcher.watched) + + watchedName := types.NamespacedName{Name: "object", Namespace: "namespace"} + + mdb1 := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mdb1", + Namespace: "namespace", + }, + } + mdb2 := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mdb2", + Namespace: "namespace", + }, + } + + // Ensure single object can be added to empty watchlist. + watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) + assert.Len(t, watcher.watched, 1) + assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) + + // Ensure object can only be watched once. 
+ watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) + assert.Len(t, watcher.watched, 1) + assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) + + // Ensure a single object can be watched for multiple reconciliations. + watcher.Watch(ctx, watchedName, mdb2.NamespacedName()) + assert.Len(t, watcher.watched, 1) + assert.Equal(t, []types.NamespacedName{ + mdb1.NamespacedName(), + mdb2.NamespacedName(), + }, watcher.watched[watchedName]) +} diff --git a/deploy/role.yaml b/deploy/clusterwide/cluster_role.yaml similarity index 50% rename from deploy/role.yaml rename to deploy/clusterwide/cluster_role.yaml index 314e70e9a..de8abc63c 100644 --- a/deploy/role.yaml +++ b/deploy/clusterwide/cluster_role.yaml @@ -1,18 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +kind: ClusterRole metadata: - creationTimestamp: null name: mongodb-kubernetes-operator rules: - apiGroups: - "" resources: - - pods - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - configmaps - secrets verbs: @@ -26,9 +20,6 @@ rules: - apiGroups: - apps resources: - - deployments - - daemonsets - - replicasets - statefulsets verbs: - create @@ -38,44 +29,27 @@ rules: - patch - update - watch -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create -- apiGroups: - - apps - resourceNames: - - mongodb-kubernetes-operator - resources: - - deployments/finalizers - verbs: - - update - apiGroups: - "" resources: - pods verbs: + - delete - get + - list + - patch + - update + - watch - apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get -- apiGroups: - - mongodb.com + - mongodbcommunity.mongodb.com resources: - - '*' - - mongodbs + - mongodbcommunity + - mongodbcommunity/status + - mongodbcommunity/spec + - mongodbcommunity/finalizers verbs: - - create - - delete - get - - list - patch + - list - update - watch diff --git 
a/deploy/testrunner/cluster_role_binding.yaml b/deploy/clusterwide/cluster_role_binding.yaml similarity index 55% rename from deploy/testrunner/cluster_role_binding.yaml rename to deploy/clusterwide/cluster_role_binding.yaml index 349a9531f..7617ec02d 100644 --- a/deploy/testrunner/cluster_role_binding.yaml +++ b/deploy/clusterwide/cluster_role_binding.yaml @@ -1,12 +1,12 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: test-runner + name: mongodb-kubernetes-operator subjects: - kind: ServiceAccount - name: test-runner - namespace: default + # namespace: + name: mongodb-kubernetes-operator roleRef: kind: ClusterRole - name: cluster-admin # TODO: create cluster role with only required permissions + name: mongodb-kubernetes-operator apiGroup: rbac.authorization.k8s.io diff --git a/deploy/clusterwide/role-for-binding.yaml b/deploy/clusterwide/role-for-binding.yaml new file mode 100644 index 000000000..8bc7daaed --- /dev/null +++ b/deploy/clusterwide/role-for-binding.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: read-access-for-service-binding + labels: + servicebinding.io/controller: "true" +rules: + - apiGroups: ["mongodbcommunity.mongodb.com"] + resources: ["mongodbcommunity", "mongodbcommunity/status"] + verbs: ["get", "list", "watch"] diff --git a/deploy/crds/mongodb.com_mongodb_crd.yaml b/deploy/crds/mongodb.com_mongodb_crd.yaml deleted file mode 100644 index 110682d32..000000000 --- a/deploy/crds/mongodb.com_mongodb_crd.yaml +++ /dev/null @@ -1,70 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: mongodb.mongodb.com -spec: - group: mongodb.com - names: - kind: MongoDB - listKind: MongoDBList - plural: mongodb - shortNames: - - mdb - singular: mongodb - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: MongoDB is the Schema for the mongodbs API - properties: - apiVersion: - 
description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: MongoDBSpec defines the desired state of MongoDB - properties: - featureCompatibilityVersion: - description: FeatureCompatibilityVersion configures the feature compatibility - version that will be set for the deployment - type: string - members: - description: Members is the number of members in the replica set - type: integer - type: - description: Type defines which type of MongoDB deployment the resource - should create - type: string - version: - description: Version defines which version of MongoDB will be used - type: string - required: - - type - - version - type: object - status: - description: MongoDBStatus defines the observed state of MongoDB - properties: - mongoUri: - type: string - phase: - type: string - required: - - mongoUri - - phase - type: object - type: object - version: v1 - versions: - - name: v1 - served: true - storage: true diff --git a/deploy/crds/mongodb.com_v1_mongodb_cr.yaml b/deploy/crds/mongodb.com_v1_mongodb_cr.yaml deleted file mode 100644 index 3464de36a..000000000 --- a/deploy/crds/mongodb.com_v1_mongodb_cr.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: mongodb.com/v1 -kind: MongoDB -metadata: - name: example-mongodb -spec: - members: 3 - type: ReplicaSet - version: "4.0.6" diff --git a/deploy/e2e/role.yaml b/deploy/e2e/role.yaml new 
file mode 100644 index 000000000..b11b12dd1 --- /dev/null +++ b/deploy/e2e/role.yaml @@ -0,0 +1,297 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: e2e-test +rules: +- apiGroups: + - "" + resources: + - pods + - serviceaccounts + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - namespaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - create + - list +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - mongodb-kubernetes-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get +- apiGroups: + - mongodbcommunity.mongodb.com + resources: + - mongodbcommunity + - mongodbcommunity/status + - mongodbcommunity/spec + - mongodbcommunity/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + - clusterrolebindings + - clusterroles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +# needed for cert-manager integration +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - 
list + - watch + - create + - delete + - patch + - update +- apiGroups: + - acme.cert-manager.io + resources: + - challenges + - challenges/finalizers + - challenges/status + - orders + - orders/finalizers + - orders/status + verbs: + - create + - delete + - update + - get + - list + - watch + - patch + - deletecollection +- apiGroups: + - cert-manager.io + resources: + - clusterissuers + - clusterissuers/status + - issuers + - issuers/status + - certificates + - certificaterequests + - certificaterequests/finalizers + - certificaterequests/status + - certificates/finalizers + - certificates/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - patch + - deletecollection +- apiGroups: + - cert-manager.io + resources: + - signers + resourceNames: + - clusterissuers.cert-manager.io/* + - issuers.cert-manager.io/* + verbs: + - approve +- apiGroups: + - networking.k8s.io + resources: + - ingresses + - ingresses/finalizers + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - networking.x-k8s.io + resources: + - httproutes + - gateways + - gateways/finalizers + - httproutes/finalizers + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - auditregistration.k8s.io + resources: + - auditsinks + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/status + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - 
clusterissuers.cert-manager.io/* + - issuers.cert-manager.io/* + verbs: + - sign +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - cert-manager-controller + - cert-manager-cainjector-leader-election + - cert-manager-cainjector-leader-election-core + verbs: + - get + - update + - patch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - update diff --git a/deploy/e2e/role_binding.yaml b/deploy/e2e/role_binding.yaml new file mode 100644 index 000000000..17a3828b3 --- /dev/null +++ b/deploy/e2e/role_binding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: e2e-test +subjects: +- kind: ServiceAccount + name: e2e-test + namespace: mongodb +roleRef: + kind: ClusterRole + name: e2e-test + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/e2e/service_account.yaml b/deploy/e2e/service_account.yaml new file mode 100644 index 000000000..84fea363b --- /dev/null +++ b/deploy/e2e/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: e2e-test + namespace: mongodb diff --git a/deploy/openshift/operator_openshift.yaml b/deploy/openshift/operator_openshift.yaml new file mode 100644 index 000000000..b7011a1cc --- /dev/null +++ b/deploy/openshift/operator_openshift.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + email: support@mongodb.com + labels: + owner: mongodb + name: mongodb-kubernetes-operator +spec: + replicas: 1 + selector: + matchLabels: + name: mongodb-kubernetes-operator + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + name: mongodb-kubernetes-operator + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + 
values: + - mongodb-kubernetes-operator + topologyKey: kubernetes.io/hostname + containers: + - command: + - /usr/local/bin/entrypoint + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MANAGED_SECURITY_CONTEXT + value: 'true' + - name: OPERATOR_NAME + value: mongodb-kubernetes-operator + - name: AGENT_IMAGE + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 + - name: READINESS_PROBE_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 + - name: VERSION_UPGRADE_HOOK_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 + - name: MONGODB_IMAGE + value: mongo + - name: MONGODB_REPO_URL + value: quay.io + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 + imagePullPolicy: Always + name: mongodb-kubernetes-operator + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + serviceAccountName: mongodb-kubernetes-operator diff --git a/deploy/operator.yaml b/deploy/operator.yaml deleted file mode 100644 index a4572f685..000000000 --- a/deploy/operator.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mongodb-kubernetes-operator -spec: - replicas: 1 - selector: - matchLabels: - name: mongodb-kubernetes-operator - template: - metadata: - labels: - name: mongodb-kubernetes-operator - spec: - serviceAccountName: mongodb-kubernetes-operator - containers: - - name: mongodb-kubernetes-operator - image: quay.io/chatton/mongodb-kubernetes-operator - command: - - mongodb-kubernetes-operator - imagePullPolicy: Always - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "mongodb-kubernetes-operator" - - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage 
MongoDB deployments - value: quay.io/chatton/mongodb-agent diff --git a/deploy/testrunner/role.yaml b/deploy/testrunner/role.yaml deleted file mode 100644 index 996c87b34..000000000 --- a/deploy/testrunner/role.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - creationTimestamp: null - name: test-runner -rules: -- apiGroups: - - "" - resources: - - pods - - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - - namespaces - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create -- apiGroups: - - apps - resourceNames: - - test-runner - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get -- apiGroups: - - mongodb.com - resources: - - '*' - - mongodbs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..7475a0d10 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,11 @@ +# MongoDB Community Kubernetes Operator Documentation # + +## Table of Contents + +- [Contribute to the MongoDB Kubernetes Operator](contributing.md) +- [MongoDB Community Kubernetes Operator Architecture](architecture.md) +- [Install and Upgrade the Community Kubernetes Operator](install-upgrade.md) +- [Deploy and Configure MongoDBCommunity Resources](deploy-configure.md) +- [Configure Logging of the MongoDB components](logging.md) +- [Create Database Users](users.md) +- [Secure MongoDBCommunity Resources](secure.md) diff --git 
a/docs/RELEASE_NOTES.md b/docs/RELEASE_NOTES.md new file mode 100644 index 000000000..6109fac02 --- /dev/null +++ b/docs/RELEASE_NOTES.md @@ -0,0 +1,18 @@ +# MongoDB Kubernetes Operator 0.13.0 + +## Dependency updates + - Updated kubernetes dependencies to 1.30 + - Bumped Go dependency to 1.24 + - Updated packages `crypto`, `net` and `oauth2` to remediate multiple CVEs + +## MongoDBCommunity Resource + - Added support for overriding the ReplicaSet ID ([#1656](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1656)). + +## Improvements + - Refactored environment variable propagation ([#1676](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1676)). + - Introduced a linter to limit inappropriate usage of environment variables within the codebase ([#1690](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1690)). + +## Security & Dependency Updates + - **CVE Updates**: Updated packages `crypto`, `net` and `oauth2` to remediate multiple CVEs + - Upgraded to Go 1.24 and Kubernetes dependencies to 1.30.x . + diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 000000000..5e11baf6e --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,93 @@ +# MongoDB Community Kubernetes Operator Architecture + +The MongoDB Community Kubernetes Operator is a [Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) and a [Controller](https://kubernetes.io/docs/concepts/architecture/controller/). + +## Table of Contents + +- [Cluster Configuration](#cluster-configuration) +- [Example: MongoDB Version Upgrade](#example-mongodb-version-upgrade) +- [MongoDB Docker Images](#mongodb-docker-images) + +## Cluster Configuration + +You create and update MongoDBCommunity resources by defining a MongoDBCommunity resource definition. When you apply the MongoDBCommunity resource definition to your Kubernetes environment, the Operator: + +1. 
Creates a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) that contains one [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) for each [replica set](https://www.mongodb.com/docs/manual/replication/) member. +1. Writes the Automation configuration as a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and mounts it to each pod. +1. Creates one [init container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) and two [containers](https://kubernetes.io/docs/concepts/containers/overview/) in each pod: + + - An init container which copies the `cmd/versionhook` binary to the main `mongod` container. This is run before `mongod` starts to handle [version upgrades](#example-mongodb-version-upgrade). + + - A container for the [`mongod`](https://www.mongodb.com/docs/manual/reference/program/mongod/index.html) process binary. `mongod` is the primary daemon process for the MongoDB system. It handles data requests, manages data access, and performs background management operations. + + - A container for the MongoDB Agent. The Automation function of the MongoDB Agent handles configuring, stopping, and restarting the `mongod` process. The MongoDB Agent periodically polls the `mongod` to determine status and can deploy changes as needed. + +1. Creates several volumes: + + - `data-volume` which is [persistent](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and mounts to `/data` on both the server and agent containers. Stores server data as well as `automation-mongod.conf` written by the agent and some locks the agent needs. + - `automation-config` which is mounted from the previously generated `Secret` to both the server and agent. Only lives as long as the pod. + - `healthstatus` which contains the agent's current status. This is shared with the `mongod` container where it's used by the pre-stop hook. Only lives as long as the pod. + +1. 
Initiates the MongoDB Agent, which in turn creates the database configuration and launches the `mongod` process according to your [MongoDBCommunity resource definition](../config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml). + + + +This architecture maximizes use of the MongoDB Agent while integrating naturally with Kubernetes to produce a number of benefits. + +- The database container is not tied to the lifecycle of the Agent container or to the Operator, so you can: + - Use your preferred Linux distribution inside the container. + - Update operating system packages on your own schedule. + - Upgrade the Operator or Agent without affecting the database image or uptime of the MongoDB servers. +- Containers are immutable and have a single responsibility or process, so you can: + - Describe and understand each container. + - Configure resources independently for easier debugging and triage. + - Inspect resources independently, including tailing the logs. + - Expose the state of each container. +- Pods are defined as StatefulSets so they benefit from stable identities. +- You can upgrade the Operator without restarting either the database or the MongoDB Agent containers. +- You can set up a MongoDB Kubernetes cluster offline once you download the Docker containers for the database and MongoDB Agent. + +## Example: MongoDB Version Upgrade + +The MongoDB Community Kubernetes Operator uses the Automation function of the MongoDB Agent to efficiently handle rolling upgrades. The Operator configures the StatefulSet to block Kubernetes from performing native rolling upgrades because the native process can trigger multiple re-elections in your MongoDB cluster. + +When you update the MongoDB version in your resource definition and reapply it to your Kubernetes environment, the Operator initiates a rolling upgrade: + +1. 
The Operator changes the StatefulSet [update strategy](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies) from `RollingUpdate` to `OnDelete`. + +1. The Operator updates the [image](https://kubernetes.io/docs/concepts/containers/images/) specification to the new version of MongoDB and writes a new Automation configuration ConfigMap to each pod. + +1. The MongoDB Agent chooses the first pod to upgrade and stops the `mongod` process using a local connection and [`db.shutdownServer`](https://www.mongodb.com/docs/manual/reference/method/db.shutdownServer/#db.shutdownServer). + +1. Kubernetes will restart the `mongod` container causing the version change hook to run before the `mongod` process and check the state of the MongoDB Agent. If the MongoDB Agent expects the `mongod` process to start with a new version, the hook uses a Kubernetes API call to delete the pod. + +1. The Kubernetes Controller downloads the target version of MongoDB from its default docker registry and restarts the pod with the target version of `mongod` in the database container. + +1. The MongoDB Agent starts. It checks the target version of the new `mongod`, then generates the configuration file for the `mongod` process. + +1. The `mongod` process receives the configuration file from the MongoDB Agent and starts. + +1. The MongoDB Agent reaches goal state. + +1. The MongoDB Agent chooses the next pod to upgrade and repeats the process until all pods are upgraded. + +1. The Operator changes the StatefulSet update strategy from `OnDelete` back to `RollingUpdate`. + + + +This upgrade process allows the MongoDB Agent to: + +- Perform pre-conditions. +- Upgrade the secondaries first. +- Wait for the secondaries' oplogs to catch up before triggering an election. +- Upgrade quickly for large replica sets. +- Consider voting nodes. +- Ensure a replica set is always available throughout the entire upgrade process. 
+ +## MongoDB Docker Images + +MongoDB images are available on [Docker Hub](https://hub.docker.com/_/mongo?tab=tags&page=1&ordering=last_updated). diff --git a/docs/build_operator_locally.md b/docs/build_operator_locally.md new file mode 100644 index 000000000..33dfff340 --- /dev/null +++ b/docs/build_operator_locally.md @@ -0,0 +1,54 @@ +# Quick start for building and deploy the operator locally + +This document contains a quickstart guide to build and deploy the operator locally. + + +## Prerequisites +This guide assumes that you have already prepared [Python virtual env](contributing.md#python-environment) and installed the following tools: + +* [Kind](https://kind.sigs.k8s.io/) +* [Docker](https://www.docker.com/) +* [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) + + +## Steps + +1. Create a local kubernetes cluster and start a local registry by running + +```sh +./scripts/dev/setup_kind_cluster.sh -n test-cluster +``` + +2. Alternatively create a local cluster and set current kubectl context to it. +```sh +./scripts/dev/setup_kind_cluster.sh -en test-cluster +``` + +3. Run the following to get kind credentials and switch current context to the newly created cluster: + +```sh +kind export kubeconfig --name test-cluster +# should return kind-test-cluster +kubectl config current-context +# should have test-cluster-control-plane node listed +kubectl get nodes +``` + +4. If you didn't clone the repository with `--recurse-submodules` flag you will need to download the helm-chart submodule locally by running the following command: +```sh +git submodule update --init +``` + + +5. Build and deploy the operator. 
Also add `IMG_BUILD_ARGS=--insecure` as described [here](contributing.md#deploying-the-operator) if necessary: + +```sh +# builds all required images and then deploys the operator +make all-images deploy +``` + +Note: this will build and push the operator at `repo_url/mongodb-kubernetes-operator`, where `repo_url` is extracted from the [dev config file](./contributing.md#developer-configuration) + +6. Change the `image` field in the [manager.yaml](../config/manager/manager.yaml) file to have the image you just built + +7. You can now deploy your resources following the [docs](../docs/README.md) diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 000000000..139d11b71 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,275 @@ +# Contributing to MongoDB Kubernetes Operator + +First you need to get familiar with the [Architecture guide](architecture.md), which explains +from a high perspective how everything works together. + +After our experience building the [Enterprise MongoDB Kubernetes +Operator](https://github.com/mongodb/mongodb-enterprise-kubernetes), we have +realized that it is very important to have a clean environment to work, and as such we have +adopted a strategy that makes it easier for everyone to contribute. + +This strategy is based on using +[`envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest) for setting up the tests +and `go test` for running the tests, and making the test-runner itself run as a Kubernetes Pod. This +makes it easier to run the tests in environments with access to a Kubernetes +cluster with no go toolchain installed locally, making it easier to reproduce +our local working environments in CI/CD systems. + +# High-Perspective Architecture + +The operator itself consists of 1 image, that has all the operational logic to deploy and +maintain the MongoDBCommunity resources in your cluster. 
+ +The operator deploys MongoDB in Pods (via a higher-level resource, a +StatefulSet), on each Pod there will be multiple images coexisting during the +lifetime of the MongoDB server. + +* Agent image: This image includes a binary provided by MongoDB that handles +the local operation of a MongoDB server given a series of configurations +provided by the operator. The configuration exists as a ConfigMap that's created +by the operator and mounted in the Agent's Pod. + +* MongoDB image: Docker image that includes the MongoDB server. + +* Version upgrade post-start hook image: This image includes a binary that helps orchestrate the + restarts of the MongoDB Replica Set members, in particular, when dealing with + version upgrades, which requires a very precise set of operations to allow for + seamless upgrades and downgrades, with no downtime. + +Each Pod holds a member of a Replica Set, and each Pod has different components, +each one of them in charge of some part of the lifecycle of the MongoDB database. + +# Getting Started + +## PR Prerequisites +* Please ensure you have signed our Contributor Agreement. You can find it [here](https://www.mongodb.com/legal/contributor-agreement). + +* Please ensure that all commits are signed. + +## Developer Configuration + +The operator is built using `golang`. We use a simple +json file that describe some local options that you need to set for the testing environment +to be able to run properly. Create a json file with the following content: + +```json +{ + "namespace": "mongodb", + "repo_url": "localhost:5000", + "operator_image": "mongodb-kubernetes-operator", + "e2e_image": "community-operator-e2e", + "version_upgrade_hook_image": "mongodb-kubernetes-operator-version-upgrade-post-start-hook", + "agent_image": "mongodb-agent-ubi-dev", + "readiness_probe_image": "mongodb-kubernetes-readinessprobe", + "s3_bucket": "" +} +``` + +#### Config Options + +1. `namespace` is the namespace that will be used by scripts/tooling. 
All the resources will be deployed here. +2. `repo_url` the repository that should be used to push/pull all images. +3. `operator_image` will be used as the name of the operator deployment, and the name of the operator image when build. +4. `e2e_image` the name of e2e test image that will be built. +5. `version_upgrade_hook_image` the name of the version upgrade post start hook image. +6. `agent_image` the name of the agent image. +7. `s3_bucket` the S3 bucket that Dockerfiles will be pushed to as part of the release process. Note: this is only required when running the release tasks locally. + + +You can set the `MONGODB_COMMUNITY_CONFIG` environment variable to be the absolute path of this file. +It will default to `~/.community-operator-dev/config.json` + +Please see [here](./build_operator_locally.md) to see how to build and deploy the operator locally. + +## Configure Docker registry + +The build process consists of multiple Docker images being built. You need to specify +where you want the locally built images to be pushed. The Docker registry needs to be +accessible from your Kubernetes cluster. + +### Local kind cluster +For local testing you can use a [local Kind cluster](build_operator_locally.md#steps). + +## Test Namespace + +You can change the namespace used for tests, if you are using `Kind`, for +instance, you can leave this as `mongodb`. + +## Python Environment + +The test runner is a Python script, in order to use it a virtualenv needs to be +created. + +**Python 3.9 is not supported yet. Please use Python 3.8.** + +### Pip +```sh +python -m venv venv +source venv/bin/activate +python -m pip install -r requirements.txt +``` + +### Pipenv + +* create a python environment and install dependencies. +```bash +pipenv install -r requirements.txt +``` + +* activate the python environment. +```bash +pipenv shell +``` + + +# Deploying the Operator + +In order to deploy the Operator from source, you can run the following command. 
+ +```sh +make operator-image deploy +``` + +This will build and deploy the operator to namespace specified in your configuration file. + +If you are using a local docker registry you should run the following command. +The additional `IMG_BUILD_ARGS=--insecure` variable will add the `--insecure` flag to the command creating the manifests. +This is necessary if your local registry is not secure. Read more about the flag on the [documentatio](https://docs.docker.com/reference/cli/docker/manifest/#working-with-insecure-registries) + +```sh +IMG_BUILD_ARGS=--insecure make operator-image deploy +``` + + +#### See the operator deployment +```sh +kubectl get pods +``` + +#### (Optional) Create a MongoDBCommunity Resource + +Follow the steps outlined [here](./deploy-configure.md) to deploy some resources. + +#### Cleanup +To remove the operator and any created resources you can run + +```sh +make undeploy +``` + +Alternatively, you can run the operator locally. Make sure you follow the steps outlined in [run-operator-locally.md](run-operator-locally.md) + +```sh +make run +``` + +# Running Tests + +### Unit tests + +Unit tests should be run from the root of the project with: + +```sh +make test +``` + +### E2E Tests + +If this is the first time running E2E tests, you will need to ensure that you have built and pushed +all images required by the E2E tests. You can do this by running the following command, +or with the additional `IMG_BUILD_ARGS=--insecure` described above. + +```sh +make all-images +``` + +For subsequent tests you can use + +```sh +make e2e-k8s test= +``` + +This will only re-build the e2e test image. Add `IMG_BUILD_ARGS=--insecure` if necessary + +We have built a simple mechanism to run E2E tests on your cluster using a runner +that deploys a series of Kubernetes objects, runs them, and awaits for their +completion. If the objects complete with a Success status, it means that the +tests were run successfully. 
The available tests can be found in the `test/e2e` directory
+ +Adding a new test is as easy as creating a new directory in `test/e2e` with the +new E2E test, and to run them: + +```sh +make e2e test= +``` + +# Before Committing your code + +## Set up pre-commit hooks +To set up the pre-commit hooks, please create symbolic links from the provided [hooks](https://github.com/mongodb/mongodb-kubernetes-operator/tree/master/scripts/git-hooks): + +* Navigate to your `.git/hooks` directory: + + `cd .git/hooks` + +* Create a symlink for every file in the `scripts/git-hooks` directory: + + `ln -s -f ../../scripts/git-hooks/* .` diff --git a/docs/deploy-configure.md b/docs/deploy-configure.md new file mode 100644 index 000000000..e090693f4 --- /dev/null +++ b/docs/deploy-configure.md @@ -0,0 +1,395 @@ +# Deploy and Configure a MongoDBCommunity Resource # + +The [`/config/samples`](../config/samples) directory contains example MongoDBCommunity resources that you can modify and deploy. + +## Table of Contents + +- [Deploy a Replica Set](#deploy-a-replica-set) +- [Scale a Replica Set](#scale-a-replica-set) +- [Add Arbiters to a Replica Set](#add-arbiters-to-a-replica-set) +- [Upgrade your MongoDBCommunity Resource Version and Feature Compatibility Version](#upgrade-your-mongodbcommunity-resource-version-and-feature-compatibility-version) + - [Example](#example) +- [Deploy Replica Sets on OpenShift](#deploy-replica-sets-on-openshift) +- [Define a Custom Database Role](#define-a-custom-database-role) +- [Specify Non-Default Values for Readiness Probe](#specify-non-default-values-for-readiness-probe) + - [When to specify custom values for the Readiness Probe](#when-to-specify-custom-values-for-the-readiness-probe) + +## Deploy a Replica Set + +**Warning:** When you delete MongoDB resources, persistent volumes remain +to help ensure that no unintended data loss occurs. 
If you create a new +MongoDB resource with the same name and persistent volumes, the +pre-existing data might cause issues if the new MongoDB resources have a +different topology than the previous ones. + +To deploy your first replica set: + +1. Replace `` in [config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml](../config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml) to the password you wish to use. +2. Invoke the following `kubectl` command: + ``` + kubectl apply -f config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml --namespace + ``` +3. Verify that the MongoDBCommunity resource deployed: + ``` + kubectl get mongodbcommunity --namespace + ``` + +4. The Community Kubernetes Operator creates secrets that contains users' connection strings and credentials. + + The secrets follow this naming convention: `--`, where: + + | Variable | Description | Value in Sample | + |----|----|----| + | `` | Name of the MongoDB database resource. | `example-mongodb` | + | `` | [Authentication database](https://www.mongodb.com/docs/manual/core/security-users/#std-label-user-authentication-database) where you defined the database user. | `admin` | + | `` | Username of the database user. | `my-user` | + + **NOTE**: Alternatively, you can specify an optional + `users[i].connectionStringSecretName` field in the + ``MongoDBCommunity`` custom resource to specify + the name of the connection string secret that the + Community Kubernetes Operator creates. + + Update the variables in the following command, then run it to retrieve a user's connection strings to the replica set from the secret: + + **NOTE**: The following command requires [jq](https://stedolan.github.io/jq/) version 1.6 or higher.

+ + ```sh + kubectl get secret -n \ + -o json | jq -r '.data | with_entries(.value |= @base64d)' + ``` + + The command returns the replica set's standard and DNS seed list [connection strings](https://www.mongodb.com/docs/manual/reference/connection-string/#connection-string-formats) in addition to the user's name and password: + + ```json + { + "connectionString.standard": "mongodb://:@example-mongodb-0.example-mongodb-svc.mongodb.svc.cluster.local:27017,example-mongodb-1.example-mongodb-svc.mongodb.svc.cluster.local:27017,example-mongodb-2.example-mongodb-svc.mongodb.svc.cluster.local:27017/admin?ssl=true", + "connectionString.standardSrv": "mongodb+srv://:@example-mongodb-svc.mongodb.svc.cluster.local/admin?ssl=true", + "password": "", + "username": "" + } + ``` + + **NOTE**: The Community Kubernetes Operator sets the [`ssl` connection option](https://www.mongodb.com/docs/manual/reference/connection-string/#connection-options) to `true` if you [Secure MongoDBCommunity Resource Connections using TLS](secure.md#secure-mongodbcommunity-resource-connections-using-tls).

+ + You can use the connection strings in this secret in your application: + + ```yaml + containers: + - name: test-app + env: + - name: "CONNECTION_STRING" + valueFrom: + secretKeyRef: + name: -- + key: connectionString.standardSrv + +5. Connect to one of your application's pods in the Kubernetes cluster: + + **NOTE**: You can access your replica set only from a pod in the same Kubernetes cluster. You can't access your replica set from outside of the Kubernetes cluster. + + ``` + kubectl -n exec --stdin --tty -- /bin/bash + ``` + + When you connect to your application pod, a shell prompt appears for your application's container: + + ``` + user@app:~$ + ``` + +6. Use one of the connection strings returned in step 4 to connect to the replica set. The following example uses [`mongosh`](https://www.mongodb.com/docs/mongodb-shell/) to connect to a replica set: + + ``` + mongosh "mongodb+srv://:@example-mongodb-svc.mongodb.svc.cluster.local/admin?ssl=true" + ``` + +## Scale a Replica Set + +You can scale up (increase) or scale down (decrease) the number of +members in a replica set. + +Consider the following example MongoDBCommunity resource definition: + +```yaml +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "4.2.7" +``` + +To scale a replica set: + +1. Edit the resource definition. + + Update `members` to the number of members that you want the replica set to have. + + ```yaml + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-mongodb + spec: + members: 5 + type: ReplicaSet + version: "4.2.7" + ``` + +2. Reapply the configuration to Kubernetes: + ``` + kubectl apply -f .yaml --namespace + ``` + + **NOTE**: When you scale down a MongoDBCommunity resource, the Community Operator + might take several minutes to remove the StatefulSet replicas for the + members that you remove from the replica set. 
+ +## Add Arbiters to a Replica Set + +To add [arbiters](https://www.mongodb.com/docs/manual/core/replica-set-arbiter/) to +your replica set, add the `spec.arbiters` field to your MongoDBCommunity +resource definition. This attribute configures the absolute amount of arbiters +in this Replica Set, this is, the amount of `mongod` instances will be +`spec.members` + `spec.arbiters`. + +The value of the `spec.arbiters` field must be: + +- a positive integer, and +- less than the value of the `spec.members` field. + +**NOTE**: At least one replica set member must not be an arbiter. + +Consider the following MongoDBCommunity resource definition example, with a PSS +(Primary-Secondary-Secondary) configuration: + +```yaml +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + type: ReplicaSet + members: 3 + version: "4.2.7" +``` + +To add arbiters: + +1. Edit the resource definition. + + Add the `spec.arbiters` field and assign its value to the number of arbiters that you want the replica set to have. + + ```yaml + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-mongodb + spec: + type: ReplicaSet + members: 3 + arbiters: 1 + version: "4.4.13" + ``` + +2. Reapply the configuration to Kubernetes: + ``` + kubectl apply -f .yaml --namespace + ``` + +The resulting Replica Set has a PSSA (Primary-Secondary-Secondary-Arbiter) +configuration. + +## Upgrade your MongoDBCommunity Resource Version and Feature Compatibility Version + +You can upgrade the major, minor, and/or feature compatibility versions of your MongoDBCommunity resource. These settings are configured in your resource definition YAML file. + +- To upgrade your resource's major and/or minor versions, set the `spec.version` setting to the desired MongoDB version. Make sure to specify a full image tag, such as `5.0.3`. Setting the `spec.version` to loosely-defined tags such as `5.0` is not currently supported. 
+ +- To modify your resource's [feature compatibility version](https://www.mongodb.com/docs/manual/reference/command/setFeatureCompatibilityVersion/), set the `spec.featureCompatibilityVersion` setting to the desired version. + +If you update `spec.version` to a later version, consider setting `spec.featureCompatibilityVersion` to the current working MongoDB version to give yourself the option to downgrade if necessary. To learn more about feature compatibility, see [`setFeatureCompatibilityVersion`](https://www.mongodb.com/docs/manual/reference/command/setFeatureCompatibilityVersion/) in the MongoDB Manual. + +### Example + +Consider the following example MongoDBCommunity resource definition: + +```yaml +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "4.0.6" +``` +To upgrade this resource from `4.0.6` to `4.2.7`: + +1. Edit the resource definition. + + a. Update `spec.version` to `4.2.7`. + + b. Update `spec.featureCompatibilityVersion` to `4.0`. + + ```yaml + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-mongodb + spec: + members: 3 + type: ReplicaSet + version: "4.2.7" + featureCompatibilityVersion: "4.0" + ``` + + **NOTE:** Setting `featureCompatibilityVersion` to `4.0` disables [4.2 features incompatible with MongoDB 4.0](https://www.mongodb.com/docs/manual/release-notes/4.2-compatibility/#compatibility-enabled). + +2. Reapply the configuration to Kubernetes: + ``` + kubectl apply -f .yaml --namespace + ``` + +## Deploy Replica Sets on OpenShift + +To deploy the operator on OpenShift you will have to provide the environment variable `MANAGED_SECURITY_CONTEXT` set to `true` for the operator deployment. + +See [here](../config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml) for +an example of how to provide the required configuration for a MongoDB +replica set. 
+ +See [here](../deploy/openshift/operator_openshift.yaml) for an example of how to configure the Operator deployment. + +## Define a Custom Database Role + +You can define [custom roles](https://www.mongodb.com/docs/manual/core/security-user-defined-roles/) to give you fine-grained access control over your MongoDB database resource. + + **NOTE**: Custom roles are scoped to a single MongoDB database resource. + +To define a custom role: + +1. Add the following fields to the MongoDBCommunity resource definition: + + | Key | Type | Description | Required? | + |----|----|----|----| + | `spec.security.authentication.ignoreUnknownUsers` | boolean | Flag that indicates whether you can add users that don't exist in the `MongoDBCommunity` resource. If omitted, defaults to `true`. | No | + | `spec.security.roles` | array | Array that defines [custom roles](https://www.mongodb.com/docs/manual/core/security-user-defined-roles/) roles that give you fine-grained access control over your MongoDB deployment. | Yes | + | `spec.security.roles.role` | string | Name of the custom role. | Yes | + | `spec.security.roles.db` | string | Database in which you want to store the user-defined role. | Yes | + | `spec.security.roles.authenticationRestrictions` | array | Array that defines the IP address from which and to which users assigned this role can connect. | No | + | `spec.security.roles.authenticationRestrictions.clientSource` | array | Array of IP addresses or CIDR blocks from which users assigned this role can connect.

MongoDB servers reject connection requests from users with this role if the requests come from a client that is not present in this array. | No | + | `spec.security.roles.authenticationRestrictions.serverAddress` | array | Array of IP addresses or CIDR blocks to which users assigned this role can connect.

MongoDB servers reject connection requests from users with this role if the client requests to connect to a server that is not present in this array. | No | + | `spec.security.roles.privileges` | array | List of actions that users granted this role can perform. For a list of accepted values, see [Privilege Actions](https://www.mongodb.com/docs/manual/reference/privilege-actions/#database-management-actions) in the MongoDB Manual for the MongoDB versions you deploy with the Kubernetes Operator. | Yes | + | `spec.security.roles.privileges.actions` | array | Name of the role. Valid values are [built-in roles](https://www.mongodb.com/docs/manual/reference/built-in-roles/#built-in-roles). | Yes | + | `spec.security.roles.privileges.resource.database`| string | Database for which the privilege `spec.security.roles.privileges.actions` apply. An empty string (`""`) indicates that the privilege actions apply to all databases.

If you provide a value for this setting, you must also provide a value for `spec.security.roles.privileges.resource.collection`. | Conditional | + | `spec.security.roles.privileges.resource.collection`| string | Collection for which the privilege `spec.security.roles.privileges.actions` apply. An empty string (`""`) indicates that the privilege actions apply to all of the database's collections.

If you provide a value for this setting, you must also provide a value for `spec.security.roles.privileges.resource.database`. | Conditional | + | `spec.security.roles.privileges.resource.cluster`| string | Flag that indicates that the privilege `spec.security.roles.privileges.actions` apply to all databases and collections in the MongoDB deployment. If omitted, defaults to `false`.

If set to `true`, do not provide values for `spec.security.roles.privileges.resource.database` and `spec.security.roles.privileges.resource.collection`. | Conditional | + | `spec.security.roles.roles`| array | An array of roles from which this role inherits privileges.

You must include the roles field. Use an empty array (`[]`) to specify no roles to inherit from. | Yes | + | `spec.security.roles.roles.role` | string | Name of the role to inherit from. | Conditional | + | `spec.security.roles.roles.database` | string | Name of database that contains the role to inherit from. | Conditional | + + ```yaml + --- + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: custom-role-mongodb + spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + roles: # custom roles are defined here + - role: testRole + db: admin + privileges: + - resource: + db: "test" + collection: "" # an empty string indicates any collection + actions: + - find + roles: [] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + - name: testRole # apply the custom role to the user + db: admin + scramCredentialsSecretName: my-scram + ``` + +2. Save the file. +3. Apply the updated MongoDBCommunity resource definition: + + ``` + kubectl apply -f .yaml --namespace + ``` + + +## Specify Non-Default Values for Readiness Probe + +Under some circumstances it might be necessary to set your own custom values for +the `ReadinessProbe` used by the MongoDB Community Operator. To do so, you +should use the `statefulSet` attribute in `resource.spec`, as in the following +provided example [yaml +file](../config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml). 
+Only those attributes passed will be set, for instance, given the following structure: + +```yaml +spec: + statefulSet: + spec: + template: + spec: + containers: + - name: mongodb-agent + readinessProbe: + failureThreshold: 40 + initialDelaySeconds: 5 +``` + +*Only* the values of `failureThreshold` and `initialDelaySeconds` will be set to +their custom, specified values. The rest of the attributes will be set to their +default values. + +*Please note that these are the actual values set by the Operator for our +MongoDB Custom Resources.* + +### When to specify custom values for the Readiness Probe + +In some cases, for instance, with a less than optimal download speed from the +image registry, it could be necessary for the Operator to tolerate a Pod that +has taken longer than expected to restart or upgrade to a different version of +MongoDB. In these cases we want the Kubernetes API to wait a little longer +before giving up, we could increase the value of `failureThreshold` to `60`. + +In other cases, if the Kubernetes API is slower than usual, we would increase +the value of `periodSeconds` to `20`, so the Kubernetes API will do half of the +requests it normally does (default value for `periodSeconds` is `10`). + +*Please note that these are referential values only!* + +### Operator Configurations + +#### Modify cluster domain for MongoDB service objects + +To configure the cluster domain for the MongoDB service object, i.e use a domain other than the default `cluster.local` you can specify it as an environment variable in the operator deployment under `CLUSTER_DOMAIN` key. 
+ +For ex: +```yaml +env: + - name: CLUSTER_DOMAIN + value: $CUSTOM_DOMAIN +``` \ No newline at end of file diff --git a/docs/external_access.md b/docs/external_access.md new file mode 100644 index 000000000..40adb279c --- /dev/null +++ b/docs/external_access.md @@ -0,0 +1,103 @@ +## Enable External Access to a MongoDB Deployment + +This guide assumes that the operator is installed and a MongoDB deployment is yet to be done but you have a chosen namespace that you are installing into. We will install cert-manager and then generate certificates and configure split-horizon to support internal and external DNS names for configuring external access to the replicaset. + +### Install cert-manager + +```sh +kubectl create namespace cert-manager +helm repo add jetstack https://charts.jetstack.io +helm repo update +helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --version v1.3.1 \ + --set installCRDs=true +``` + +### Install mkcert and generate CA + +```sh +brew install mkcert # for Mac +#for Linux / Windows systems look at https://github.com/FiloSottile/mkcert +mkcert -install +``` + +Execute ```mkcert --CAROOT``` to note the location of the generated root CA key and cert. + +### Retrieve the CA and create configmaps and secrets + +Use the files that you found in the previous step. Replace `````` with your chosen namespace + +```sh +kubectl create configmap ca-config-map --from-file=ca.crt= --namespace + +kubectl create secret tls ca-key-pair --cert= --key= --namespace +``` + +### Create the Cert Manager issuer and secret + +Edit the file [cert-manager-certificate.yaml](../config/samples/external_access/cert-manager-certificate.yaml) to replace `````` with your MongoDB deployment name. Also replace ``````, ``````, and `````` with the external FQDNs of the MongoDB replicaset members. Please remember that you will have to add an equal number of entries for each member of the replicaset, for example: + +```yaml +... 
+spec: + members: 3 + type: ReplicaSet + replicaSetHorizons: + - horizon1: :31181 + horizon2: :31181 + - horizon1: :31182 + horizon2: :31182 + - horizon1: :31183 + horizon2: :31183 +... +``` + +Apply the manifests. Replace `````` with the namespace you are using for the deployment. + +```sh +kubectl apply -f config/samples/external_access/cert-manager-issuer.yaml --namespace +kubectl apply -f config/samples/external_access/cert-manager-certificate.yaml --namespace +``` + +### Create the MongoDB deployment + +Edit [mongodb.com_v1_mongodbcommunity_cr.yaml](../config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml). Replace with the desired MongoDB deployment name -- this should be the same as in the previous step. Replace ``````, ``````, and `````` with the external FQDNs of the MongoDB replicaset members. Please remember that you should have the same number of entries in this section as the number of your replicaset members. You can also edit the ports for external access to your preferred numbers in this section -- you will have to remember to change them in the next step too. Change `````` to your desired admin password for MongoDB. + +Apply the manifest. + +```sh +kubectl apply -f config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml --namespace +``` + +Wait for the replicaset to be available. + +### Create the external NodePort services for accessing the MongoDB deployment from outside the Kubernetes cluster + +Edit [external_services.yaml](../config/samples/external_access/external_services.yaml) and replace `````` with the MongoDB deployment name that you have used in the preceeding steps. You can change the ```nodePort``` and ```port``` to reflect the changes (if any) you have made in the previous steps. + +Apply the manifest. 
+ +```sh +kubectl apply -f config/samples/external_access/external_services.yaml --namespace +``` + +### Retrieve the certificates from a MongoDB replicaset member + +```sh +kubectl exec --namespace -it -0 -c mongod -- bash +``` + +Once inside the container ```cat``` and copy the contents of the ```.pem``` file in ```/var/lib/tls/server``` into a file on your local system. + +### Connect to the MongoDB deployment from outside the Kubernetes cluster + +This is an example to connect to the MongoDB cluster with Mongo shell. Use the CA from ```mkcert``` and the certificate from the previous step. Replace the values in the command from the preceding steps. + +```sh +mongosh --tls --tlsCAFile ca.crt --tlsCertificateKeyFile key.pem --username my-user --password mongodb://:31181,:31182,:31183 +``` + +### Conclusion +At this point, you should be able to connect to the MongoDB deployment from outside the cluster. Make sure that you can resolve the FQDNs for the replicaset members where you have the Mongo client installed. 
diff --git a/docs/grafana/sample_dashboard.json b/docs/grafana/sample_dashboard.json new file mode 100644 index 000000000..6c08adeb0 --- /dev/null +++ b/docs/grafana/sample_dashboard.json @@ -0,0 +1,1364 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 5, + "iteration": 1650460419048, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "panels": [], + "type": "row" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 2, + "maxDataPoints": null, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^Last \\(not null\\)$/", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "7.5.2", + "targets": [ + { + "exemplar": true, + "expr": "mongodb_uptimeMillis", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Uptime (minutes)", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "seriesToRows", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 5, + "maxDataPoints": null, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.5.2", + "repeat": null, + "targets": [ + { + "exemplar": true, + "expr": "mongodb_connections_available", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Available", + "refId": "A" + }, + { + "exemplar": true, + "expr": "mongodb_connections_active", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Active", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Connections", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "seriesToRows", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "unit": "bytes" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": null, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + 
"expr": "sum(container_memory_working_set_bytes{pod=~\"$Cluster.*\", container=~\"mongodb.*\"})\n", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Working", + "refId": "A" + }, + { + "exemplar": true, + "expr": ":node_memory_MemAvailable_bytes:sum", + "hide": false, + "interval": "", + "legendFormat": "Available", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Mongo Pods Memory Usage in GB / Total Available on Cluster", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "seriesToRows", + "reducers": [] + } + } + ], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 1 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "maxDataPoints": null, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { 
+ "exemplar": true, + "expr": "sum(rate(container_cpu_usage_seconds_total{pod=~\"$Cluster.*\", image!~\"sha.*\", container=~\"mongo.*\"}[5m]))", + "hide": false, + "interval": "", + "legendFormat": "Usage", + "refId": "Used" + }, + { + "exemplar": true, + "expr": "cluster:node_cpu:sum_rate5m", + "hide": false, + "interval": "", + "legendFormat": "Available", + "refId": "Available" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Mongo Pods CPU Usage / Total Available on Cluster", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "seriesToRows", + "reducers": [] + } + } + ], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 11, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "7.5.2", + "targets": [ + { + "exemplar": true, + "expr": "max(mongodb_catalogStats_collections{pod=~\"$Cluster.*\"})", + "instant": false, + "interval": "", + "legendFormat": "Collections", + "refId": "A" + }, + { + "exemplar": 
true, + "expr": "max(mongodb_catalogStats_capped{pod=~\"$Cluster.*\"})", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Capped Collections", + "refId": "B" + }, + { + "exemplar": true, + "expr": "max(mongodb_catalogStats_timeseries{pod=~\"$Cluster.*\"})", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Timeseries", + "refId": "C" + }, + { + "exemplar": true, + "expr": "max(mongodb_catalogStats_views{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Views", + "refId": "D" + } + ], + "title": "Catalog Stats", + "type": "bargauge" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "graph": false, + "legend": false, + "tooltip": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 9, + "options": { + "graph": {}, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltipOptions": { + "mode": "single" + } + }, + "pluginVersion": "7.5.2", + "targets": [ + { + "exemplar": true, + "expr": "sum(mongodb_globalLock_activeClients_total{pod=~\"$Cluster.*\"})", + "interval": "", + "legendFormat": "Total", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(mongodb_globalLock_activeClients_readers{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Readers", + "refId": "B" + }, + { + "exemplar": true, + 
"expr": "sum(mongodb_globalLock_activeClients_writers{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Writers", + "refId": "C" + } + ], + "title": "Global Locks", + "transformations": [], + "type": "timeseries" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "mongodb_metrics_cursor_open_total{pod=~\"$Cluster.*\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{pod}} Open", + "refId": "A" + }, + { + "exemplar": true, + "expr": "mongodb_metrics_cursor_open_noTimeout{pod=~\"$Cluster.*\"}", + "hide": false, + "interval": "", + "legendFormat": "{{pod}} Open No Timeout", + "refId": "B" + }, + { + "exemplar": true, + "expr": "mongodb_metrics_cursor_timed_out{pod=~\"$Cluster.*\"}", + "hide": false, + "interval": "", + "legendFormat": "{{pod}} Timed Out", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Cursors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 16 + }, + "hiddenSeries": false, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "mongodb_metrics_document_inserted{pod=~\"$Cluster.*\"}", + "interval": "", + "legendFormat": "{{pod}} Inserted", + "refId": "A" + }, + { + "exemplar": true, + "expr": "mongodb_metrics_document_returned{pod=~\"$Cluster.*\"}", + "hide": false, + "interval": "", + "legendFormat": "{{pod}} Returned", + "refId": "B" + }, + { + "exemplar": true, + "expr": "mongodb_metrics_document_deleted{pod=~\"$Cluster.*\"}", + "hide": false, + "interval": "", + "legendFormat": "{{pod}} Deleted", + "refId": "C" + }, + { + "exemplar": true, + "expr": "mongodb_metrics_document_updated{pod=~\"$Cluster.*\"}", + "hide": false, + "interval": "", + "legendFormat": "{{pod}} Updated", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Documents", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 27 + }, + "id": 17, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.2", + "targets": [ + { + "exemplar": true, + "expr": "sum(mongodb_metrics_repl_network_bytes{pod=~\"$Cluster.*\"})", + "instant": false, + "interval": "", + "legendFormat": "Total Usage", + "refId": "A" + } + ], + "title": "Replication Network Usage", + "transformations": [ + { + "id": "reduce", + "options": { + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 27 + }, + "id": 19, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + 
"fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.5.2", + "targets": [ + { + "exemplar": true, + "expr": "sum(mongodb_metrics_repl_network_ops{pod=~\"$Cluster.*\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Replication Operations", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "unit": "bytes" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 26, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "max(mongodb_network_bytesIn{pod=~\"$Cluster.*\"})", + "interval": "", + "legendFormat": "Bytes In", + "refId": "A" + }, + { + "exemplar": true, + "expr": "max(mongodb_network_bytesOut{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Bytes Out", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + 
"yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "unit": "µs" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "hiddenSeries": false, + "id": 23, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_reads_latency{pod=~\"$Cluster.*\"})", + "interval": "", + "legendFormat": "Reads", + "refId": "A" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_commands_latency{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Commands", + "refId": "C" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_transactions_latency{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Transactions", + "refId": "D" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_writes_latency{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Latencies", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "µs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_reads_ops{pod=~\"$Cluster.*\"})", + "interval": "", + "legendFormat": "Reads", + "refId": "A" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_commands_ops{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Commands", + "refId": "C" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_transactions_ops{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Transactions", + "refId": "D" + }, + { + "exemplar": true, + "expr": "max(mongodb_opLatencies_writes_ops{pod=~\"$Cluster.*\"})", + "hide": false, + "interval": "", + "legendFormat": "Writes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Ops", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, 
+ "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "replica-set-with-prom", + "value": "replica-set-with-prom" + }, + "datasource": null, + "definition": "label_values(mongodb_connections_available, cl_name)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "Cluster", + "options": [], + "query": { + "query": "label_values(mongodb_connections_available, cl_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "MongoDB Dashboard Copy", + "uid": "_y8XBgynz", + "version": 22 + } \ No newline at end of file diff --git a/docs/how-to-release.md b/docs/how-to-release.md new file mode 100644 index 000000000..f92412433 --- /dev/null +++ b/docs/how-to-release.md @@ -0,0 +1,31 @@ + +## How to Release +* Prepare release PR: + * Pull the changes in the helm-charts submodule folder to get the latest main. + * `cd helm-charts` + * `git submodule update --init` - if submodule was not initialised before + * `git pull origin main` + * Update any changing versions in [release.json](../release.json). 
+ * `operator` - always when doing a release + * `version-upgrade-hook` - whenever we make changes in the [versionhook](../cmd/versionhook) files + * `readiness-probe` - whenever we make changes in the [readiness](../cmd/readiness) files + * `agent` - newest version available in `ops-manager` `conf-hosted.properties` file under `automation.agent.version` + * `agent-tools-version` - newest version available in `ops-manager` `conf-hosted.properties` file under `mongotools.version` + * Ensure that [the release notes](./RELEASE_NOTES.md) are up to date for this release. + * all merged PRs have a covered entry in the release notes. For example, you can use `git log v0.11.0..HEAD --reverse --oneline` to get the list of commits after previous release + * Run `python scripts/ci/update_release.py` to update the relevant yaml manifests. + * **use venv and then `python3 -m pip install -r requirements.txt`** + * Copy ``CRD`s`` to Helm Chart + * `cp config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml helm-charts/charts/community-operator-crds/templates/mongodbcommunity.mongodb.com_mongodbcommunity.yaml` + * commit changes to the [helm-charts submodule](https://github.com/mongodb/helm-charts) and create a PR against it ([similar to this one](https://github.com/mongodb/helm-charts/pull/163)). + * do not merge helm-charts PR until release PR is merged and the images are pushed to quay.io. + * do not commit the submodule change in the release pr of the community repository. + * Commit all changes (except for the submodule change) + * Create a PR with the title `Release MongoDB Kubernetes Operator v` (the title must match this pattern). + * Wait for the tests to pass and merge the PR. + * Upon approval, all new images for this release will be built and released, and a GitHub release draft will be created. + * Dockerfiles for mongodb-kubernetes-operator and mongodb-agent will be uploaded to S3 to be used by daily rebuild process in the enterprise repo. 
+ * Review and publish the new GitHub release draft, that was prepared + * Merge helm-charts PR and update submodule to the latest commit on `main` branch. + * Create a new PR with only bump to the helm-chart submodule, similar to [this](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1210). The commit here should match the master commit in the `helm-charts` repository. + * Add the new released operator version to the enterprise [release.json](https://github.com/10gen/ops-manager-kubernetes/blob/master/release.json#L74) file. diff --git a/docs/install-upgrade.md b/docs/install-upgrade.md new file mode 100644 index 000000000..3deb68a06 --- /dev/null +++ b/docs/install-upgrade.md @@ -0,0 +1,313 @@ +# Install and Upgrade the Community Kubernetes Operator # + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Install the Operator](#install-the-operator) + - [Understand Deployment Scopes](#understand-deployment-scopes) + - [Operator in Same Namespace as Resources](#operator-in-same-namespace-as-resources) + - [Operator in Different Namespace Than Resources](#operator-in-different-namespace-than-resources) + - [Install the Operator using Helm](#install-the-operator-using-Helm) + - [Prerequisites to Install using Helm](#prerequisites-to-install-using-Helm) + - [Procedure using Helm](#procedure-using-Helm) + - [Install the Operator using kubectl](#install-the-operator-using-kubectl) + - [Prerequisites to Install using kubectl](#prerequisites-to-install-using-kubectl) + - [Install in a Different Namespace using kubectl](#install-in-a-different-namespace-using-kubectl) + - [Configure the MongoDB Docker Image or Container Registry](#configure-the-mongodb-docker-image-or-container-registry) + - [Procedure using kubectl](#procedure-using-kubectl) +- [Upgrade the Operator](#upgrade-the-operator) +- [Rotating TLS certificate for the MongoDB deployment](#rotating-tls-certificate-for-the-mongodb-deployment) + +## Prerequisites + +- A Kubernetes cluster with 
nodes with x86-64/AMD64 processors (either all, or a separate node pool) + +## Install the Operator + +The MongoDB Community Kubernetes Operator is a [Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) and a controller. + +Use the following resources to prepare your implementation and install the Community Operator: + +- [Understand Deployment Scopes](#understand-deployment-scopes) +- [Install the Operator using Helm](#install-the-operator-using-Helm) +- [Install the Operator using kubectl](#install-the-operator-using-kubectl) + +### Understand Deployment Scopes + +You can deploy the MongoDB Community Kubernetes Operator with different scopes based on where you want to deploy MongoDBCommunity resources: + +- [Operator in Same Namespace as Resources](#operator-in-same-namespace-as-resources) +- [Operator in Different Namespace Than Resources](#operator-in-different-namespace-than-resources) + +#### Operator in Same Namespace as Resources + +You scope the Operator to a namespace. The Operator watches MongoDBCommunity resources in that same [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). + +This is the default scope when you [install the Operator using Helm](#install-the-operator-using-helm) or [install the Operator using kubectl](#install-the-operator-using-kubectl). + +#### Operator in Different Namespace Than Resources + +You scope the Operator to a namespace. The Operator watches MongoDBCommunity resources in other namespaces. + +To deploy the Operator in a different namespace than the resources, [Install in a Different Namespace using Helm](#install-in-a-different-namespace-using-helm) or [Install in a Different Namespace using kubectl](#install-in-a-different-namespace-using-kubectl). + +### Install the Operator using Helm + +You can install the Operator using the [MongoDB Helm Charts](https://mongodb.github.io/helm-charts/). 
+ +#### Prerequisites to Install using Helm + +Before you install the MongoDB Community Kubernetes Operator using Helm, you must: + +1. Have a Kubernetes solution available to use. + If you need a Kubernetes solution, see the [Kubernetes documentation on picking the right solution](https://kubernetes.io/docs/setup). For testing, MongoDB recommends [Kind](https://kind.sigs.k8s.io/). +2. [Install Helm](https://helm.sh/docs/intro/install/). +3. Add the [MongoDB Helm Charts for Kubernetes](https://mongodb.github.io/helm-charts/) repository to Helm by running the following command: + ``` + helm repo add mongodb https://mongodb.github.io/helm-charts + ``` + +#### Procedure using Helm + +Use one of the following procedures to install the Operator using Helm: + +- [Install in the Default Namespace using Helm](#install-in-the-default-namespace-using-helm) +- [Install in a Different Namespace using Helm](#install-in-a-different-namespace-using-helm) + +##### Install in the Default Namespace using Helm + +To install the Custom Resource Definitions and the Community Operator in +the `default` namespace using Helm, run the install command from the +terminal: + ``` + helm install community-operator mongodb/community-operator + ``` + +If you already installed the `community-operator-crds` Helm chart, you must +include `--set community-operator-crds.enabled=false` when installing the Operator: + ``` + helm install community-operator mongodb/community-operator --set community-operator-crds.enabled=false + ``` + +##### Install in a Different Namespace using Helm + +To install the Custom Resource Definitions and the Community Operator in +a different namespace using Helm, run the install +command with the `--namespace` flag from the terminal. Include the `--create-namespace` +flag if you are creating a new namespace. 
+ ``` + helm install community-operator mongodb/community-operator --namespace mongodb [--create-namespace] + ``` + +To configure the Operator to watch resources in another namespace, run the following command from the terminal. Replace `example` with the namespace the Operator should watch: + + ``` + helm install community-operator mongodb/community-operator --set operator.watchNamespace="example" + ``` + +### Install the Operator using kubectl + +You can install the Operator using `kubectl` instead of Helm. + +#### Prerequisites to Install using kubectl + +Before you install the MongoDB Community Kubernetes Operator using `kubectl`, you must: + +1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). +2. Have a Kubernetes solution available to use. + If you need a Kubernetes solution, see the [Kubernetes documentation on picking the right solution](https://kubernetes.io/docs/setup). For testing, MongoDB recommends [Kind](https://kind.sigs.k8s.io/). +3. Clone this repository. + ``` + git clone https://github.com/mongodb/mongodb-kubernetes-operator.git + ``` +4. **Optional** Configure the Operator to watch other namespaces. +5. **Optional** Configure the [MongoDB Docker image or container registry](#configure-the-mongodb-docker-image-or-container-registry). + +##### Install in a Different Namespace using kubectl + +To configure the Operator to watch resources in other namespaces: + +1. In the Operator [resource definition](../config/manager/manager.yaml), set the `WATCH_NAMESPACE` environment variable to one of the following values: + + - the namespace that you want the Operator to watch, or + - `*` to configure the Operator to watch all namespaces in the cluster. + + ```yaml + spec: + containers: + - name: mongodb-kubernetes-operator + image: quay.io/mongodb/mongodb-kubernetes-operator:0.5.0 + command: + - mongodb-kubernetes-operator + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + value: "*" + ``` + +2. 
Modify the [clusterRoleBinding](../deploy/clusterwide/cluster_role_binding.yaml) namespace value for the serviceAccount `mongodb-kubernetes-operator` to the namespace in which the operator is deployed. + +3. Run the following command to create cluster-wide roles and role-bindings in the default namespace: + + ```sh + kubectl apply -f deploy/clusterwide + ``` +4. For each namespace that you want the Operator to watch, run the following + commands to deploy a Role, RoleBinding and ServiceAccount in that namespace: + + ```sh + kubectl apply -k config/rbac --namespace + ``` + + *Note: If you need the operator to have permission over multiple namespaces, for ex: when configuring the operator to have the `connectionStringSecret` in a different `namespace`, make sure + to apply the `RBAC` in all the relevant namespaces.* + + +5. [Install the operator](#procedure-using-kubectl). + +##### Configure the MongoDB Docker Image or Container Registry + +By default, the Operator pulls the MongoDB database Docker image from `registry.hub.docker.com/library/mongo`. + +To configure the Operator to use a different image or container registry +for MongoDB Docker images: + +1. In the Operator [resource definition](../config/manager/manager.yaml), set the `MONGODB_IMAGE` and `MONGODB_REPO_URL` environment variables: + + **NOTE:** Use the official + [MongoDB Community Server images](https://hub.docker.com/r/mongodb/mongodb-community-server). + Official images provide the following advantages: + + - They are rebuilt daily for the latest upstream + vulnerability fixes. + - MongoDB tests, maintains, and supports them. + + | Environment Variable | Description | Default | + |----|------------------------------|------------------------------| + | `MONGODB_IMAGE` | From the `MONGODB_REPO_URL`, absolute path to the MongoDB Docker image that you want to deploy. 
| `"mongodb-community-server"` | + | `MONGODB_REPO_URL` | URL of the container registry that contains the MongoDB Docker image that you want to deploy. | `"quay.io/mongodb"` | + + ```yaml + spec: + containers: + - name: mongodb-kubernetes-operator + image: quay.io/mongodb/mongodb-kubernetes-operator:0.5.1 + command: + - mongodb-kubernetes-operator + imagePullPolicy: Always + env: + - name: MONGODB_IMAGE + value: + - name: MONGODB_REPO_URL + value: + ``` + +2. Save the file. + +3. [Install the operator](#procedure-using-kubectl). + +#### Procedure using kubectl + +The Operator can be installed using `kubectl` and the [Makefile](../Makefile). + +To install the MongoDB Community Kubernetes Operator using kubectl: + +1. Change to the Community Operator's directory. +2. Install the [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/). + + a. Invoke the following command: + *Make sure to apply the CRD file from the [git tag version](https://github.com/mongodb/mongodb-kubernetes-operator/tags) of the operator you are attempting to install*. + ``` + kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + ``` + b. Verify that the Custom Resource Definitions installed successfully: + ``` + kubectl get crd/mongodbcommunity.mongodbcommunity.mongodb.com + ``` +3. Install the necessary roles and role-bindings: + + a. Invoke the following command: + ``` + kubectl apply -k config/rbac/ --namespace + ``` + b. Verify that the resources have been created: + ``` + kubectl get role mongodb-kubernetes-operator --namespace + + kubectl get rolebinding mongodb-kubernetes-operator --namespace + + kubectl get serviceaccount mongodb-kubernetes-operator --namespace + ``` +4. Install the Operator. + + a. Invoke the following `kubectl` command to install the Operator in the specified namespace: + ``` + kubectl create -f config/manager/manager.yaml --namespace + ``` + b. 
Verify that the Operator installed successfully:
Patch your statefulset to have it update the permissions + ``` + kubectl patch statefulset --type='json' --patch '[ {"op":"add","path":"/spec/template/spec/initContainers/-", "value": { "name": "change-data-dir-permissions", "image": "busybox", "command": [ "chown", "-R", "2000", "/data" ], "securityContext": { "runAsNonRoot": false, "runAsUser": 0, "runAsGroup":0 }, "volumeMounts": [ { "mountPath": "/data", "name" : "data-volume" } ] } } ]' + ``` + c. Delete your pod manually + Since you added your cr in step a. kubernetes will immediately try to get your cluster up and running. + You will now have one pod that isn't working since it got created before you patched your statefulset with the additional migration container. + Delete that pod. + ``` + kubectl delete pod -0 + ``` + d. You're done. Now Kubernetes will create the pod fresh, causing the migration to run and then the pod to start up. Then kubernetes will proceed creating the next pod until it reaches the number specified in your cr. + +## Rotating TLS certificate for the MongoDB deployment + +Renew the secret for your TLS certificates +``` +kubectl create secret tls \ + --cert= \ + --key= \ + --dry-run=client \ + -o yaml | +kubectl apply -f - +``` +*`secret_name` is what you've specified under `Spec.Security.TLS.CertificateKeySecret.Name`*. + +If you're using a tool like cert-manager, you can follow [these instructions](https://cert-manager.io/docs/usage/certificate/#renewal) to rotate the certificate. +The operator should would watch the secret change and re-trigger a reconcile process. diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 000000000..021ae48ed --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,33 @@ +# Configure Logging in MongoDB Community + +This section describes the components which are logging either to a file or stdout, +how to configure them and what their defaults are. 
+ +## MongoDB Processes +### Configuration +The exposed CRD options can be seen [in the crd yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/74d13f189566574b862e5670b366b61ec5b65923/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml#L105-L117). +Additionally, more information regarding configuring systemLog can be found [in the official documentation of systemLog](https://www.mongodb.com/docs/manual/reference/configuration-options/#core-options)]. +`spec.agent.systemLog.destination` configures the logging destination of the mongod process. +### Default Values +By default, MongoDB sends all log output to standard output. + +## MongoDB Agent +### Configuration +`spec.agent.logFile` can be used to configure the output file of the mongoDB agent logging. +The agent will log to standard output with the following setting: `/dev/stdout`. +### Default Values +By default, the MongoDB agent logs to `/var/log/mongodb-mms-automation/automation-agent.log` + +## ReadinessProbe +### Configuration & Default Values +The readinessProbe can be configured via Environment variables. +Below is a table with each environment variable, its explanation and its default value. + +| Environment Variable | Explanation | Default Value | +|---------------------------------|-------------------------------------------------------------------------|-----------------------------------------------| +| READINESS_PROBE_LOGGER_BACKUPS | maximum number of old log files to retain | 5 | +| READINESS_PROBE_LOGGER_MAX_SIZE | maximum size in megabytes | 5 | +| READINESS_PROBE_LOGGER_MAX_AGE | maximum number of days to retain old log files | none | +| READINESS_PROBE_LOGGER_COMPRESS | if the rotated log files should be compressed | false | +| MDB_WITH_AGENT_FILE_LOGGING | whether we should also log to stdout (which shows in kubectl describe) | true | +| LOG_FILE_PATH | path of the logfile of the readinessProbe. 
| /var/log/mongodb-mms-automation/readiness.log | \ No newline at end of file diff --git a/docs/prometheus/README.md b/docs/prometheus/README.md new file mode 100644 index 000000000..a7c53f298 --- /dev/null +++ b/docs/prometheus/README.md @@ -0,0 +1,189 @@ +# Use Prometheus with your MongoDB Resource + + You can use the [mongodb-prometheus-sample.yaml](mongodb-prometheus-sample.yaml) file to + deploy a MongoDB resource in your Kubernetes cluster, with a +[`ServiceMonitor`](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources) +to indicate to Prometheus how to consume metrics data from +it. + +The sample specifies a simple MongoDB resource with one user, +and the `spec.Prometheus` attribute with basic HTTP +authentication and no TLS. The sample lets you test +the metrics that MongoDB sends to Prometheus. + +## Quick Start + +We tested this setup with version 0.54 of the [Prometheus +Operator](https://github.com/prometheus-operator/prometheus-operator). + +### Prerequisites + +* Kubernetes 1.16+ +* Helm 3+ + +### Install the Prometheus Operator + +You can install the Prometheus Operator using Helm. To learn +more, see the [installation instructions](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#kube-prometheus-stack). 
+ +To install the Prometheus Operator using Helm, run the +following commands: + +``` shell +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +helm install prometheus prometheus-community/ \ + kube-prometheus-stack --namespace \ + --create-namespace +``` + +### Install the MongoDB Community Kubernetes Operator + +Run the following command to install the Community Kubernetes +Operator and create a namespace to contain the Community +Kubernetes Operator and resources: + +``` shell +helm install community-operator mongodb/community-operator --namespace --create-namespace +``` + +To learn more, see the [Installation Instructions](../install-upgrade.md#operator-in-same-namespace-as-resources). + +## Create a MongoDB Resource + + You can use the [mongodb-prometheus-sample.yaml](mongodb-prometheus-sample.yaml) file to + deploy a MongoDB resource in your Kubernetes cluster, with a +[`ServiceMonitor`](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#related-resources) +to indicate to Prometheus how to consume metrics data from +it. + +You can apply the sample directly with the following command: + +``` shell +kubectl apply -f +``` + +**Note:** If you haven't cloned the +[mongodb-kubernetes-operator](https://github.com/mongodb/mongodb-kubernetes-operator) +repository, you must provide the full URL that points to the +[mongodb-prometheus-sample.yaml](mongodb-prometheus-sample.yaml) file in the command: +[https://raw.githubusercontent.com/mongodb/mongodb-kubernetes-operator/master/docs/prometheus/mongodb-prometheus-sample.yaml](mongodb-prometheus-sample.yaml) + +This command creates two `Secrets` that contain authentication +for a new MongoDB user and basic HTTP authentication for the +Prometheus endpoint. The command creates both `Secrets` in the +`mongodb` namespace. 
+ +This command also creates a `ServiceMonitor` that configures +Prometheus to consume this resource's metrics. This command +creates the `ServiceMonitor` in the `prometheus-system` +namespace. + +## Optional: Enable TLS on the Prometheus Endpoint + +### Install Cert-Manager + +1. Run the following commands to install + [Cert-Manager](https://cert-manager.io/) using Helm: + + ``` shell + helm repo add jetstack https://charts.jetstack.io + helm repo update + helm install \ + cert-manager jetstack/cert-manager \ + --namespace cert-manager \ + --create-namespace \ + --version v1.7.1 \ + --set installCRDs=true + ``` + +2. Now with Cert-Manager installed, create a Cert-Manager + `Issuer` and then a `Certificate`. You can use the two files + that we provide to create a new `Issuer`: + + a. Run the following command to create a `Secret` that + contains the TLS certificate `tls.crt` and `tls.key` + entries. You can use the certificate and key files that + we provide in the [`testdata/tls`](../../testdata/tls) directory to create a Cert-Manager `Certificate`. + + ``` shell + kubectl create secret tls issuer-secret --cert=../../testdata/tls/ca.crt --key=../../testdata/tls/ca.key \ + --namespace mongodb + ``` + + The following response appears: + + ``` shell + secret/issuer-secret created + ``` + + b. Run the following command to create a new `Issuer` and + `Certificate`: + + ``` shell + kubectl apply -f issuer-and-cert.yaml --namespace mongodb + ``` + The following response appears: + + ``` shell + issuer.cert-manager.io/ca-issuer created + certificate.cert-manager.io/prometheus-target-cert created + ``` + +### Enable TLS on the MongoDB CRD + +**Important!** Do **NOT** use this configuration in Production +environments! A security expert should advise you about how to +configure TLS. + +To enable TLS, you must add a new entry to the +`spec.prometheus` section of the MongoDB `CustomResource`. 
Run +the following [patch](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) +operation to add the needed entry. + +``` shell +kubectl patch mdbc mongodb --type='json' \ + -p='[{"op": "add", "path": "/spec/prometheus/tlsSecretKeyRef", "value":{"name": "prometheus-target-cert"}}]' \ + --namespace mongodb +``` + +The following response appears: + +``` shell +mongodbcommunity.mongodbcommunity.mongodb.com/mongodb patched +``` + +After a few minutes, the MongoDB resource should return to the +Running phase. Now you must configure the Prometheus +`ServiceMonitor` to point to the HTTPS endpoint. + +### Update ServiceMonitor + +To update the `ServiceMonitor`, run the following command to +patch the resource again: + +``` shell +kubectl patch servicemonitors mongodb-sm --type='json' \ + -p=' +[ + {"op": "replace", "path": "/spec/endpoints/0/scheme", "value": "https"}, + {"op": "add", "path": "/spec/endpoints/0/tlsConfig", "value": {"insecureSkipVerify": true}} +] +' \ + --namespace mongodb +``` + +The following reponse appears: + +``` shell +servicemonitor.monitoring.coreos.com/mongodb-sm patched +``` + +With these changes, the new `ServiceMonitor` points to the HTTPS +endpoint (defined in `/spec/endpoints/0/scheme`). You also +set `spec/endpoints/0/tlsConfig/insecureSkipVerify` to `true`, +so that Prometheus doesn't verify the TLS certificates on +MongoDB's end. + +Prometheus should now be able to scrape the MongoDB target +using HTTPS. diff --git a/docs/prometheus/issuer-and-cert.yaml b/docs/prometheus/issuer-and-cert.yaml new file mode 100644 index 000000000..2f9834335 --- /dev/null +++ b/docs/prometheus/issuer-and-cert.yaml @@ -0,0 +1,52 @@ +# Creates a new Cert-Manager `Issuer` using a certificate stored in a +# `Secret` named "issuer-secret". 
+--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: ca-issuer + namespace: mongodb +spec: + ca: + secretName: issuer-secret + +# Creates a new Cert-Manager `Certificate` using the `Issuer` we just +# (name: ca-issuer). +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: prometheus-target-cert + namespace: mongodb +spec: + # Secret names are always required. + secretName: prometheus-target-cert + + duration: 2160h # 90 days + renewBefore: 360h # 15 days + subject: + organizations: + - mongodb + + isCA: false + privateKey: + algorithm: RSA + encoding: PKCS1 + size: 2048 + usages: + - server auth + + # we will include each hostname in our Replica Set. + dnsNames: + - mongodb-0.mongodb-svc.mongodb.svc.cluster.local + - mongodb-1.mongodb-svc.mongodb.svc.cluster.local + - mongodb-2.mongodb-svc.mongodb.svc.cluster.local + + issuerRef: + name: ca-issuer + # We can reference ClusterIssuers by changing the kind here. + # The default value is Issuer (i.e. a locally namespaced Issuer) + kind: Issuer + # This is optional since cert-manager will default to this value however + # if you are using an external issuer, change this to that issuer group. + group: cert-manager.io diff --git a/docs/prometheus/mongodb-prometheus-sample.yaml b/docs/prometheus/mongodb-prometheus-sample.yaml new file mode 100644 index 000000000..93c0ba564 --- /dev/null +++ b/docs/prometheus/mongodb-prometheus-sample.yaml @@ -0,0 +1,104 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mongodb + namespace: mongodb +spec: + members: 3 + type: ReplicaSet + version: "5.0.6" + + # You can expose metrics for Prometheus polling using the + # `prometheus` entry. 
+ prometheus: + # Metrics endpoint HTTP Basic Auth username + username: prometheus-username + + # Metrics endpoint HTTP Basic Auth password + passwordSecretRef: + name: metrics-endpoint-password + + security: + authentication: + modes: ["SCRAM"] + + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password + namespace: mongodb +type: Opaque +stringData: + password: 'Not-So-Secure!' + +--- +apiVersion: v1 +kind: Secret +metadata: + name: metrics-endpoint-password + namespace: mongodb +type: Opaque +stringData: + password: 'Not-So-Secure!' + username: prometheus-username + +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + annotations: + + # This needs to match `spec.ServiceMonitorSelector.matchLabels` from your + # `prometheuses.monitoring.coreos.com` resouce. + labels: + release: prometheus + + name: mongodb-sm + namespace: mongodb +spec: + endpoints: + + # Configuring a Prometheus Endpoint with basic Auth. + # `prom-secret` is a Secret containing a `username` and `password` entries. + - basicAuth: + password: + key: password + name: metrics-endpoint-password + username: + key: username + name: metrics-endpoint-password + + # This port matches what we created in our MongoDB Service. + port: prometheus + + # If using HTTPS enabled endpoint, change scheme to https + scheme: http + + # Configure different TLS related settings. 
For more information, see: + # https://github.com/prometheus-operator/prometheus-operator/blob/main/pkg/apis/monitoring/v1/types.go#L909 + # tlsConfig: + # insecureSkipVerify: true + + # What namespace to watch + namespaceSelector: + matchNames: + - mongodb + + # Service labels to match + selector: + matchLabels: + app: mongodb-svc diff --git a/docs/release-notes-template.md b/docs/release-notes-template.md new file mode 100644 index 000000000..90b997cd3 --- /dev/null +++ b/docs/release-notes-template.md @@ -0,0 +1,47 @@ +*Note, that any section that doesn’t have content should be omitted* + +# MongoDB Kubernetes Operator 1.x.y + +## Kubernetes Operator + +* Breaking Changes + * Breaking Change 1 +* Changes + * Change 1 +* Bug fixes (*CVE issues go first*) + * Fixes an issue ... + + +## MongoDBCommunity Resource +* Breaking Changes + * Breaking Change 1 +* Changes + * Change 1 +* Bug fixes (*CVE issues go first*) + * Fixes an issue ... + +## MongoDB Agent ReadinessProbe +* Breaking Changes + * Breaking Change 1 +* Changes + * Change 1 +* Bug fixes (*CVE issues go first*) + * Fixes an issue ... + +## Known Issues +* Issue 1 +* Issue 2 + +## Miscellaneous +* Item 1 +* Item 2 + +## Updated Image Tags +* mongodb-kubernetes-operator:0.3.0 +* mongodb-agent:10.19.0.6562-1 +* mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2 + +*All the images can be found in:* + +https://quay.io/mongodb + diff --git a/docs/resize-pvc.md b/docs/resize-pvc.md new file mode 100644 index 000000000..8c4c29a99 --- /dev/null +++ b/docs/resize-pvc.md @@ -0,0 +1,101 @@ +# Resize PVC Resources # + +Resizing the [Persistent Volume Claim (PVC)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) resources for your Community Kubernetes Operator replica sets using the [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) is [not yet possible](https://github.com/kubernetes/enhancements/pull/3412). 
Instead, follow these steps to resize the PVC resource for each replica set and recreate the StatefulSet. + +1. Enable your storage provisioner to allow volume expansion by setting `allowVolumeExpansion` in the [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) to `true`. For example: + + ``` + kubectl patch storageclass/ --type='json' -p='[{"op": "add", "path": "/allowVolumeExpansion", "value": true }]' + ``` + +1. If you don't already have a `MongoDBCommunity` resource with custom storage specified, create one. For example: + + ```yaml + --- + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-mongodb + spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + resources: + requests: + storage: 50Gi + ... + ``` + +1. Patch the PVC resource for each replica set. + + ``` + kubectl patch pvc/"data-volume--0" -p='{"spec": {"resources": {"requests": {"storage": "100Gi"}}}}' + kubectl patch pvc/"data-volume--1" -p='{"spec": {"resources": {"requests": {"storage": "100Gi"}}}}' + kubectl patch pvc/"data-volume--2" -p='{"spec": {"resources": {"requests": {"storage": "100Gi"}}}}' + ``` + +1. Scale the Community Kubernetes Operator to `0`. + + ``` + kubectl scale deploy mongodb-kubernetes-operator --replicas=0 + ``` + +1. Remove the StatefulSet without removing the Pods. + + ``` + kubectl delete sts --cascade=orphan + ``` + +1. Remove the `MongoDBCommunity` resource without removing the Pods. + + ``` + kubectl delete mdbc --cascade=orphan + ``` + +1. Scale the Community Kubernetes Operator to `1`. + + ``` + kubectl scale deploy mongodb-kubernetes-operator --replicas=1 + ``` + +1. Add your new storage specifications to the `MongoDBCommunity` resource. 
For example: + + ```yaml + --- + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-mongodb + spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + statefulSet: + spec: + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + resources: + requests: + storage: 100Gi + ... + ``` + +1. Reapply the `MongoDBCommunity` resource. For example: + + ``` + kubectl apply -f PATH/TO/.yaml + ``` + +1. If your storage provisioner doesn't support online expansion, restart the Pods. + + ``` + kubectl rollout restart sts + ``` diff --git a/docs/run-operator-locally.md b/docs/run-operator-locally.md new file mode 100644 index 000000000..c742f54b7 --- /dev/null +++ b/docs/run-operator-locally.md @@ -0,0 +1,46 @@ +# Quick start for building and running the operator locally + +This document contains a quickstart guide to build and running and debugging the operator locally. +Being able to run and build the binary locally can help with faster feedback-cycles. + +## Prerequisites +- Follow the general setup to be able to run e2e tests locally with our suite as described here, which includes the usage of [telepresence](https://www.getambassador.io/docs/telepresence/latest/quick-start/): + - [contributing.md](contributing.md) + - [build_operator_locally.md](build_operator_locally.md) + - If above has been configured there should be either: + - `$HOME/.kube/config` + - `KUBECONFIG` environment variable pointing at a file + - **Note**: either of these are necessary to be able to run the operator locally +- Have a folder `.community-operator-dev` +- *Optional - if you want to export the environment variables, you can run the following command*: `source .community-operator-dev/local-test.export.env`. 
(these environment variables are generated with `make generate-env-file`)
+- Follow the guide on how to run `e2e` tests as described in our [contributing.md](contributing.md), for instance: + +```sh +make e2e-telepresence test= +``` diff --git a/docs/secure.md b/docs/secure.md new file mode 100644 index 000000000..e1a1e8631 --- /dev/null +++ b/docs/secure.md @@ -0,0 +1,93 @@ +# Secure MongoDBCommunity Resources # + +## Table of Contents + +- [Secure MongoDBCommunity Resource Connections using TLS](#secure-mongodbcommunity-resource-connections-using-tls) + - [Prerequisites](#prerequisites) + - [Procedure](#procedure) + +## Secure MongoDBCommunity Resource Connections using TLS + +You can configure the MongoDB Community Kubernetes Operator to use TLS +certificates to encrypt traffic between: + +- MongoDB hosts in a replica set, and +- Client applications and MongoDB deployments. + +The Operator automates TLS configuration through its integration with +[cert-manager](https://cert-manager.io/), a certificate management tool for +Kubernetes. + +### Prerequisites + +Before you secure MongoDBCommunity resource connections using TLS, you +must [Create a database user](../docs/users.md) to authenticate to your +MongoDBCommunity resource. + +### Procedure + +To secure connections to MongoDBCommunity resources with TLS using `cert-manager`: + +1. Add the `cert-manager` repository to your `helm` repository list and + ensure it's up to date: + + ``` + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +2. Install `cert-manager`: + + ``` + helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true + ``` + +3. 
Create a TLS-secured MongoDBCommunity resource: + + This assumes you already have the operator installed in namespace `` + + ``` + helm upgrade --install community-operator mongodb/community-operator \ + --namespace --set resource.tls.useCertManager=true \ + --set createResource=true --set resource.tls.enabled=true \ + --set namespace= + ``` + + This creates a resource secured with TLS and generates the necessary + certificates with `cert-manager` according to the values specified in + the `values.yaml` file in the Community Kubernetes Operator + [chart repository](https://github.com/mongodb/helm-charts/tree/main/charts/community-operator). + + `cert-manager` automatically reissues certificates according to the + value of `resource.tls.certManager.renewCertBefore`. To alter the + reissuance interval, either: + + - Set `resource.tls.certManager.renewCertBefore` in `values.yaml` to + the desired interval in hours before running `helm upgrade` + + - Set `spec.renewBefore` in the Certificate resource file generated + by `cert-manager` to the desired interval in hours after running + `helm upgrade` + + + +1. Test your connection over TLS by + + - Connecting to a `mongod` container inside a pod using `kubectl`: + + ``` + kubectl exec -it -c mongod -- bash + ``` + + Where `mongodb-replica-set-pod` is the name of a pod from your MongoDBCommunity resource + + - Then, use `mongosh` to connect over TLS: + For how to get the connection string look at [Deploy A Replica Set](deploy-configure.md#deploy-a-replica-set) + + ``` + mongosh "" --tls --tlsCAFile /var/lib/tls/ca/ca.crt --tlsCertificateKeyFile /var/lib/tls/server/*.pem + ``` + + Where `mongodb-replica-set` is the name of your MongoDBCommunity + resource, `namespace` is the namespace of your deployment + and `connection-string` is a connection string for your `-svc` service. 
\ No newline at end of file diff --git a/docs/users.md b/docs/users.md new file mode 100644 index 000000000..96a44570a --- /dev/null +++ b/docs/users.md @@ -0,0 +1,89 @@ +# Create a Database User # + +You can create a MongoDB database user to authenticate to your MongoDBCommunity resource using [SCRAM](https://www.mongodb.com/docs/manual/core/security-scram/). First, [create a Kubernetes secret](#create-a-user-secret) for the new user's password. Then, [modify and apply the MongoDBCommunity resource definition](#modify-the-mongodbcommunity-resource). + +You cannot disable SCRAM authentication. + +## Create a User Secret + +1. Copy the following example secret. + + ```yaml + --- + apiVersion: v1 + kind: Secret + metadata: + name: # corresponds to spec.users.passwordSecretRef.name in the MongoDB CRD + type: Opaque + stringData: + password: # corresponds to spec.users.passwordSecretRef.key in the MongoDB CRD + ... + ``` +1. Update the value of `metadata.name` with any name for this secret. +1. Update the value of `stringData.password` with the user's password. +1. Save the secret with a `.yaml` file extension. +1. Apply the secret in Kubernetes: + ``` + kubectl apply -f .yaml --namespace + ``` + +## Modify the MongoDBCommunity Resource + +1. Add the following fields to the MongoDBCommunity resource definition: + + | Key | Type | Description | Required? | + |----|----|----|----| + | `spec.users` | array of objects | Configures database users for this deployment. | Yes | + | `spec.users.name` | string | Username of the database user. | Yes | + | `spec.users.db` | string | Database that the user authenticates against. Defaults to `admin`. | No | + | `spec.users.passwordSecretRef.name` | string | Name of the secret that contains the user's plain text password. | Yes| + | `spec.users.passwordSecretRef.key` | string| Key in the secret that corresponds to the value of the user's password. Defaults to `password`. 
| No | + | `spec.users.scramCredentialsSecretName` | string| ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the operator for storing SCRAM credentials for the user. The name should comply with [DNS1123 subdomain](https://tools.ietf.org/html/rfc1123). Also, please make sure the name is unique among `users`. | Yes | + | `spec.users.roles` | array of objects | Configures roles assigned to the user. | Yes | + | `spec.users.roles.role.name` | string | Name of the role. Valid values are [built-in roles](https://www.mongodb.com/docs/manual/reference/built-in-roles/#built-in-roles) and [custom roles](deploy-configure.md#define-a-custom-database-role) that you have defined. | Yes | + | `spec.users.roles.role.db` | string | Database that the role applies to. | Yes | + + ```yaml + --- + apiVersion: mongodbcommunity.mongodb.com/v1 + kind: MongoDBCommunity + metadata: + name: example-scram-mongodb + spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: + db: + passwordSecretRef: + name: + roles: + - name: + db: + - name: + db: + ... + ``` +2. Save the file. +3. Apply the updated MongoDBCommunity resource definition: + + ``` + kubectl apply -f .yaml --namespace + ``` + +## Next Steps + +- After the MongoDBCommunity resource is running, the Operator no longer requires the user's secret. MongoDB recommends that you securely store the user's password and then delete the user secret: + ``` + kubectl delete secret --namespace + ``` + +- To authenticate to your MongoDBCommunity resource, run the following command: + ``` + mongosh "mongodb://-svc..svc.cluster.local:27017/?replicaSet=" --username --password --authenticationDatabase + ``` +- To change a user's password, create and apply a new secret resource definition with a `metadata.name` that is the same as the name specified in `passwordSecretRef.name` of the MongoDB CRD. 
The Operator will automatically regenerate credentials. diff --git a/docs/x509-auth.md b/docs/x509-auth.md new file mode 100644 index 000000000..61a53a93c --- /dev/null +++ b/docs/x509-auth.md @@ -0,0 +1,129 @@ +# Enable X.509 Authentication + +You can use Helm or `kubectl` to enable X.509 authentication for the +MongoDB Agent and client. + +## Prerequisites + +1. Add the `cert-manager` repository to your `helm` repository list and + ensure it's up to date: + + ``` + helm repo add jetstack https://charts.jetstack.io + helm repo update + ``` + +1. Install `cert-manager`: + + ``` + helm install cert-manager jetstack/cert-manager --namespace cert-manager \ + --create-namespace --set installCRDs=true + ``` + +## Use Helm to Enable X.509 Authentication + +You can use Helm to install and deploy the MongoDB Community Kubernetes +Operator with X.509 Authentication enabled for the MongoDB Agent and +client. To learn more, see [Install the Operator using Helm](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md#install-the-operator-using-helm). + +1. To deploy the MongoDB Community Kubernetes Operator, copy and paste + the following command and replace the `` variable with the + namespace: + + **Note:** + + The following command deploys a sample resource with X.509 enabled + for both the MongoDB Agent and client authentication. It also creates + a sample X.509 user and the certificate that the user can use to + authenticate. + + ``` + helm upgrade --install community-operator mongodb/community-operator \ + --namespace --set namespace= --create-namespace \ + --set resource.tls.useCertManager=true --set resource.tls.enabled=true \ + --set resource.tls.useX509=true --set resource.tls.sampleX509User=true \ + --set createResource=true + ``` + +## Use `kubectl` to Enable X.509 Authentication + +You can use Helm to install and deploy the MongoDB Community Kubernetes +Operator with X.509 Authentication enabled for the MongoDB Agent and +client. 
+ +1. To install the MongoDB Community Kubernetes Operator, see + [Install the Operator using kubectl](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md#install-the-operator-using-kubectl). + +1. To create a CA, ConfigMap, secrets, issuer, and certificate, see + [Enable External Access to a MongoDB Deployment](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/external_access.md). + +1. Create a YAML file for the MongoDB Agent certificate. For an example, + see [agent-certificate.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/external_access/agent-certificate.yaml). + + **Note:** + + - For the `spec.issuerRef.name` parameter, specify the + `cert-manager` issuer that you created previously. + - For the `spec.secretName` parameter, specify the same + value as the `spec.security.authentication.agentCertificateSecretRef` + parameter in your resource. This secret should contain a signed + X.509 certificate and a private key for the MongoDB agent. + +1. To apply the file, copy and paste the following command and replace + the `` variable with the name of your MongoDB Agent + certificate and the `` variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` + +1. Create a YAML file for your resource. For an example, see + [mongodb.com_v1_mongodbcommunity_x509.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml). + + **Note:** + + - For the `spec.security.tls.certificateKeySecretRef.name` parameter, + specify a reference to the secret that contains the private key and + certificate to use for TLS. The operator expects the PEM encoded key + and certificate available at "tls.key" and "tls.crt". Use the same + format used for the standard "kubernetes.io/tls" Secret type, but no + specific type is required. 
Alternatively, you can provide + an entry called "tls.pem" that contains the concatenation of the + certificate and key. If all of "tls.pem", "tls.crt" and "tls.key" + are present, the "tls.pem" entry needs to equal the concatenation + of "tls.crt" and "tls.key". + + - For the `spec.security.tls.caConfigMapRef.name` parameter, specify + the ConfigMap that you created previously. + + - For the `spec.authentication.modes` parameter, specify `X509`. + + - If you have multiple authentication modes, specify the + `spec.authentication.agentMode` parameter. + + - The `spec.authentication.agentCertificateSecretRef` parameter + defaults to `agent-certs`. + + - For the `spec.users.db` parameter, specify `$external`. + + - Do not set the `spec.users.scramCredentialsSecretName` parameter + and the `spec.users.passwordSecretRef` parameters. + +1. To apply the file, copy and paste the following command and replace + the `` variable with your resource and the `` + variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` + +1. Create a YAML file for the client certificate. For an example, see + [cert-x509.yaml](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/config/samples/external_access/cert-x509.yaml). + +1. 
To apply the file, copy and paste the following command and replace + the `` variable with the name of your client + certificate and the `` variable with the namespace: + + ``` + kubectl apply -f .yaml --namespace + ``` diff --git a/go.mod b/go.mod index 3d25b5071..35b8ccebc 100644 --- a/go.mod +++ b/go.mod @@ -1,35 +1,90 @@ module github.com/mongodb/mongodb-kubernetes-operator -go 1.13 +go 1.24.0 require ( - github.com/Azure/go-autorest v14.0.1+incompatible // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/gobuffalo/envy v1.7.1 // indirect - github.com/golang/protobuf v1.3.5 // indirect - github.com/hashicorp/go-multierror v1.0.0 - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/imdario/mergo v0.3.9 - github.com/json-iterator/go v1.1.9 // indirect - github.com/klauspost/compress v1.9.8 // indirect - github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect - github.com/operator-framework/operator-sdk v0.17.0 - github.com/prometheus/procfs v0.0.11 // indirect - github.com/rogpeppe/go-internal v1.5.2 // indirect - github.com/spf13/cobra v0.0.7 // indirect - github.com/stretchr/testify v1.4.0 - go.mongodb.org/mongo-driver v1.3.2 - go.uber.org/zap v1.14.1 - google.golang.org/appengine v1.6.6 // indirect - k8s.io/api v0.17.5 - k8s.io/apiextensions-apiserver v0.17.5 - k8s.io/apimachinery v0.17.5 - k8s.io/client-go v12.0.0+incompatible - sigs.k8s.io/controller-runtime v0.5.2 - sigs.k8s.io/yaml v1.2.0 + github.com/blang/semver v3.5.1+incompatible + github.com/go-logr/logr v1.4.2 + github.com/hashicorp/go-multierror v1.1.1 + github.com/imdario/mergo v0.3.15 + github.com/spf13/cast v1.7.1 + github.com/stretchr/objx v0.5.2 + github.com/stretchr/testify v1.10.0 + github.com/xdg/stringprep v1.0.3 + go.mongodb.org/mongo-driver v1.16.0 + go.uber.org/zap v1.27.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 + k8s.io/api v0.30.10 + k8s.io/apimachinery v0.30.10 + 
k8s.io/client-go v0.30.10 + k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 + sigs.k8s.io/controller-runtime v0.18.7 + sigs.k8s.io/yaml v1.4.0 ) -replace github.com/docker/docker => github.com/moby/moby v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible // Required by Helm +require google.golang.org/protobuf v1.33.0 // indirect -replace k8s.io/client-go => k8s.io/client-go v0.17.5 // Required by controller-runtime +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect + 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.37.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/oauth2 v0.29.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/term v0.31.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.23.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/go.sum b/go.sum index a013a3aca..4e2b54c85 100644 --- a/go.sum +++ b/go.sum @@ -1,1616 +1,247 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= -bou.ke/monkey 
v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.49.0 h1:CH+lkubJzcPYB1Ggupcq0+k8Ni2ILdG2lYjDIgavDBQ= -cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= -contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod 
h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v11.2.8+incompatible h1:Q2feRPMlcfVcqz3pF87PJzkm5lZrL+x6BDtzhODzNJM= -github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.0.1+incompatible h1:YhojO9jolWIvvTW7ORhz2ZSNF6Q1TbLqUunKd3jrtyw= -github.com/Azure/go-autorest v14.0.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503 h1:uUhdsDMg2GbFLF5GfQPtLMWd5vdDZSfqvqQp3waafxQ= -github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.7 h1:VZK9/I6YsDZd8o6OOdgqJWQQA5x7BSPXHkOndIKW6js= -github.com/Azure/go-autorest/autorest v0.9.7/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= 
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= -github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= 
-github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= -github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= 
-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U= -github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= -github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc 
v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod h1:uHBSeeATKpVazAACZBDPL/Nk/UhQDDsJWDlqYJo8/Us= -github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod 
h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= -github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod 
h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd 
v1.3.0-beta.2.0.20190823190603-4a2f61c4f2b4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd 
v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/prometheus-operator v0.34.0/go.mod h1:Li6rMllG/hYIyXfMuvUwhyC+hqwJVHdsDdP21hypT1M= -github.com/coreos/prometheus-operator v0.38.0/go.mod h1:xZC7/TgeC0/mBaJk+1H9dbHaiEvLYHgX6Mi1h40UPh8= -github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod 
h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= -github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= -github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= -github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/deislabs/oras v0.7.0/go.mod h1:sqMKPG3tMyIX9xwXUBRLhZ24o+uT4y6jgBD2RzUTKDM= 
-github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= 
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go 
v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= -github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= 
-github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod 
h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= -github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis 
v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= 
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8= -github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd 
v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf 
v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 
h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 
h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= -github.com/googleapis/gnostic v0.3.1/go.mod 
h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic= -github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU= -github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gosuri/uitable v0.0.1/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gosuri/uitable v0.0.4/go.mod 
h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190203031600-7a902570cb17/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= 
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/heketi/heketi v9.0.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413/go.mod h1:BeS3M108VzVlmAue3lv2WcGuPAX94/KN63MUURzbYSI= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s= -github.com/helm/helm-2to3 v0.2.0/go.mod h1:jQUVAWB0bM7zNIqKPIfHFzuFSK0kHYovJrjO+hqcvRk= -github.com/helm/helm-2to3 v0.5.1/go.mod h1:AXFpQX2cSQpss+47ROPEeu7Sm4+CRJ1jKWCEQdHP3/c= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jsonnet-bundler/jsonnet-bundler v0.1.0/go.mod h1:YKsSFc9VFhhLITkJS3X2PrRqWG9u2Jq99udTdDjQLfM= -github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= 
-github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= -github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= -github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/maorfr/helm-plugin-utils v0.0.0-20181205064038-588190cb5e3b/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= -github.com/maorfr/helm-plugin-utils v0.0.0-20200216074820-36d2fcf6ae86/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/martinlindhe/base36 v1.0.0/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= -github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.1/go.mod h1:F9YacGpnZbLQMzuPI0rR6op21YvNu/RjL705LJJpM3k= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v0.0.0-20181005163659-0d29b283ac0f/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns 
v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= -github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod 
h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= -github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= -github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20190414153302-2ae31c8b6b30/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.0/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= -github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift/api v0.0.0-20200205133042-34f0ec8dab87/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= -github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= -github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= -github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= -github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= -github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/operator-framework/api v0.0.0-20200120235816-80fd2f1a09c9/go.mod h1:S5IdlJvmKkF84K2tBvsrqJbI2FVy03P88R75snpRxJo= -github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20191115003340-16619cd27fa5/go.mod h1:zL34MNy92LPutBH5gQK+gGhtgTUlZZX03I2G12vWHF4= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20200321030439-57b580e57e88/go.mod h1:7Ut8p9jJ8C6RZyyhZfZypmlibCIJwK5Wcc+WZDgLkOA= -github.com/operator-framework/operator-registry 
v1.5.1/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= -github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= -github.com/operator-framework/operator-registry v1.5.7-0.20200121213444-d8e2ec52c19a/go.mod h1:ekexcV4O8YMxdQuPb+Xco7MHfVmRIq7Jvj5e6NU7dHI= -github.com/operator-framework/operator-registry v1.6.1/go.mod h1:sx4wWMiZtYhlUiaKscg3QQUPPM/c1bkrAs4n4KipDb4= -github.com/operator-framework/operator-registry v1.6.2-0.20200330184612-11867930adb5/go.mod h1:SHff373z8asEkPo6aWpN0qId4Y/feQTjZxRF8PRhti8= -github.com/operator-framework/operator-sdk v0.16.0 h1:+D61x7FjcITLzjVakzfzz5hqkkMDR+uEDMzXfyVZOw8= -github.com/operator-framework/operator-sdk v0.16.0/go.mod h1:1UykIjxOHX/Ltj/2Z0h0y0DZ7YGBYlcBI+ctaCjXVDk= -github.com/operator-framework/operator-sdk v0.17.0 h1:+TTrGjXa+lm7g7Cm0UtFcgOjnw1x9/lBorydpsIIhOY= -github.com/operator-framework/operator-sdk v0.17.0/go.mod h1:wmYi08aoUmtgfoUamURmssI4dkdFGNtSI1Egj+ZfBnk= -github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= -github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= -github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw= -github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid v1.2.0/go.mod 
h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod 
h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= -github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= -github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= -github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= 
-github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.0 h1:Usqs0/lDK/NqTkvrmKSwA/3XkZAs7ZAW/eLeQ2MVBTw= -github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.2 h1:qLvObTrvO/XRCqmkKxUlOBc48bI3efyDuAZe25QiF0w= -github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= -github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= -github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= 
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 
h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU= -github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= 
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
-github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= -github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod 
h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xenolf/lego v0.0.0-20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= -github.com/xenolf/lego v0.3.2-0.20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod 
h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= -go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= -go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= -go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver 
v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug= -go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/xdg-go/pbkdf2 v1.0.0 
h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4= +go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= 
-golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181004145325-8469e314837c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170824195420-5d2fd3ccab98/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191018212557-ed542cd5b28a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e h1:qCZ8SbsZMjT0OuDPCEBxgLZic4NMj8Gj4vNXiTVRAaA= -golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= -gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.0.0-20190710053202-4340aa3071a0/go.mod h1:03dgh78c4UvU1WksguQ/lvJQXbezKQGJSrwwRq5MraQ= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod 
h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190128161407-8ac453e89fca/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= -gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v1 v1.1.2/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= 
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -helm.sh/helm/v3 v3.0.0/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo= -helm.sh/helm/v3 v3.0.1/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo= -helm.sh/helm/v3 v3.0.2/go.mod h1:KBxE6XWO57XSNA1PA9CvVLYRY0zWqYQTad84bNXp1lw= -helm.sh/helm/v3 v3.1.0/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= -helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= -k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= -k8s.io/api v0.0.0-20191016110408-35e52d86657a h1:VVUE9xTCXP6KUPMf92cQmN88orz600ebexcRRaBTepQ= -k8s.io/api v0.0.0-20191016110408-35e52d86657a/go.mod h1:/L5qH+AD540e7Cetbui1tuJeXdmNhO8jM6VkXeDdDhQ= -k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= -k8s.io/api v0.16.7/go.mod h1:oUAiGRgo4t+5yqcxjOu5LoHT3wJ8JSbgczkaFYS5L7I= -k8s.io/api v0.16.9/go.mod h1:Y7dZNHs1Xy0mSwSlzL9QShi6qkljnN41yR8oWCRTDe8= -k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/api v0.17.1/go.mod h1:zxiAc5y8Ngn4fmhWUtSxuUlkfz1ixT7j9wESokELzOg= -k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= -k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo= -k8s.io/api 
v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= -k8s.io/api v0.17.5 h1:EkVieIbn1sC8YCDwckLKLpf+LoVofXYW72+LTZWo4aQ= -k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= -k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65 h1:kThoiqgMsSwBdMK/lPgjtYTsEjbUU9nXCA9DyU3feok= -k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65/go.mod h1:5BINdGqggRXXKnDgpwoJ7PyQH8f+Ypp02fvVNcIFy9s= -k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0= -k8s.io/apiextensions-apiserver v0.16.9/go.mod h1:j/+KedxOeRSPMkvLNyKMbIT3+saXdTO4jTBplTmXJR4= -k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= -k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= -k8s.io/apiextensions-apiserver v0.17.4 h1:ZKFnw3cJrGZ/9s6y+DerTF4FL+dmK0a04A++7JkmMho= -k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI= -k8s.io/apiextensions-apiserver v0.17.5 h1:1MvO6pRopn9ZHweFEVFxnWDRpMd3ZE7SPY156qDnOeI= -k8s.io/apiextensions-apiserver v0.17.5/go.mod h1:Up8qgvIy2v9521+YBhg7fhVtd4jgh/1MjotWr5GvOn4= -k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= -k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= -k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= -k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 h1:Iieh/ZEgT3BWwbLD5qEKcY06jKuPEl6zC7gPSehoLw4= -k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= -k8s.io/apimachinery 
v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= -k8s.io/apimachinery v0.16.7/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= -k8s.io/apimachinery v0.16.9/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw= -k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.17.5 h1:QAjfgeTtSGksdkgyaPrIb4lhU16FWMIzxKejYD5S0gc= -k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= -k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.0.0-20191016112112-5190913f932d/go.mod h1:7OqfAolfWxUM/jJ/HBLyE+cdaWFBUoo5Q5pHgJVj2ws= -k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= -k8s.io/apiserver v0.16.9/go.mod h1:JWzfDIpD8e9rvU+Gn6ew8MfQZq41USj0iwW5+ZLyTLM= -k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= -k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= -k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= -k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= -k8s.io/apiserver v0.17.5/go.mod h1:yo2cFZJ7AUj6BYYRWzEzs2cLtkY6F6zdxs8GhLu5V28= -k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= -k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5/go.mod h1:sDl6WKSQkDM6zS1u9F49a0VooQ3ycYFBFLqd2jf2Xfo= -k8s.io/cli-runtime v0.17.2/go.mod 
h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= -k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= -k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc= -k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= -k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= -k8s.io/client-go v0.0.0-20191016111102-bec269661e48 h1:C2XVy2z0dV94q9hSSoCuTPp1KOG7IegvbdXuz9VGxoU= -k8s.io/client-go v0.0.0-20191016111102-bec269661e48/go.mod h1:hrwktSwYGI4JK+TJA3dMaFyyvHVi/aLarVHpbs8bgCU= -k8s.io/client-go v0.16.7/go.mod h1:9kEMEeuy2LdsHHXoU2Skqh+SDso+Yhkxd/0tltvswDE= -k8s.io/client-go v0.16.9/go.mod h1:ThjPlh7Kx+XoBFOCt775vx5J7atwY7F/zaFzTco5gL0= -k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= -k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= -k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= -k8s.io/client-go v0.17.5 h1:Sm/9AQ415xPAX42JLKbJZnreXFgD2rVfDUDwOTm0gzA= -k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= -k8s.io/client-go v12.0.0+incompatible h1:YlJxncpeVUC98/WMZKC3JZGk/OXQWCZjAB4Xr3B17RY= -k8s.io/client-go v12.0.0+incompatible/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= -k8s.io/cloud-provider v0.0.0-20191016115326-20453efc2458/go.mod h1:O5SO5xcgxrjJV9EC9R/47RuBpbk5YX9URDBlg++FA5o= -k8s.io/cluster-bootstrap v0.0.0-20191016115129-c07a134afb42/go.mod h1:MzCL6kLExQuHruGaqibd8cugC8nw8QRxm3+lzR5l8SI= -k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= -k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY= -k8s.io/code-generator v0.16.7/go.mod 
h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= -k8s.io/code-generator v0.16.9/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= -k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= -k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= -k8s.io/code-generator v0.17.5/go.mod h1:qdiSCSTKtS+3WtPelj2h57fylSQcPUlhMVm+TD9Dvqc= -k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.0.0-20191016111319-039242c015a9/go.mod h1:SuWowIgd/dtU/m/iv8OD9eOxp3QZBBhTIiWMsBQvKjI= -k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= -k8s.io/component-base v0.16.9/go.mod h1:5iNKIRj8yEaKG+baEkfXgU9JiWpC1WAFGBZ3Xg9fDJk= -k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= -k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= -k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= -k8s.io/component-base v0.17.5/go.mod h1:cZQAW1AUbBjD1lh+e/krbiIpqGz6fipI+vHslOBbuHE= -k8s.io/cri-api v0.0.0-20190828162817-608eb1dad4ac/go.mod h1:BvtUaNBr0fEpzb11OfrQiJLsLPtqbmulpo1fPwcpP6Q= -k8s.io/csi-translation-lib v0.0.0-20191016115521-756ffa5af0bd/go.mod h1:lf1VBseeLanBpSXD0N9tuPx1ylI8sA0j6f+rckCKiIk= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/helm v2.16.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/helm v2.16.3+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.0.0-20191016112429-9587704a8ad4/go.mod h1:+aW0UZgSXdTSHTIFnWnueEuXjOqerDUxGIw6Ygr+vYY= -k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA= -k8s.io/kube-controller-manager v0.0.0-20191016114939-2b2b218dc1df/go.mod h1:WgrTcPKYAfNa9C0LV1UeK+XqfbSOUH1WGq/vX5UiW40= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi 
v0.0.0-20200316234421-82d701f24f9d h1:jocF7XFucw2pEiv2wS7wk2FRFCjDFGV1oa4TMs0SAT0= -k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= -k8s.io/kube-proxy v0.0.0-20191016114407-2e83b6f20229/go.mod h1:2Hxci1uzXO5ipP0h9n2+h18fvNkBTpYlckk5dOPu8zg= -k8s.io/kube-scheduler v0.0.0-20191016114748-65049c67a58b/go.mod h1:BgDUHHC5Wl0xcBUQgo2XEprE5nG5i9tlRR4iNgEFbL0= -k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= -k8s.io/kubectl v0.0.0-20191016120415-2ed914427d51/go.mod h1:gL826ZTIfD4vXTGlmzgTbliCAT9NGiqpCqK2aNYv5MQ= -k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= -k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28= -k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= -k8s.io/kubelet v0.0.0-20191016114556-7841ed97f1b2/go.mod h1:SBvrtLbuePbJygVXGGCMtWKH07+qrN2dE1iMnteSG8E= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/kubernetes v1.16.0/go.mod h1:nlP2zevWKRGKuaaVbKIwozU0Rjg9leVDXkL4YTtjmVs= -k8s.io/kubernetes v1.16.2/go.mod h1:SmhGgKfQ30imqjFVj8AI+iW+zSyFsswNErKYeTfgoH0= -k8s.io/legacy-cloud-providers v0.0.0-20191016115753-cf0698c3a16b/go.mod h1:tKW3pKqdRW8pMveUTpF5pJuCjQxg6a25iLo+Z9BXVH0= -k8s.io/metrics v0.0.0-20191016113814-3b1a734dba6e/go.mod h1:ve7/vMWeY5lEBkZf6Bt5TTbGS3b8wAxwGbdXAsufjRs= -k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= -k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= -k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= -k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3/go.mod h1:+G1xBfZDfVFsm1Tj/HNCvg4QqWx8rJ2Fxpqr1rqp/gQ= -k8s.io/sample-apiserver v0.0.0-20191016112829-06bb3c9d77c9/go.mod h1:sXltHZrQa4jdKL14nOFRRUhhzpmbnRF0qGuAhRQbaxc= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= 
-k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4 h1:Gi+/O1saihwDqnlmC8Vhv1M5Sp4+rbOmK9TbsLn8ZEA= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.1/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= -sigs.k8s.io/controller-runtime v0.5.2 h1:pyXbUfoTo+HA3jeIfr0vgi+1WtmNh0CwlcnQGLXwsSw= -sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= -sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA= -sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= -sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= 
-sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= -sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.30.10 h1:2YvzRF/BELgCvxbQqFKaan5hnj2+y7JOuqu2WpVk3gg= +k8s.io/api v0.30.10/go.mod h1:Hyz3ZuK7jVLJBUFvwzDSGwxHuDdsrGs5RzF16wfHIn4= +k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= +k8s.io/apiextensions-apiserver v0.30.1/go.mod 
h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= +k8s.io/apimachinery v0.30.10 h1:UflKuJeSSArttm05wjYP0GwpTlvjnMbDKFn6F7rKkKU= +k8s.io/apimachinery v0.30.10/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.10 h1:C0oWM82QMvosIl/IdJhWfTUb7rIxM52rNSutFBknAVY= +k8s.io/client-go v0.30.10/go.mod h1:OfTvt0yuo8VpMViOsgvYQb+tMJQLNWVBqXWkzdFXSq4= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= +k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.18.7 h1:WDnx8LTRY8Fn1j/7B+S/R9MeDjWNAzpDBoaSvMSrQME= +sigs.k8s.io/controller-runtime v0.18.7/go.mod h1:L9r3fUZhID7Q9eK9mseNskpaTg2n11f/tlb8odyzJ4Y= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 000000000..45dbbbbcf --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2021. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ \ No newline at end of file diff --git a/helm-charts b/helm-charts new file mode 160000 index 000000000..c6b6488a2 --- /dev/null +++ b/helm-charts @@ -0,0 +1 @@ +Subproject commit c6b6488a2a84cb806eadac0e286b6060914082d5 diff --git a/inventories/e2e-inventory.yaml b/inventories/e2e-inventory.yaml new file mode 100644 index 000000000..c2247dff4 --- /dev/null +++ b/inventories/e2e-inventory.yaml @@ -0,0 +1,37 @@ +vars: + registry: + architecture: amd64 + +images: + - name: e2e + vars: + context: . 
+ template_context: scripts/dev/templates + inputs: + - image + platform: linux/$(inputs.params.architecture) + stages: + - name: e2e-template + task_type: dockerfile_template + distro: e2e + + inputs: + - builder + - base_image + + output: + - dockerfile: scripts/dev/templates/Dockerfile.ubi-$(inputs.params.version_id) + + - name: e2e-build + task_type: docker_build + + dockerfile: scripts/dev/templates/Dockerfile.ubi-$(inputs.params.version_id) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: latest-$(inputs.params.architecture) diff --git a/inventories/operator-inventory.yaml b/inventories/operator-inventory.yaml new file mode 100644 index 000000000..ab08796a8 --- /dev/null +++ b/inventories/operator-inventory.yaml @@ -0,0 +1,118 @@ +vars: + registry: + architecture: amd64 + +images: + - name: operator + vars: + context: . 
+ template_context: scripts/dev/templates/operator + + inputs: + - image + - image_dev + + platform: linux/$(inputs.params.architecture) + + stages: +# +# Dev build stages +# + - name: operator-builder-dev + task_type: docker_build + tags: [ "ubi" ] + dockerfile: scripts/dev/templates/operator/Dockerfile.builder + + buildargs: + builder_image: $(inputs.params.builder_image) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) + + - name: operator-template-dev + task_type: dockerfile_template + tags: ["ubi"] + template_file_extension: operator + inputs: + - base_image + + output: + - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.version_id) + + - name: operator-build-dev + task_type: docker_build + tags: ["ubi"] + dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.version_id) + + inputs: + - version_id + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) + +# +# Release build stages +# + - name: operator-builder-release + task_type: docker_build + tags: [ "ubi", "release"] + + inputs: + - builder_image + - release_version + + dockerfile: scripts/dev/templates/operator/Dockerfile.builder + + labels: + quay.expires-after: Never + + buildargs: + builder_image: $(inputs.params.builder_image) + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) + + - name: operator-template-release + task_type: 
dockerfile_template + tags: [ "ubi", "release"] + template_file_extension: operator + inputs: + - base_image + - release_version + + output: + - dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.release_version) + - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-operator/$(inputs.params.release_version)/ubi/Dockerfile + + - name: operator-build-release + task_type: docker_build + tags: [ "ubi", "release"] + + inputs: + - release_version + + dockerfile: scripts/dev/templates/operator/Dockerfile.operator-$(inputs.params.release_version) + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) + + labels: + quay.expires-after: Never + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) diff --git a/inventory.yaml b/inventory.yaml new file mode 100644 index 000000000..e2a37214c --- /dev/null +++ b/inventory.yaml @@ -0,0 +1,315 @@ +vars: + registry: + # Default value but overwritten in pipeline.py + architecture: amd64 + +images: + + - name: agent + vars: + context: . 
+ template_context: scripts/dev/templates/agent + + inputs: + - release_version + - tools_version + - image + - image_dev + + platform: linux/$(inputs.params.architecture) + stages: + - name: mongodb-agent-context + task_type: docker_build + dockerfile: scripts/dev/templates/agent/Dockerfile.builder + tags: [ "ubi" ] + buildargs: + agent_version: $(inputs.params.release_version) + tools_version: $(inputs.params.tools_version) + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) + + - name: agent-template-ubi + task_type: dockerfile_template + distro: ubi + tags: [ "ubi" ] + + output: + - dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) + + - name: mongodb-agent-build + task_type: docker_build + tags: [ "ubi" ] + + dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) + agent_version: $(inputs.params.release_version) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) + + - name: agent-template-ubi-s3 + task_type: dockerfile_template + tags: [ "ubi", "release" ] + distro: ubi + + inputs: + - release_version + + output: + - dockerfile: $(inputs.params.s3_bucket)/mongodb-agent/$(inputs.params.release_version)/ubi/Dockerfile + + - name: agent-context-ubi-release + task_type: docker_build + dockerfile: scripts/dev/templates/agent/Dockerfile.builder + tags: [ "ubi", "release" ] + buildargs: + 
agent_version: $(inputs.params.release_version) + tools_version: $(inputs.params.tools_version) + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) + + labels: + quay.expires-after: Never + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) + + - name: mongodb-agent-release + task_type: docker_build + tags: [ "ubi", "release" ] + dockerfile: scripts/dev/templates/agent/Dockerfile.ubi-$(inputs.params.version_id) + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) + agent_version: $(inputs.params.release_version) + + labels: + quay.expires-after: Never + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) + + - name: readiness-probe + vars: + context: . + template_context: scripts/dev/templates/readiness + + inputs: + - image + - image_dev + + platform: linux/$(inputs.params.architecture) + stages: + - name: readiness-init-context-build + task_type: docker_build + dockerfile: scripts/dev/templates/readiness/Dockerfile.builder + tags: [ "readiness-probe", "ubi" ] + labels: + quay.expires-after: 48h + + buildargs: + builder_image: $(inputs.params.builder_image) + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) + + - name: readiness-template-ubi + task_type: dockerfile_template + tags: [ "ubi" ] + template_file_extension: readiness + + inputs: + - base_image + + output: + - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.version_id) + + - name: readiness-init-build + task_type: docker_build + tags: [ "readiness-probe", "ubi" ] + dockerfile: 
scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.version_id) + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) + + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) + + - name: readiness-init-context-release + task_type: docker_build + dockerfile: scripts/dev/templates/readiness/Dockerfile.builder + tags: [ "readiness-probe", "release" , "ubi" ] + + labels: + quay.expires-after: Never + + buildargs: + builder_image: $(inputs.params.builder_image) + + inputs: + - release_version + - builder_image + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) + + - name: readiness-template-release + task_type: dockerfile_template + tags: [ "readiness-probe", "release", "ubi" ] + template_file_extension: readiness + inputs: + - base_image + - release_version + + output: + - dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.release_version) + - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-readinessprobe/$(inputs.params.release_version)/ubi/Dockerfile + + - name: readiness-init-build-release + task_type: docker_build + dockerfile: scripts/dev/templates/readiness/Dockerfile.readiness-$(inputs.params.release_version) + tags: [ "readiness-probe", "release" , "ubi" ] + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) + + labels: + quay.expires-after: Never + + inputs: + - base_image + - release_version + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: 
$(inputs.params.release_version)-$(inputs.params.architecture) + + - name: version-upgrade-hook + vars: + context: . + template_context: scripts/dev/templates/versionhook + + inputs: + - image + - image_dev + + platform: linux/$(inputs.params.architecture) + stages: + - name: version-upgrade-hook-context-build + task_type: docker_build + dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder + tags: [ "post-start-hook", "ubi" ] + + buildargs: + builder_image: $(inputs.params.builder_image) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) + + - name: version-post-start-hook-template-ubi + task_type: dockerfile_template + tags: [ "ubi" ] + template_file_extension: versionhook + + inputs: + - base_image + + output: + - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) + + - name: version-upgrade-hook-build + task_type: docker_build + dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.version_id) + tags: [ "post-start-hook", "ubi" ] + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image_dev):$(inputs.params.version_id)-context-$(inputs.params.architecture) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: $(inputs.params.version_id)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/$(inputs.params.image_dev) + tag: latest-$(inputs.params.architecture) + + - name: version-upgrade-hook-context-release + task_type: docker_build + dockerfile: scripts/dev/templates/versionhook/Dockerfile.builder + tags: [ "release", "post-start-hook", "ubi", ] + + labels: + quay.expires-after: Never + + buildargs: + builder_image: $(inputs.params.builder_image) + + inputs: + - release_version + - builder_image + + output: + - registry: 
$(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-context-$(inputs.params.architecture) + + - name: versionhook-template-release + task_type: dockerfile_template + tags: [ "post-start-hook", "release", "ubi" ] + template_file_extension: versionhook + inputs: + - base_image + - release_version + + output: + - dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) + - dockerfile: $(inputs.params.s3_bucket)/mongodb-kubernetes-operator-version-upgrade-post-start-hook/$(inputs.params.release_version)/ubi/Dockerfile + + - name: version-upgrade-hook-build-release + task_type: docker_build + dockerfile: scripts/dev/templates/versionhook/Dockerfile.versionhook-$(inputs.params.release_version) + tags: [ "release", "post-start-hook", "ubi" ] + + buildargs: + imagebase: $(inputs.params.registry)/$(inputs.params.image):$(inputs.params.release_version)-context-$(inputs.params.architecture) + + labels: + quay.expires-after: Never + + inputs: + - base_image + - release_version + + output: + - registry: $(inputs.params.registry)/$(inputs.params.image) + tag: $(inputs.params.release_version)-$(inputs.params.architecture) \ No newline at end of file diff --git a/licenses.csv b/licenses.csv new file mode 100644 index 000000000..931e7a9e4 --- /dev/null +++ b/licenses.csv @@ -0,0 +1,201 @@ +github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT +github.com/blang/semver,https://github.com/blang/semver/blob/v3.5.1/LICENSE,MIT +github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.1.2/LICENSE.txt,MIT +github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/v1.1.1/LICENSE,ISC +github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.9.0/LICENSE,MIT +github.com/evanphx/json-patch,https://github.com/evanphx/json-patch/blob/v4.12.0/LICENSE,BSD-3-Clause 
+github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.6.0/v5/LICENSE,BSD-3-Clause +github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.6.0/LICENSE,BSD-3-Clause +github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.1/LICENSE,Apache-2.0 +github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.19.5/LICENSE,Apache-2.0 +github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.20.0/LICENSE,Apache-2.0 +github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.19.14/LICENSE,Apache-2.0 +github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause +github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 +github.com/golang/protobuf,https://github.com/golang/protobuf/blob/v1.5.2/LICENSE,BSD-3-Clause +github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.3/LICENSE,BSD-3-Clause +github.com/google/gnostic,https://github.com/google/gnostic/blob/v0.5.7-v3refs/LICENSE,Apache-2.0 +github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.5.9/LICENSE,BSD-3-Clause +github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.1.0/LICENSE,Apache-2.0 +github.com/google/uuid,https://github.com/google/uuid/blob/v1.3.0/LICENSE,BSD-3-Clause +github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.0.0/LICENSE,MPL-2.0 +github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0 +github.com/imdario/mergo,https://github.com/imdario/mergo/blob/v0.3.15/LICENSE,BSD-3-Clause +github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT +github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT +github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.13.6/LICENSE,Apache-2.0 
+github.com/klauspost/compress/internal/snapref,https://github.com/klauspost/compress/blob/v1.13.6/internal/snapref/LICENSE,BSD-3-Clause +github.com/klauspost/compress/zstd/internal/xxhash,https://github.com/klauspost/compress/blob/v1.13.6/zstd/internal/xxhash/LICENSE.txt,MIT +github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.7.6/LICENSE,MIT +github.com/matttproud/golang_protobuf_extensions/pbutil,https://github.com/matttproud/golang_protobuf_extensions/blob/v1.0.2/LICENSE,Apache-2.0 +github.com/moby/spdystream,https://github.com/moby/spdystream/blob/v0.2.0/LICENSE,Apache-2.0 +github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0 +github.com/modern-go/reflect2,https://github.com/modern-go/reflect2/blob/v1.0.2/LICENSE,Apache-2.0 +github.com/mongodb/mongodb-kubernetes-operator/api/v1,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/api/v1,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/manager,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness/testdata,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/cmd/versionhook,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/construct,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/construct,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/predicates,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/validation,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/controllers/watch,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/controllers/watch,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/agent,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/agent,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/mocks,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/helm,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/lifecycle,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/persistentvolumeclaim,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/pod,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/secret,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/apierrors,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/apierrors,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/functions,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/state,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/state,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/feature_compatibility_version,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/prometheus,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_arbiter,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_authentication,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_change_version,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_connection_string_options,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_cross_namespace_deploy,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_annotations_test_test,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_persistent_volume,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_custom_role,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_4_5,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_5_6,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade_6_7,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_port_change_with_arbiters,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mongod_readiness,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_mount_connection_string,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_multiple,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_operator_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_recovery,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_scale,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_scale_down,Unknown,Unknown 
+github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_recreate_mdbc,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_rotate,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_rotate_delete_sts,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_tls_upgrade,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_x509,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_arbitrary_config,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_arbitrary_config_update,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/statefulset_delete,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester,Unknown,Unknown +github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/wait,Unknown,Unknown +github.com/montanaflynn/stats,https://github.com/montanaflynn/stats/blob/1bf9dbcd8cbe/LICENSE,MIT +github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE,BSD-3-Clause +github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause +github.com/pmezard/go-difflib/difflib,https://github.com/pmezard/go-difflib/blob/v1.0.0/LICENSE,BSD-3-Clause +github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.14.0/LICENSE,Apache-2.0 +github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.3.0/LICENSE,Apache-2.0 
+github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.37.0/LICENSE,Apache-2.0 +github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg,https://github.com/prometheus/common/blob/v0.37.0/internal/bitbucket.org/ww/goautoneg/README.txt,BSD-3-Clause +github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.8.0/LICENSE,Apache-2.0 +github.com/spf13/cast,https://github.com/spf13/cast/blob/v1.6.0/LICENSE,MIT +github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause +github.com/stretchr/objx,https://github.com/stretchr/objx/blob/v0.5.1/LICENSE,MIT +github.com/stretchr/testify,https://github.com/stretchr/testify/blob/v1.8.4/LICENSE,MIT +github.com/xdg-go/pbkdf2,https://github.com/xdg-go/pbkdf2/blob/v1.0.0/LICENSE,Apache-2.0 +github.com/xdg-go/scram,https://github.com/xdg-go/scram/blob/v1.1.2/LICENSE,Apache-2.0 +github.com/xdg-go/stringprep,https://github.com/xdg-go/stringprep/blob/v1.0.4/LICENSE,Apache-2.0 +github.com/xdg/stringprep,https://github.com/xdg/stringprep/blob/v1.0.3/LICENSE,Apache-2.0 +github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/1be2e3e5546d/LICENSE,MIT +go.mongodb.org/mongo-driver,https://github.com/mongodb/mongo-go-driver/blob/v1.13.1/LICENSE,Apache-2.0 +go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.10.0/LICENSE.txt,MIT +go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.26.0/LICENSE.txt,MIT +golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.17.0:LICENSE,BSD-3-Clause +golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.17.0:LICENSE,BSD-3-Clause +golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/ee480838:LICENSE,BSD-3-Clause +golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.1.0:LICENSE,BSD-3-Clause +golang.org/x/sys/unix,https://cs.opensource.google/go/x/sys/+/v0.15.0:LICENSE,BSD-3-Clause +golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.15.0:LICENSE,BSD-3-Clause 
+golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.14.0:LICENSE,BSD-3-Clause +golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.3.0:LICENSE,BSD-3-Clause +gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.2.0/v2/LICENSE,Apache-2.0 +google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.28.1/LICENSE,BSD-3-Clause +gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause +gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT +gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 +gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT +k8s.io/api,https://github.com/kubernetes/api/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.26.10/third_party/forked/golang/LICENSE,BSD-3-Clause +k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/component-base/config,https://github.com/kubernetes/component-base/blob/v0.26.10/LICENSE,Apache-2.0 +k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.80.1/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/LICENSE,Apache-2.0 +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause +k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/172d655c2280/pkg/validation/spec/LICENSE,Apache-2.0 +k8s.io/utils,https://github.com/kubernetes/utils/blob/99ec85e7a448/LICENSE,Apache-2.0 
+k8s.io/utils/internal/third_party/forked/golang/net,https://github.com/kubernetes/utils/blob/99ec85e7a448/internal/third_party/forked/golang/LICENSE,BSD-3-Clause +sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.14.7/LICENSE,Apache-2.0 +sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/f223a00ba0e2/LICENSE,Apache-2.0 +sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.2.3/LICENSE,Apache-2.0 +sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,Apache-2.0 +sigs.k8s.io/yaml/goyaml.v2,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE,Apache-2.0 diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 000000000..100a12707 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,8 @@ +# Global options: + +[mypy] +python_version = 3.10 +ignore_missing_imports = true +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true diff --git a/pipeline.py b/pipeline.py new file mode 100644 index 000000000..f288d1a5b --- /dev/null +++ b/pipeline.py @@ -0,0 +1,294 @@ +import argparse +import json +import subprocess +import sys +from typing import Dict, List, Set +from scripts.ci.base_logger import logger +from scripts.ci.images_signing import ( + sign_image, + verify_signature, + mongodb_artifactory_login, +) + +from scripts.dev.dev_config import load_config, DevConfig +from sonar.sonar import process_image + +# These image names must correspond to prefixes in release.json, developer configuration and inventories +VALID_IMAGE_NAMES = { + "agent", + "readiness-probe", + "version-upgrade-hook", + "operator", + "e2e", +} + +AGENT_DISTRO_KEY = "agent_distro" +TOOLS_DISTRO_KEY = "tools_distro" + +AGENT_DISTROS_PER_ARCH = { + "amd64": {AGENT_DISTRO_KEY: "rhel8_x86_64", TOOLS_DISTRO_KEY: "rhel88-x86_64"}, + "arm64": {AGENT_DISTRO_KEY: "amzn2_aarch64", TOOLS_DISTRO_KEY: "rhel88-aarch64"}, +} + + +def load_release() -> 
Dict: + with open("release.json") as f: + return json.load(f) + + +def build_image_args(config: DevConfig, image_name: str) -> Dict[str, str]: + release = load_release() + + # Naming in pipeline : readiness-probe, naming in dev config : readiness_probe_image + image_name_prefix = image_name.replace("-", "_") + + # Default config + arguments = { + "builder": "true", + # Defaults to "" if empty, e2e has no release version + "release_version": release.get(image_name, ""), + "tools_version": "", + "image": getattr(config, f"{image_name_prefix}_image"), + # Defaults to "" if empty, e2e has no dev image + "image_dev": getattr(config, f"{image_name_prefix}_image_dev", ""), + "registry": config.repo_url, + "s3_bucket": config.s3_bucket, + "builder_image": release["golang-builder-image"], + "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", + "inventory": "inventory.yaml", + "skip_tags": config.skip_tags, # Include skip_tags + "include_tags": config.include_tags, # Include include_tags + } + + # Handle special cases + if image_name == "operator": + arguments["inventory"] = "inventories/operator-inventory.yaml" + + if image_name == "e2e": + arguments.pop("builder", None) + arguments["base_image"] = release["golang-builder-image"] + arguments["inventory"] = "inventories/e2e-inventory.yaml" + + if image_name == "agent": + arguments["tools_version"] = release["agent-tools-version"] + + return arguments + + +def sign_and_verify(registry: str, tag: str) -> None: + sign_image(registry, tag) + verify_signature(registry, tag) + + +def build_and_push_image( + image_name: str, + config: DevConfig, + args: Dict[str, str], + architectures: Set[str], + release: bool, + sign: bool, + insecure: bool = False, +) -> None: + if sign: + mongodb_artifactory_login() + for arch in architectures: + image_tag = f"{image_name}" + args["architecture"] = arch + if image_name == "agent": + args[AGENT_DISTRO_KEY] = AGENT_DISTROS_PER_ARCH[arch][AGENT_DISTRO_KEY] + args[TOOLS_DISTRO_KEY] 
= AGENT_DISTROS_PER_ARCH[arch][TOOLS_DISTRO_KEY] + process_image( + image_tag, + build_args=args, + inventory=args["inventory"], + skip_tags=args["skip_tags"], + include_tags=args["include_tags"], + ) + if release: + registry = args["registry"] + "/" + args["image"] + context_tag = args["release_version"] + "-context-" + arch + release_tag = args["release_version"] + "-" + arch + if sign: + sign_and_verify(registry, context_tag) + sign_and_verify(registry, release_tag) + + if args["image_dev"]: + image_to_push = args["image_dev"] + elif image_name == "e2e": + # If no image dev (only e2e is concerned) we push the normal image + image_to_push = args["image"] + else: + raise Exception("Dev image must be specified") + + push_manifest(config, architectures, image_to_push, insecure) + + if config.gh_run_id: + push_manifest(config, architectures, image_to_push, insecure, config.gh_run_id) + + if release: + registry = args["registry"] + "/" + args["image"] + context_tag = args["release_version"] + "-context" + push_manifest( + config, architectures, args["image"], insecure, args["release_version"] + ) + push_manifest(config, architectures, args["image"], insecure, context_tag) + if sign: + sign_and_verify(registry, args["release_version"]) + sign_and_verify(registry, context_tag) + + +""" +Generates docker manifests by running the following commands: +1. Clear existing manifests +docker manifest rm config.repo_url/image:tag +2. Create the manifest +docker manifest create config.repo_url/image:tag --amend config.repo_url/image:tag-amd64 --amend config.repo_url/image:tag-arm64 +3. 
Push the manifest +docker manifest push config.repo_url/image:tag +""" + + +def push_manifest( + config: DevConfig, + architectures: Set[str], + image_name: str, + insecure: bool = False, + image_tag: str = "latest", +) -> None: + logger.info(f"Pushing manifest for {image_tag}") + final_manifest = "{0}/{1}:{2}".format(config.repo_url, image_name, image_tag) + remove_args = ["docker", "manifest", "rm", final_manifest] + logger.info("Removing existing manifest") + run_cli_command(remove_args, fail_on_error=False) + + create_args = [ + "docker", + "manifest", + "create", + final_manifest, + ] + + if insecure: + create_args.append("--insecure") + + for arch in architectures: + create_args.extend(["--amend", final_manifest + "-" + arch]) + + logger.info("Creating new manifest") + run_cli_command(create_args) + + push_args = ["docker", "manifest", "push", final_manifest] + logger.info("Pushing new manifest") + run_cli_command(push_args) + + +# Raises exceptions by default +def run_cli_command(args: List[str], fail_on_error: bool = True) -> None: + command = " ".join(args) + logger.debug(f"Running: {command}") + try: + cp = subprocess.run( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + check=False, + ) + except Exception as e: + logger.error(f" Command raised the following exception: {e}") + if fail_on_error: + raise Exception + else: + logger.warning("Continuing...") + return + + if cp.returncode != 0: + error_msg = cp.stderr.decode().strip() + stdout = cp.stdout.decode().strip() + logger.error("Error running command") + logger.error(f"stdout:\n{stdout}") + logger.error(f"stderr:\n{error_msg}") + if fail_on_error: + raise Exception + else: + logger.warning("Continuing...") + return + + +def _parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + parser.add_argument("--image-name", type=str) + parser.add_argument("--release", action="store_true", default=False) + parser.add_argument( + "--arch", + choices=["amd64", 
"arm64"], + nargs="+", + help="for daily builds only, specify the list of architectures to build for images", + ) + parser.add_argument("--tag", type=str) + parser.add_argument("--sign", action="store_true", default=False) + parser.add_argument("--insecure", action="store_true", default=False) + return parser.parse_args() + + +""" +Takes arguments: +--image-name : The name of the image to build, must be one of VALID_IMAGE_NAMES +--release : We push the image to the registry only if this flag is set +--architecture : List of architectures to build for the image +--sign : Sign images with our private key if sign is set (only for release) + +Run with --help for more information +Example usage : `python pipeline.py --image-name agent --release --sign` + +Builds and push the docker image to the registry +Many parameters are defined in the dev configuration, default path is : ~/.community-operator-dev/config.json +""" + + +def main() -> int: + args = _parse_args() + + image_name = args.image_name + if image_name not in VALID_IMAGE_NAMES: + logger.error( + f"Invalid image name: {image_name}. 
Valid options are: {VALID_IMAGE_NAMES}" + ) + return 1 + + # Handle dev config + config: DevConfig = load_config() + config.gh_run_id = args.tag + + # Warn user if trying to release E2E tests + if args.release and image_name == "e2e": + logger.warning( + "Warning : releasing E2E test will fail because E2E image has no release version" + ) + + # Skipping release tasks by default + if not args.release: + config.ensure_skip_tag("release") + if args.sign: + logger.warning("--sign flag has no effect without --release") + + if args.arch: + arch_set = set(args.arch) + else: + # Default is multi-arch + arch_set = {"amd64", "arm64"} + logger.info(f"Building for architectures: {','.join(arch_set)}") + + if not args.sign: + logger.warning("--sign flag not provided, images won't be signed") + + image_args = build_image_args(config, image_name) + + build_and_push_image( + image_name, config, image_args, arch_set, args.release, args.sign, args.insecure + ) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/pkg/agent/agent_readiness.go b/pkg/agent/agent_readiness.go new file mode 100644 index 000000000..eefe3a49d --- /dev/null +++ b/pkg/agent/agent_readiness.go @@ -0,0 +1,133 @@ +package agent + +import ( + "context" + "fmt" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod" + "github.com/spf13/cast" + "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // podAnnotationAgentVersion is the Pod Annotation key which contains the current version of the Automation Config + // the Agent on the Pod is on now. + podAnnotationAgentVersion = "agent.mongodb.com/version" +) + +type PodState struct { + PodName types.NamespacedName + Found bool + ReachedGoalState bool + IsArbiter bool +} + +// AllReachedGoalState returns whether the agents associated with a given StatefulSet have reached goal state. 
+// it achieves this by reading the Pod annotations and checking to see if they have reached the expected config versions. +func AllReachedGoalState(ctx context.Context, sts appsv1.StatefulSet, podGetter pod.Getter, desiredMemberCount, targetConfigVersion int, log *zap.SugaredLogger) (bool, error) { + // AllReachedGoalState does not use desiredArbitersCount for backwards compatibility + podStates, err := GetAllDesiredMembersAndArbitersPodState(ctx, types.NamespacedName{ + Namespace: sts.Namespace, + Name: sts.Name, + }, podGetter, desiredMemberCount, 0, targetConfigVersion, log) + if err != nil { + return false, err + } + + var podsNotFound []string + for _, podState := range podStates { + if !podState.Found { + podsNotFound = append(podsNotFound, podState.PodName.Name) + } else if !podState.ReachedGoalState { + return false, nil + } + } + + if len(podsNotFound) == desiredMemberCount { + // no pods existing means that the StatefulSet hasn't been created yet - will be done during the next step + return true, nil + } + + if len(podsNotFound) > 0 { + log.Infof("The following Pods don't exist: %v. Assuming they will be rescheduled by Kubernetes soon", podsNotFound) + return false, nil + } + + log.Infof("All %d Agents have reached Goal state", desiredMemberCount) + return true, nil +} + +// GetAllDesiredMembersAndArbitersPodState returns states of all desired pods in a replica set. +// Pod names to search for are calculated using desiredMemberCount and desiredArbitersCount. Each pod is then checked if it exists +// or if it reached goal state vs targetConfigVersion. 
+func GetAllDesiredMembersAndArbitersPodState(ctx context.Context, namespacedName types.NamespacedName, podGetter pod.Getter, desiredMembersCount, desiredArbitersCount, targetConfigVersion int, log *zap.SugaredLogger) ([]PodState, error) { + podStates := make([]PodState, desiredMembersCount+desiredArbitersCount) + + membersPodNames := statefulSetPodNames(namespacedName.Name, desiredMembersCount) + arbitersPodNames := arbitersStatefulSetPodNames(namespacedName.Name, desiredArbitersCount) + + for i, podName := range append(membersPodNames, arbitersPodNames...) { + podNamespacedName := types.NamespacedName{Name: podName, Namespace: namespacedName.Namespace} + podState := PodState{ + PodName: podNamespacedName, + Found: true, + ReachedGoalState: false, + IsArbiter: i >= len(membersPodNames), + } + + p, err := podGetter.GetPod(ctx, podNamespacedName) + if err != nil { + if apiErrors.IsNotFound(err) { + // we can skip below iteration and check for our goal state since the pod is not available yet + podState.Found = false + podState.ReachedGoalState = false + podStates[i] = podState + continue + } else { + return nil, err + } + } + + podState.ReachedGoalState = ReachedGoalState(p, targetConfigVersion, log) + podStates[i] = podState + } + + return podStates, nil +} + +// ReachedGoalState checks if a single Agent has reached the goal state. To do this it reads the Pod annotation +// to find out the current version the Agent is on. 
+func ReachedGoalState(pod corev1.Pod, targetConfigVersion int, log *zap.SugaredLogger) bool { + currentAgentVersion, ok := pod.Annotations[podAnnotationAgentVersion] + if !ok { + log.Debugf("The Pod '%s' doesn't have annotation '%s' yet", pod.Name, podAnnotationAgentVersion) + return false + } + if cast.ToInt(currentAgentVersion) != targetConfigVersion { + log.Debugf("The Agent in the Pod '%s' hasn't reached the goal state yet (goal: %d, agent: %s)", pod.Name, targetConfigVersion, currentAgentVersion) + return false + } + return true +} + +// statefulSetPodNames returns a slice of names for a subset of the StatefulSet pods. +// we need a subset in the case of scaling up/down. +func statefulSetPodNames(name string, currentMembersCount int) []string { + names := make([]string, currentMembersCount) + for i := 0; i < currentMembersCount; i++ { + names[i] = fmt.Sprintf("%s-%d", name, i) + } + return names +} + +func arbitersStatefulSetPodNames(name string, currentArbitersCount int) []string { + names := make([]string, currentArbitersCount) + for i := 0; i < currentArbitersCount; i++ { + names[i] = fmt.Sprintf("%s-arb-%d", name, i) + } + return names +} diff --git a/pkg/agent/agent_readiness_test.go b/pkg/agent/agent_readiness_test.go new file mode 100644 index 000000000..2f898ad9d --- /dev/null +++ b/pkg/agent/agent_readiness_test.go @@ -0,0 +1,109 @@ +package agent + +import ( + "context" + "os" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func init() { + logger, err := zap.NewDevelopment() + if err != nil { + os.Exit(1) + } + zap.ReplaceGlobals(logger) +} + +func TestAllReachedGoalState(t *testing.T) { + ctx := context.Background() + sts, err := 
statefulset.NewBuilder().SetName("sts").SetNamespace("test-ns").Build() + assert.NoError(t, err) + + t.Run("Returns true if all pods are not found", func(t *testing.T) { + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{}, 3, 3, zap.S()) + assert.NoError(t, err) + assert.True(t, ready) + }) + + t.Run("Returns true if all pods are ready", func(t *testing.T) { + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{pods: []corev1.Pod{ + createPodWithAgentAnnotation("3"), + createPodWithAgentAnnotation("3"), + createPodWithAgentAnnotation("3"), + }}, 3, 3, zap.S()) + assert.NoError(t, err) + assert.True(t, ready) + }) + + t.Run("Returns false if one pod is not ready", func(t *testing.T) { + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{pods: []corev1.Pod{ + createPodWithAgentAnnotation("2"), + createPodWithAgentAnnotation("3"), + createPodWithAgentAnnotation("3"), + }}, 3, 3, zap.S()) + assert.NoError(t, err) + assert.False(t, ready) + }) + + t.Run("Returns true when the pods are not found", func(t *testing.T) { + ready, err := AllReachedGoalState(ctx, sts, mockPodGetter{shouldReturnNotFoundError: true}, 3, 3, zap.S()) + assert.NoError(t, err) + assert.True(t, ready) + }) +} + +func TestReachedGoalState(t *testing.T) { + t.Run("Pod reaches goal state when annotation is present", func(t *testing.T) { + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 2, zap.S())) + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("4"), 4, zap.S())) + assert.True(t, ReachedGoalState(createPodWithAgentAnnotation("20"), 20, zap.S())) + }) + + t.Run("Pod does not reach goal state when there is a mismatch", func(t *testing.T) { + assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("2"), 4, zap.S())) + assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("3"), 7, zap.S())) + assert.False(t, ReachedGoalState(createPodWithAgentAnnotation("10"), 1, zap.S())) + }) + + t.Run("Pod does not reach goal state when annotation 
is not present", func(t *testing.T) { + assert.False(t, ReachedGoalState(corev1.Pod{}, 10, zap.S())) + }) +} + +func createPodWithAgentAnnotation(versionStr string) corev1.Pod { + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + podAnnotationAgentVersion: versionStr, + }, + }, + } +} + +type mockPodGetter struct { + pods []corev1.Pod + currPodIndex int + shouldReturnNotFoundError bool +} + +func (m mockPodGetter) GetPod(context.Context, client.ObjectKey) (corev1.Pod, error) { + if m.shouldReturnNotFoundError || m.currPodIndex >= len(m.pods) { + return corev1.Pod{}, notFoundError() + } + + pod := m.pods[m.currPodIndex] + + return pod, nil +} + +func notFoundError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} +} diff --git a/pkg/agent/agentflags.go b/pkg/agent/agentflags.go new file mode 100644 index 000000000..ed231da09 --- /dev/null +++ b/pkg/agent/agentflags.go @@ -0,0 +1,19 @@ +package agent + +import corev1 "k8s.io/api/core/v1" + +type StartupParameter struct { + Key string `json:"key"` + Value string `json:"value"` +} + +// StartupParametersToAgentFlag takes a slice of StartupParameters +// and concatenates them into a single string that is then +// returned as env variable AGENT_FLAGS +func StartupParametersToAgentFlag(parameters ...StartupParameter) corev1.EnvVar { + agentParams := "" + for _, param := range parameters { + agentParams += " -" + param.Key + " " + param.Value + } + return corev1.EnvVar{Name: "AGENT_FLAGS", Value: agentParams} +} diff --git a/pkg/agent/agentflags_test.go b/pkg/agent/agentflags_test.go new file mode 100644 index 000000000..e56b67613 --- /dev/null +++ b/pkg/agent/agentflags_test.go @@ -0,0 +1,34 @@ +package agent + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAgentFlagIsCorrectlyCreated(t *testing.T) { + parameters := []StartupParameter{ + { + Key: "Key1", + Value: "Value1", + }, + { + Key: "Key2", + 
Value: "Value2", + }, + } + + envVar := StartupParametersToAgentFlag(parameters...) + assert.Equal(t, "AGENT_FLAGS", envVar.Name) + assert.Equal(t, " -Key1 Value1 -Key2 Value2", envVar.Value) + +} + +func TestAgentFlagEmptyParameters(t *testing.T) { + parameters := []StartupParameter{} + + envVar := StartupParametersToAgentFlag(parameters...) + assert.Equal(t, "AGENT_FLAGS", envVar.Name) + assert.Equal(t, "", envVar.Value) + +} diff --git a/pkg/agent/agenthealth.go b/pkg/agent/agenthealth.go new file mode 100644 index 000000000..7bd16c435 --- /dev/null +++ b/pkg/agent/agenthealth.go @@ -0,0 +1,40 @@ +package agent + +import ( + "time" +) + +type Health struct { + Healthiness map[string]ProcessHealth `json:"statuses"` + ProcessPlans map[string]MmsDirectorStatus `json:"mmsStatus"` +} + +type ProcessHealth struct { + IsInGoalState bool `json:"IsInGoalState"` + ExpectedToBeUp bool `json:"ExpectedToBeUp"` + LastMongoUpTime int64 `json:"LastMongoUpTime"` +} + +type MmsDirectorStatus struct { + Name string `json:"name"` + LastGoalStateClusterConfigVersion int64 `json:"lastGoalVersionAchieved"` + Plans []*PlanStatus `json:"plans"` +} + +type PlanStatus struct { + Moves []*MoveStatus `json:"moves"` + Started *time.Time `json:"started"` + Completed *time.Time `json:"completed"` +} + +type MoveStatus struct { + Move string `json:"move"` + Steps []*StepStatus `json:"steps"` +} + +type StepStatus struct { + Step string `json:"step"` + Started *time.Time `json:"started"` + Completed *time.Time `json:"completed"` + Result string `json:"result"` +} diff --git a/pkg/agent/replica_set_port_manager.go b/pkg/agent/replica_set_port_manager.go new file mode 100644 index 000000000..e47e94181 --- /dev/null +++ b/pkg/agent/replica_set_port_manager.go @@ -0,0 +1,147 @@ +package agent + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// ReplicaSetPortManager is used to determine which ports should be 
set in pods (mongod processes) and in the service. +// It is used for the two use cases: +// +// * Determining the port values for the initial automation config and service when the cluster is not created yet. +// +// * Determining the next port to be changed (next stage in the process of the port change) when there is a need of changing the port of running cluster. +// +// It does not depend on K8S API, and it is deterministic for the given parameters in the NewReplicaSetPortManager constructor. +// +// When the replica set is initially configured (no pods/processes are created yet), it works by simply setting desired port to all processes. +// When the replica set is created and running, it orchestrates port changes as mongodb-agent does not allow changing ports in more than one process at a time. +// For the running cluster, it changes ports only when all pods reached goal state and changes the ports one by one. +// For the whole process of port change, the service has both ports exposed: old and new. After it finishes, only the new port is in the service. +type ReplicaSetPortManager struct { + log *zap.SugaredLogger + expectedPort int + currentPodStates []PodState + currentACProcesses []automationconfig.Process +} + +func NewReplicaSetPortManager(log *zap.SugaredLogger, expectedPort int, currentPodStates []PodState, currentACProcesses []automationconfig.Process) *ReplicaSetPortManager { + return &ReplicaSetPortManager{log: log, expectedPort: expectedPort, currentPodStates: currentPodStates, currentACProcesses: currentACProcesses} +} + +// GetPortsModification returns automation config modification function to be used in config builder. +// It calculates which ports are needed to be set in current reconcile process. +// For the pods, which are not created yet, it sets desired port immediately. +// For the pods, which are created and with its goal reached, it changes only one port at a time to allow +// agent to change port in one process at a time. 
+func (r *ReplicaSetPortManager) GetPortsModification() automationconfig.Modification { + portMap, _, _ := r.calculateExpectedPorts() + r.log.Debugf("Calculated process port map: %+v", portMap) + return func(config *automationconfig.AutomationConfig) { + for i := range config.Processes { + process := config.Processes[i] + process.SetPort(portMap[process.Name]) + } + } +} + +// GetServicePorts returns an array of corev1.ServicePort to be set in the service. +// If there is no port change in progress, it returns expectedPort named "mongodb". +// If there is port change in progress, then it returns both ports: old named "mongodb" and new named "mongodb-new". +// When the port change is finished, it falls back to the first case (no port change in progress) and "mongodb-new" will be renamed to "mongodb". +func (r *ReplicaSetPortManager) GetServicePorts() []corev1.ServicePort { + _, portChangeRequired, oldPort := r.calculateExpectedPorts() + + if !portChangeRequired || oldPort == r.expectedPort { + return []corev1.ServicePort{{ + Port: int32(r.expectedPort), + Name: "mongodb", + }} + } + + servicePorts := []corev1.ServicePort{ + { + Port: int32(r.expectedPort), + Name: "mongodb-new", + }, + { + Port: int32(oldPort), + Name: "mongodb", + }, + } + + r.log.Debugf("Port change in progress, setting service ports: %+v", servicePorts) + + return servicePorts +} + +func (r *ReplicaSetPortManager) getProcessByName(name string) *automationconfig.Process { + for i := 0; i < len(r.currentACProcesses); i++ { + if r.currentACProcesses[i].Name == name { + return &r.currentACProcesses[i] + } + } + + return nil +} + +// calculateExpectedPorts is a helper function to calculate what should be the current ports set in all replica set pods. +// It's working deterministically using currentACProcesses from automation config and currentPodStates. 
+func (r *ReplicaSetPortManager) calculateExpectedPorts() (processPortMap map[string]int, portChangeRequired bool, oldPort int) { + processPortMap = map[string]int{} + + // populate processPortMap with current ports + // it also populates entries for not existing pods yet + for _, podState := range r.currentPodStates { + process := r.getProcessByName(podState.PodName.Name) + if process == nil || process.GetPort() == 0 { + // new processes are configured with correct port from the start + processPortMap[podState.PodName.Name] = r.expectedPort + } else { + processPortMap[podState.PodName.Name] = process.GetPort() + } + } + + // check if there is a need to perform port change + portChangeRequired = false + // This is the only place we could get the old port value + // As soon as the port is changed on the MongoDB resource we lose the old value. + oldPort = r.expectedPort + for _, port := range processPortMap { + if port != r.expectedPort { + portChangeRequired = true + oldPort = port + break + } + } + + // If there are no port changes we just return initial config. + // This way this ReplicaSetPortManager is used also for setting the initial port values for all processes. + if !portChangeRequired { + r.log.Debugf("No port change required") + return processPortMap, false, oldPort + } + + // We only perform port change if all pods reached goal states. + // That will guarantee, that we will not change more than one process' port at a time. + for _, podState := range r.currentPodStates { + if !podState.ReachedGoalState { + r.log.Debugf("Port change required but not all pods reached goal state, abandoning port change") + return processPortMap, true, oldPort + } + } + + // change the port only in the first eligible process as the agent cannot handle simultaneous port changes in multiple processes + // We have guaranteed here that all pods are created and have reached the goal state. 
+ for _, podState := range r.currentPodStates { + podName := podState.PodName.Name + if processPortMap[podName] != r.expectedPort { + r.log.Debugf("Changing port in process %s from %d to %d", podName, processPortMap[podName], r.expectedPort) + processPortMap[podName] = r.expectedPort + break + } + } + + return processPortMap, true, oldPort +} diff --git a/pkg/agent/replica_set_port_manager_test.go b/pkg/agent/replica_set_port_manager_test.go new file mode 100644 index 000000000..4a24be9d8 --- /dev/null +++ b/pkg/agent/replica_set_port_manager_test.go @@ -0,0 +1,213 @@ +package agent + +import ( + "fmt" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" +) + +func TestReplicaSetPortManagerCalculateExpectedPorts(t *testing.T) { + type input struct { + currentPodStates []PodState + expectedPort int + currentAC automationconfig.AutomationConfig + } + + type output struct { + portMap map[string]int + portChangeRequired bool + oldPort int + } + + type testCase struct { + in input + expectedOutput output + } + + name := "mdb" + podName := func(i int) types.NamespacedName { + return types.NamespacedName{Namespace: "mongodb", Name: fmt.Sprintf("%s-%d", name, i)} + } + arbiterPodName := func(i int) types.NamespacedName { + return types.NamespacedName{Namespace: "mongodb", Name: fmt.Sprintf("%s-arb-%d", name, i)} + } + + generateConfig := func(ports []int, arbiterPorts []int) automationconfig.AutomationConfig { + builder := automationconfig.NewBuilder() + builder.SetMembers(len(ports)) + builder.SetArbiters(len(arbiterPorts)) + builder.SetName(name) + builder.AddProcessModification(func(i int, process *automationconfig.Process) { + if i < len(ports) { + process.SetPort(ports[i]) + } + }) + ac, err := builder.Build() + require.NoError(t, err) + return ac + } + + generatePortMap := func(ports []int, arbiterPorts 
[]int) map[string]int { + portMap := map[string]int{} + for i, port := range ports { + portMap[podName(i).Name] = port + } + for i, port := range arbiterPorts { + portMap[arbiterPodName(i+len(ports)).Name] = port + } + return portMap + } + + testCases := map[string]testCase{ + "No ports are changed if there is existing config and pods are not ready": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: false, ReachedGoalState: false}, + {PodName: podName(1), Found: false, ReachedGoalState: false}, + {PodName: podName(2), Found: false, ReachedGoalState: false}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{1000, 1000, 1000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{1000, 1000, 1000}, nil), + portChangeRequired: true, + oldPort: 1000, + }, + }, + "No ports are changed when not all pods reached goal state": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: false}, + {PodName: podName(2), Found: true, ReachedGoalState: true}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{1000, 1000, 1000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{1000, 1000, 1000}, nil), + portChangeRequired: true, + oldPort: 1000, + }, + }, + "All ports set to expected when there are no processes in config yet": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: false}, + {PodName: podName(1), Found: true, ReachedGoalState: false}, + {PodName: podName(2), Found: true, ReachedGoalState: false}, + }, + expectedPort: 2000, + currentAC: generateConfig(nil, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 2000, 2000}, nil), + portChangeRequired: false, + oldPort: 2000, + }, + }, + "Only one port changed when all pods are ready": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: 
true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: true}, + {PodName: podName(2), Found: true, ReachedGoalState: true}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{1000, 1000, 1000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 1000, 1000}, nil), + portChangeRequired: true, + oldPort: 1000, + }, + }, + "No port changes required when all ports changed but not all pods reached goal state": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: true}, + {PodName: podName(2), Found: true, ReachedGoalState: false}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{2000, 2000, 2000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 2000, 2000}, nil), + portChangeRequired: false, + oldPort: 2000, + }, + }, + "No port changes required when all ports changed but only arbiter is not in a goal state": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: true}, + {PodName: podName(2), Found: true, ReachedGoalState: true}, + {PodName: arbiterPodName(3), Found: true, ReachedGoalState: true}, + {PodName: arbiterPodName(4), Found: true, ReachedGoalState: false}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{2000, 2000, 2000}, []int{2000, 2000}), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 2000, 2000}, []int{2000, 2000}), + portChangeRequired: false, + oldPort: 2000, + }, + }, + "No port changes when scaling down and there are more processes in config than current pod states": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: true}, + {PodName: podName(2), Found: true, ReachedGoalState: true}, + 
}, + expectedPort: 2000, + currentAC: generateConfig([]int{2000, 2000, 2000, 2000, 2000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 2000, 2000}, nil), + portChangeRequired: false, + oldPort: 2000, + }, + }, + "No port changes when scaling up and there are less processes in config than current pod states": { + in: input{ + currentPodStates: []PodState{ + {PodName: podName(0), Found: true, ReachedGoalState: true}, + {PodName: podName(1), Found: true, ReachedGoalState: true}, + {PodName: podName(2), Found: true, ReachedGoalState: true}, + {PodName: podName(3), Found: false, ReachedGoalState: false}, + }, + expectedPort: 2000, + currentAC: generateConfig([]int{2000, 2000, 2000}, nil), + }, + expectedOutput: output{ + portMap: generatePortMap([]int{2000, 2000, 2000, 2000}, nil), + portChangeRequired: false, + oldPort: 2000, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + portManager := NewReplicaSetPortManager(zap.S(), tc.in.expectedPort, tc.in.currentPodStates, tc.in.currentAC.Processes) + portMap, portChangeRequired, oldPort := portManager.calculateExpectedPorts() + actualOutput := output{ + portMap: portMap, + portChangeRequired: portChangeRequired, + oldPort: oldPort, + } + assert.Equal(t, tc.expectedOutput, actualOutput) + }) + } + +} diff --git a/pkg/apis/addtoscheme_mongodb_v1.go b/pkg/apis/addtoscheme_mongodb_v1.go deleted file mode 100644 index 7c244e6ae..000000000 --- a/pkg/apis/addtoscheme_mongodb_v1.go +++ /dev/null @@ -1,10 +0,0 @@ -package apis - -import ( - v1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" -) - -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) -} diff --git a/pkg/apis/apis.go b/pkg/apis/apis.go deleted file mode 100644 index 07dc96164..000000000 --- a/pkg/apis/apis.go +++ /dev/null @@ -1,13 +0,0 @@ -package 
apis - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -// AddToSchemes may be used to add all resources defined in the project to a Scheme -var AddToSchemes runtime.SchemeBuilder - -// AddToScheme adds all Resources to the Scheme -func AddToScheme(s *runtime.Scheme) error { - return AddToSchemes.AddToScheme(s) -} diff --git a/pkg/apis/mongodb/group.go b/pkg/apis/mongodb/group.go deleted file mode 100644 index f7d25a1eb..000000000 --- a/pkg/apis/mongodb/group.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package mongodb contains mongodb API versions. -// -// This file ensures Go source parsers acknowledge the mongodb package -// and any child packages. It can be removed if any other Go source files are -// added to this package. -package mongodb diff --git a/pkg/apis/mongodb/v1/doc.go b/pkg/apis/mongodb/v1/doc.go deleted file mode 100644 index b6416df04..000000000 --- a/pkg/apis/mongodb/v1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package v1 contains API Schema definitions for the mongodb v1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=mongodb.com -package v1 diff --git a/pkg/apis/mongodb/v1/mongodb_types.go b/pkg/apis/mongodb/v1/mongodb_types.go deleted file mode 100644 index f0db540a7..000000000 --- a/pkg/apis/mongodb/v1/mongodb_types.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1 - -import ( - "fmt" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Type string - -const ( - ReplicaSet Type = "ReplicaSet" -) - -type Phase string - -const ( - Running Phase = "Running" -) - -const ( - // LastVersionAnnotationKey should indicate which version of MongoDB was last - // configured - LastVersionAnnotationKey = "lastVersion" -) - -// MongoDBSpec defines the desired state of MongoDB -type MongoDBSpec struct { - // Members is the number of members in the replica set - // +optional - Members int `json:"members"` - // Type defines which type of MongoDB deployment the resource should create - Type Type `json:"type"` - // Version defines which 
version of MongoDB will be used - Version string `json:"version"` - - // FeatureCompatibilityVersion configures the feature compatibility version that will - // be set for the deployment - // +optional - FeatureCompatibilityVersion string `json:"featureCompatibilityVersion,omitempty"` -} - -// MongoDBStatus defines the observed state of MongoDB -type MongoDBStatus struct { - MongoURI string `json:"mongoUri"` - Phase Phase `json:"phase"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MongoDB is the Schema for the mongodbs API -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=mongodb,scope=Namespaced,shortName=mdb -type MongoDB struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MongoDBSpec `json:"spec,omitempty"` - Status MongoDBStatus `json:"status,omitempty"` -} - -func (m *MongoDB) UpdateSuccess() { - m.Status.MongoURI = m.MongoURI() - m.Status.Phase = Running -} - -func (m MongoDB) ChangingVersion() bool { - if lastVersion, ok := m.Annotations[LastVersionAnnotationKey]; ok { - return (m.Spec.Version != lastVersion) && lastVersion != "" - } - return false -} - -// MongoURI returns a mongo uri which can be used to connect to this deployment -func (m MongoDB) MongoURI() string { - members := make([]string, m.Spec.Members) - clusterDomain := "svc.cluster.local" // TODO: make this configurable - for i := 0; i < m.Spec.Members; i++ { - members[i] = fmt.Sprintf("%s-%d.%s.%s.%s:%d", m.Name, i, m.ServiceName(), m.Namespace, clusterDomain, 27017) - } - return fmt.Sprintf("mongodb://%s", strings.Join(members, ",")) -} - -// ServiceName returns the name of the Service that should be created for -// this resource -func (m MongoDB) ServiceName() string { - return m.Name + "-svc" -} - -func (m MongoDB) ConfigMapName() string { - return m.Name + "-config" -} - -// GetFCV returns the feature compatibility version. If no FeatureCompatibilityVersion is specified. 
-// It uses the major and minor version for whichever version of MongoDB is configured. -func (m MongoDB) GetFCV() string { - versionToSplit := m.Spec.FeatureCompatibilityVersion - if versionToSplit == "" { - versionToSplit = m.Spec.Version - } - minorIndex := 1 - parts := strings.Split(versionToSplit, ".") - return strings.Join(parts[:minorIndex+1], ".") -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MongoDBList contains a list of MongoDB -type MongoDBList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []MongoDB `json:"items"` -} - -func init() { - SchemeBuilder.Register(&MongoDB{}, &MongoDBList{}) -} diff --git a/pkg/apis/mongodb/v1/mongodb_types_test.go b/pkg/apis/mongodb/v1/mongodb_types_test.go deleted file mode 100644 index 90805635f..000000000 --- a/pkg/apis/mongodb/v1/mongodb_types_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package v1 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestMongoDB_MongoURI(t *testing.T) { - mdb := newReplicaSet(2, "my-rs", "my-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017") - mdb = newReplicaSet(1, "my-single-rs", "my-single-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017") - mdb = newReplicaSet(5, "my-big-rs", "my-big-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017") -} - -func TestGetFCV(t *testing.T) { - mdb := newReplicaSet(3, 
"my-rs", "my-ns") - mdb.Spec.Version = "4.2.0" - assert.Equal(t, "4.2", mdb.GetFCV()) - - mdb.Spec.FeatureCompatibilityVersion = "4.0" - assert.Equal(t, "4.0", mdb.GetFCV()) - - mdb.Spec.FeatureCompatibilityVersion = "" - assert.Equal(t, "4.2", mdb.GetFCV()) -} - -func newReplicaSet(members int, name, namespace string) MongoDB { - return MongoDB{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: MongoDBSpec{ - Members: members, - }, - } -} diff --git a/pkg/apis/mongodb/v1/register.go b/pkg/apis/mongodb/v1/register.go deleted file mode 100644 index 2efd9821b..000000000 --- a/pkg/apis/mongodb/v1/register.go +++ /dev/null @@ -1,19 +0,0 @@ -// NOTE: Boilerplate only. Ignore this file. - -// Package v1 contains API Schema definitions for the mongodb v1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=mongodb.com -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "mongodb.com", Version: "v1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) diff --git a/pkg/authentication/authentication.go b/pkg/authentication/authentication.go new file mode 100644 index 000000000..a856bda66 --- /dev/null +++ b/pkg/authentication/authentication.go @@ -0,0 +1,69 @@ +package authentication + +import ( + "context" + "fmt" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" +) + +func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { + scramEnabled := false + for _, authMode := range mdb.GetAuthOptions().AuthMechanisms { + switch authMode { + case constants.Sha1, constants.Sha256: + if !scramEnabled { + if err := scram.Enable(ctx, auth, secretGetUpdateCreateDeleter, mdb); err != nil { + return fmt.Errorf("could not configure scram authentication: %s", err) + } + scramEnabled = true + } + case constants.X509: + if err := x509.Enable(ctx, auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { + return fmt.Errorf("could not configure x509 authentication: %s", err) + } + } + } + return nil +} + +func AddRemovedUsers(auth *automationconfig.Auth, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) { + deletedUsers := getRemovedUsersFromSpec(mdb.Spec, lastAppliedSpec) + + auth.UsersDeleted = append(auth.UsersDeleted, deletedUsers...) 
+} + +func getRemovedUsersFromSpec(currentMDB mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec *mdbv1.MongoDBCommunitySpec) []automationconfig.DeletedUser { + type user struct { + db string + name string + } + m := map[user]bool{} + var deletedUsers []automationconfig.DeletedUser + + for _, mongoDBUser := range currentMDB.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = true + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + _, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { + deletedUsers = append(deletedUsers, automationconfig.DeletedUser{User: mongoDBUser.Name, Dbs: []string{mongoDBUser.DB}}) + } + } + return deletedUsers +} diff --git a/pkg/authentication/authentication_test.go b/pkg/authentication/authentication_test.go new file mode 100644 index 000000000..edfef363d --- /dev/null +++ b/pkg/authentication/authentication_test.go @@ -0,0 +1,249 @@ +package authentication + +import ( + "context" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/mocks" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" +) + +func TestEnable(t *testing.T) { + ctx := context.Background() + t.Run("SCRAM only", func(t *testing.T) { + auth := automationconfig.Auth{} + user := mocks.BuildScramMongoDBUser("my-user") + mdb := buildConfigurable("mdb", []string{constants.Sha256}, constants.Sha256, 
user) + passwordSecret := secret.Builder(). + SetName(user.PasswordSecretName). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(user.PasswordSecretKey, "TDg_DESiScDrJV6"). + Build() + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + assert.Equal(t, false, auth.Disabled) + assert.Equal(t, constants.Sha256, auth.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha256}, auth.AutoAuthMechanisms) + assert.Len(t, auth.Users, 1) + assert.Equal(t, "my-user", auth.Users[0].Username) + assert.Equal(t, "mms-automation", auth.AutoUser) + }) + t.Run("SCRAM-SHA-256 and SCRAM-SHA-1", func(t *testing.T) { + auth := automationconfig.Auth{} + user := mocks.BuildScramMongoDBUser("my-user") + mdb := buildConfigurable("mdb", []string{constants.Sha256, constants.Sha1}, constants.Sha256, user) + passwordSecret := secret.Builder(). + SetName(user.PasswordSecretName). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(user.PasswordSecretKey, "TDg_DESiScDrJV6"). 
+ Build() + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + assert.Equal(t, false, auth.Disabled) + assert.Equal(t, constants.Sha256, auth.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256, constants.Sha1}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha256, constants.Sha1}, auth.AutoAuthMechanisms) + assert.Len(t, auth.Users, 1) + assert.Equal(t, "my-user", auth.Users[0].Username) + assert.Equal(t, "mms-automation", auth.AutoUser) + }) + t.Run("X509 only", func(t *testing.T) { + auth := automationconfig.Auth{} + user := mocks.BuildX509MongoDBUser("my-user") + mdb := buildConfigurable("mdb", []string{constants.X509}, constants.X509, user) + agentSecret := x509.CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + assert.Equal(t, false, auth.Disabled) + assert.Equal(t, constants.X509, auth.AutoAuthMechanism) + assert.Equal(t, []string{constants.X509}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.X509}, auth.AutoAuthMechanisms) + assert.Len(t, auth.Users, 1) + assert.Equal(t, "CN=my-user,OU=organizationalunit,O=organization", auth.Users[0].Username) + assert.Equal(t, "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", auth.AutoUser) + }) + t.Run("SCRAM and X509 with SCRAM agent", func(t *testing.T) { + auth := automationconfig.Auth{} + userScram := mocks.BuildScramMongoDBUser("my-user") + userX509 := mocks.BuildX509MongoDBUser("my-user") + mdb := buildConfigurable("mdb", []string{constants.Sha256, constants.X509}, constants.Sha256, userScram, userX509) + passwordSecret := secret.Builder(). + SetName(userScram.PasswordSecretName). 
+ SetNamespace(mdb.NamespacedName().Namespace). + SetField(userScram.PasswordSecretKey, "TDg_DESiScDrJV6"). + Build() + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + assert.Equal(t, false, auth.Disabled) + assert.Equal(t, constants.Sha256, auth.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256, constants.X509}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha256}, auth.AutoAuthMechanisms) + assert.Len(t, auth.Users, 2) + assert.Equal(t, "my-user", auth.Users[0].Username) + assert.Equal(t, "CN=my-user,OU=organizationalunit,O=organization", auth.Users[1].Username) + assert.Equal(t, "mms-automation", auth.AutoUser) + }) + t.Run("SCRAM and X509 with X509 agent", func(t *testing.T) { + auth := automationconfig.Auth{} + userScram := mocks.BuildScramMongoDBUser("my-user") + userX509 := mocks.BuildX509MongoDBUser("my-user") + mdb := buildConfigurable("mdb", []string{constants.Sha256, constants.X509}, constants.X509, userScram, userX509) + passwordSecret := secret.Builder(). + SetName(userScram.PasswordSecretName). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(userScram.PasswordSecretKey, "TDg_DESiScDrJV6"). 
+ Build() + agentSecret := x509.CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret, agentSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + assert.Equal(t, false, auth.Disabled) + assert.Equal(t, constants.X509, auth.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256, constants.X509}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.X509}, auth.AutoAuthMechanisms) + assert.Len(t, auth.Users, 2) + assert.Equal(t, "my-user", auth.Users[0].Username) + assert.Equal(t, "CN=my-user,OU=organizationalunit,O=organization", auth.Users[1].Username) + assert.Equal(t, "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", auth.AutoUser) + }) + +} + +func TestGetDeletedUsers(t *testing.T) { + lastAppliedSpec := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{ + { + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + DB: "admin", + }, + }, + } + + t.Run("no change same resource", func(t *testing.T) { + actual := getRemovedUsersFromSpec(lastAppliedSpec, &lastAppliedSpec) + + var expected []automationconfig.DeletedUser + assert.Equal(t, expected, actual) + }) + + t.Run("new user", func(t *testing.T) { + current := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{ + { + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + 
ConnectionStringSecretName: "connection-string-secret", + DB: "admin", + }, + { + Name: "newUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "new-password-secret-name", + }, + ConnectionStringSecretName: "new-connection-string-secret", + DB: "admin", + }, + }, + } + + var expected []automationconfig.DeletedUser + actual := getRemovedUsersFromSpec(current, &lastAppliedSpec) + + assert.Equal(t, expected, actual) + }) + + t.Run("removed one user", func(t *testing.T) { + current := mdbv1.MongoDBCommunitySpec{ + Members: 3, + Type: "ReplicaSet", + Version: "7.0.2", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{}, + } + + expected := []automationconfig.DeletedUser{ + { + User: "testUser", + Dbs: []string{"admin"}, + }, + } + actual := getRemovedUsersFromSpec(current, &lastAppliedSpec) + + assert.Equal(t, expected, actual) + }) +} + +func buildConfigurable(name string, auth []string, agent string, users ...authtypes.User) mocks.MockConfigurable { + return mocks.NewMockConfigurable( + authtypes.Options{ + AuthoritativeSet: false, + KeyFile: "/path/to/keyfile", + AuthMechanisms: auth, + AgentName: constants.AgentName, + AutoAuthMechanism: agent, + }, + users, + types.NamespacedName{ + Name: name, + Namespace: "default", + }, + []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "mdbc", + Name: "my-ref", + }}, + ) +} diff --git a/pkg/authentication/authtypes/authtypes.go b/pkg/authentication/authtypes/authtypes.go new file mode 100644 index 000000000..12d7b0cbb --- /dev/null +++ b/pkg/authentication/authtypes/authtypes.go @@ -0,0 +1,110 @@ +package authtypes + +import ( + "fmt" + "net/url" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" +) + +// Options contains a set of values 
that can be used for more fine-grained configuration of authentication. +type Options struct { + // AuthoritativeSet indicates whether the agents will remove users not defined in the AutomationConfig. + AuthoritativeSet bool + + // KeyFile is the path on disk to the keyfile that will be used for the deployment. + KeyFile string + + // AuthMechanisms is a list of valid authentication mechanisms deployments/agents can use. + AuthMechanisms []string + + // AgentName is username that the Automation Agent will have. + AgentName string + + // AutoAuthMechanism is the desired authentication mechanism that the agents will use. + AutoAuthMechanism string +} + +func (o *Options) IsSha256Present() bool { + return contains.String(o.AuthMechanisms, constants.Sha256) +} + +// Role is a struct which will map to automationconfig.Role. +type Role struct { + // Name is the name of the role. + Name string + + // Database is the database this role applies to. + Database string +} + +// User is a struct which holds all the values required to create an AutomationConfig user +// and references to the credentials for that specific user. +type User struct { + // Username is the username of the user. + Username string + + // Database is the database this user will be created in. + Database string + + // Roles is a slice of roles that this user should have. + Roles []Role + + // PasswordSecretKey is the key which maps to the value of the user's password. + PasswordSecretKey string + + // PasswordSecretName is the name of the secret which stores this user's password. + PasswordSecretName string + + // ScramCredentialsSecretName returns the name of the secret which stores the generated credentials + // for this user. These credentials will be generated if they do not exist, or used if they do. + // Note: there will be one secret with credentials per user created. 
+ ScramCredentialsSecretName string + + // ConnectionStringSecretName is the name of the secret object created by the operator + // which exposes the connection strings for the user. + // Note: there will be one secret with connection strings per user created. + ConnectionStringSecretName string + + // ConnectionStringSecretNamespace is the namespace of the secret object created by the operator which exposes the connection strings for the user. + ConnectionStringSecretNamespace string `json:"connectionStringSecretNamespace,omitempty"` + + // ConnectionStringOptions contains connection string options for this user + // These options will be appended at the end of the connection string and + // will override any existing options from the resources. + ConnectionStringOptions map[string]interface{} +} + +func (u User) GetLoginString(password string) string { + if u.Database != constants.ExternalDB { + return fmt.Sprintf("%s:%s@", + url.QueryEscape(u.Username), + url.QueryEscape(password)) + } + return "" +} + +// Configurable is an interface which any resource which can configure ScramSha authentication should implement. +type Configurable interface { + // GetAuthOptions returns a set of Options which can be used for fine-grained configuration. + GetAuthOptions() Options + + // GetAuthUsers returns a list of users which will be mapped to users in the AutomationConfig. + GetAuthUsers() []User + + // GetAgentPasswordSecretNamespacedName returns the NamespacedName of the secret which stores the generated password for the agent. + GetAgentPasswordSecretNamespacedName() types.NamespacedName + + // GetAgentKeyfileSecretNamespacedName returns the NamespacedName of the secret which stores the keyfile for the agent. + GetAgentKeyfileSecretNamespacedName() types.NamespacedName + + // NamespacedName returns the NamespacedName for the resource that is being configured. 
+ NamespacedName() types.NamespacedName + + // GetOwnerReferences returns the OwnerReferences pointing to the current resource. + GetOwnerReferences() []v1.OwnerReference +} diff --git a/pkg/authentication/mocks/mocks.go b/pkg/authentication/mocks/mocks.go new file mode 100644 index 000000000..627a105be --- /dev/null +++ b/pkg/authentication/mocks/mocks.go @@ -0,0 +1,140 @@ +package mocks + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" +) + +type MockSecretGetUpdateCreateDeleter struct { + secrets map[client.ObjectKey]corev1.Secret +} + +func NewMockedSecretGetUpdateCreateDeleter(secrets ...corev1.Secret) secret.GetUpdateCreateDeleter { + mockSecretGetUpdateCreateDeleter := MockSecretGetUpdateCreateDeleter{} + mockSecretGetUpdateCreateDeleter.secrets = make(map[client.ObjectKey]corev1.Secret) + for _, s := range secrets { + mockSecretGetUpdateCreateDeleter.secrets[types.NamespacedName{Name: s.Name, Namespace: s.Namespace}] = s + } + return mockSecretGetUpdateCreateDeleter +} + +func (c MockSecretGetUpdateCreateDeleter) DeleteSecret(_ context.Context, key client.ObjectKey) error { + delete(c.secrets, key) + return nil +} + +func (c MockSecretGetUpdateCreateDeleter) UpdateSecret(_ context.Context, s corev1.Secret) error { + c.secrets[types.NamespacedName{Name: s.Name, Namespace: s.Namespace}] = s + return nil +} + +func (c MockSecretGetUpdateCreateDeleter) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.UpdateSecret(ctx, secret) +} + +func (c MockSecretGetUpdateCreateDeleter) GetSecret(_ context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { + if s, ok := c.secrets[objectKey]; !ok { + return 
corev1.Secret{}, &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} + } else { + return s, nil + } +} + +type MockConfigurable struct { + opts authtypes.Options + users []authtypes.User + nsName types.NamespacedName + refs []metav1.OwnerReference +} + +func NewMockConfigurable(opts authtypes.Options, users []authtypes.User, nsName types.NamespacedName, refs []metav1.OwnerReference) MockConfigurable { + return MockConfigurable{opts: opts, users: users, nsName: nsName, refs: refs} +} + +func (m MockConfigurable) AgentCertificateSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{ + Namespace: m.nsName.Namespace, + Name: "agent-certs", + } +} + +func (m MockConfigurable) GetAgentPasswordSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.nsName.Name + "-agent-password", Namespace: m.nsName.Namespace} +} + +func (m MockConfigurable) GetAgentKeyfileSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.nsName.Name + "-keyfile", Namespace: m.nsName.Namespace} +} + +func (m MockConfigurable) GetAuthOptions() authtypes.Options { + return m.opts +} + +func (m MockConfigurable) GetAuthUsers() []authtypes.User { + return m.users +} + +func (m MockConfigurable) NamespacedName() types.NamespacedName { + return m.nsName +} + +func (m MockConfigurable) GetOwnerReferences() []metav1.OwnerReference { + return m.refs +} + +func BuildX509MongoDBUser(name string) authtypes.User { + return authtypes.User{ + Username: fmt.Sprintf("CN=%s,OU=organizationalunit,O=organization", name), + Database: "$external", + Roles: []authtypes.Role{ + { + Database: "admin", + Name: "readWrite", + }, + { + Database: "admin", + Name: "clusterAdmin", + }, + }, + } + +} + +func BuildScramMongoDBUser(name string) authtypes.User { + return authtypes.User{ + Username: name, + Database: "admin", + Roles: []authtypes.Role{ + { + Database: "testing", + Name: "readWrite", + }, + { + Database: 
"testing", + Name: "clusterAdmin", + }, + // admin roles for reading FCV + { + Database: "admin", + Name: "readWrite", + }, + { + Database: "admin", + Name: "clusterAdmin", + }, + }, + PasswordSecretKey: fmt.Sprintf("%s-password", name), + PasswordSecretName: fmt.Sprintf("%s-password-secret", name), + ScramCredentialsSecretName: fmt.Sprintf("%s-scram", name), + } + +} diff --git a/pkg/authentication/scram/scram.go b/pkg/authentication/scram/scram.go new file mode 100644 index 000000000..c21e185f1 --- /dev/null +++ b/pkg/authentication/scram/scram.go @@ -0,0 +1,279 @@ +package scram + +import ( + "context" + "encoding/base64" + "fmt" + + "go.uber.org/zap" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" +) + +const ( + sha1SaltKey = "sha1-salt" + sha256SaltKey = "sha256-salt" + + sha1ServerKeyKey = "sha-1-server-key" + sha256ServerKeyKey = "sha-256-server-key" + + sha1StoredKeyKey = "sha-1-stored-key" + sha256StoredKeyKey = "sha-256-stored-key" +) + +// Enable will configure all of the required Kubernetes resources for SCRAM-SHA to be enabled. +// The agent password and keyfile contents will be configured and stored in a secret. +// the user credentials will be generated if not present, or existing credentials will be read. 
+func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { + opts := mdb.GetAuthOptions() + + desiredUsers, err := convertMongoDBResourceUsersToAutomationConfigUsers(ctx, secretGetUpdateCreateDeleter, mdb) + if err != nil { + return fmt.Errorf("could not convert users to Automation Config users: %s", err) + } + + if opts.AutoAuthMechanism == constants.Sha256 || opts.AutoAuthMechanism == constants.Sha1 { + if err := ensureAgent(ctx, auth, secretGetUpdateCreateDeleter, mdb); err != nil { + return err + } + } + + return enableClientAuthentication(auth, opts, desiredUsers) +} + +func ensureAgent(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) error { + generatedPassword, err := generate.RandomFixedLengthStringOfSize(20) + if err != nil { + return fmt.Errorf("could not generate password: %s", err) + } + + generatedContents, err := generate.KeyFileContents() + if err != nil { + return fmt.Errorf("could not generate keyfile contents: %s", err) + } + + // ensure that the agent password secret exists or read existing password. + agentPassword, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentPasswordSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentPasswordKey, generatedPassword) + if err != nil { + return err + } + + // ensure that the agent keyfile secret exists or read existing keyfile. 
+ agentKeyFile, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) + if err != nil { + return err + } + + return enableAgentAuthentication(auth, agentPassword, agentKeyFile, mdb.GetAuthOptions()) +} + +// ensureScramCredentials will ensure that the ScramSha1 & ScramSha256 credentials exist and are stored in the credentials +// secret corresponding to user of the given MongoDB deployment. +func ensureScramCredentials(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, user authtypes.User, mdbNamespacedName types.NamespacedName, ownerRef []metav1.OwnerReference) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { + + password, err := secret.ReadKey(ctx, getUpdateCreator, user.PasswordSecretKey, types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdbNamespacedName.Namespace}) + if err != nil { + // if the password is deleted, that's fine we can read from the stored credentials that were previously generated + if secret.SecretNotExist(err) { + zap.S().Debugf("password secret was not found, reading from credentials from secret/%s", user.ScramCredentialsSecretName) + return readExistingCredentials(ctx, getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) + } + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not read secret key: %s", err) + } + + // we should only need to generate new credentials in two situations. + // 1. We are creating the credentials for the first time + // 2. 
We are changing the password + shouldGenerateNewCredentials, err := needToGenerateNewCredentials(ctx, getUpdateCreator, user.Username, user.ScramCredentialsSecretName, mdbNamespacedName, password) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not determine if new credentials need to be generated: %s", err) + } + + // there are no changes required, we can re-use the same credentials. + if !shouldGenerateNewCredentials { + zap.S().Debugf("Credentials have not changed, using credentials stored in: secret/%s", user.ScramCredentialsSecretName) + return readExistingCredentials(ctx, getUpdateCreator, mdbNamespacedName, user.ScramCredentialsSecretName) + } + + // the password has changed, or we are generating it for the first time + zap.S().Debugf("Generating new credentials and storing in secret/%s", user.ScramCredentialsSecretName) + sha1Creds, sha256Creds, err := generateScramShaCredentials(user.Username, password) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("failed generating scram credentials: %s", err) + } + + // create or update our credentials secret for this user + if err := createScramCredentialsSecret(ctx, getUpdateCreator, mdbNamespacedName, ownerRef, user.ScramCredentialsSecretName, sha1Creds, sha256Creds); err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("faild to create scram credentials secret %s: %s", user.ScramCredentialsSecretName, err) + } + + zap.S().Debugf("Successfully generated SCRAM credentials") + return sha1Creds, sha256Creds, nil +} + +// needToGenerateNewCredentials determines if it is required to generate new credentials or not. +// this will be the case if we are either changing password, or are generating credentials for the first time. 
+func needToGenerateNewCredentials(ctx context.Context, secretGetter secret.Getter, username, scramCredentialsSecretName string, mdbNamespacedName types.NamespacedName, password string) (bool, error) { + s, err := secretGetter.GetSecret(ctx, types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbNamespacedName.Namespace}) + if err != nil { + // haven't generated credentials yet, so we are changing password + if secret.SecretNotExist(err) { + zap.S().Debugf("No existing credentials found, generating new credentials") + return true, nil + } + return false, err + } + + existingSha1Salt := s.Data[sha1SaltKey] + existingSha256Salt := s.Data[sha256SaltKey] + + // the salts are stored encoded, we need to decode them before we use them for + // salt generation + decodedSha1Salt, err := base64.StdEncoding.DecodeString(string(existingSha1Salt)) + if err != nil { + return false, err + } + decodedSha256Salt, err := base64.StdEncoding.DecodeString(string(existingSha256Salt)) + if err != nil { + return false, err + } + + // regenerate credentials using the existing salts in order to see if the password has changed. + sha1Creds, sha256Creds, err := computeScramShaCredentials(username, password, decodedSha1Salt, decodedSha256Salt) + if err != nil { + return false, err + } + + existingSha1Creds, existingSha256Creds, err := readExistingCredentials(ctx, secretGetter, mdbNamespacedName, scramCredentialsSecretName) + if err != nil { + return false, err + } + + sha1CredsAreDifferent := sha1Creds != existingSha1Creds + sha256CredsAreDifferent := sha256Creds != existingSha256Creds + + return sha1CredsAreDifferent || sha256CredsAreDifferent, nil +} + +// generateScramShaCredentials creates a new set of credentials using randomly generated salts. 
The first returned element is +// sha1 credentials, the second is sha256 credentials +func generateScramShaCredentials(username string, password string) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { + sha1Salt, sha256Salt, err := generate.Salts() + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, err + } + + sha1Creds, sha256Creds, err := computeScramShaCredentials(username, password, sha1Salt, sha256Salt) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, err + } + return sha1Creds, sha256Creds, nil +} + +// computeScramShaCredentials computes ScramSha 1 & 256 credentials using the provided salts +func computeScramShaCredentials(username, password string, sha1Salt, sha256Salt []byte) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { + scram1Creds, err := scramcredentials.ComputeScramSha1Creds(username, password, sha1Salt) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not generate scramSha1Creds: %s", err) + } + + scram256Creds, err := scramcredentials.ComputeScramSha256Creds(password, sha256Salt) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not generate scramSha256Creds: %s", err) + } + + return scram1Creds, scram256Creds, nil +} + +// createScramCredentialsSecret will create a Secret that contains all of the fields required to read these credentials +// back in the future. +func createScramCredentialsSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdbObjectKey types.NamespacedName, ref []metav1.OwnerReference, scramCredentialsSecretName string, sha1Creds, sha256Creds scramcredentials.ScramCreds) error { + scramCredsSecret := secret.Builder(). + SetName(scramCredentialsSecretName). + SetNamespace(mdbObjectKey.Namespace). + SetField(sha1SaltKey, sha1Creds.Salt). 
+ SetField(sha1StoredKeyKey, sha1Creds.StoredKey). + SetField(sha1ServerKeyKey, sha1Creds.ServerKey). + SetField(sha256SaltKey, sha256Creds.Salt). + SetField(sha256StoredKeyKey, sha256Creds.StoredKey). + SetField(sha256ServerKeyKey, sha256Creds.ServerKey). + SetOwnerReferences(ref). + Build() + return secret.CreateOrUpdate(ctx, getUpdateCreator, scramCredsSecret) +} + +// readExistingCredentials reads the existing set of credentials for both ScramSha 1 & 256 +func readExistingCredentials(ctx context.Context, secretGetter secret.Getter, mdbObjectKey types.NamespacedName, scramCredentialsSecretName string) (scramcredentials.ScramCreds, scramcredentials.ScramCreds, error) { + credentialsSecret, err := secretGetter.GetSecret(ctx, types.NamespacedName{Name: scramCredentialsSecretName, Namespace: mdbObjectKey.Namespace}) + if err != nil { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("could not get secret %s/%s: %s", mdbObjectKey.Namespace, scramCredentialsSecretName, err) + } + + // we should really never hit this situation. It would only be possible if the secret storing credentials is manually edited. 
+ if !secret.HasAllKeys(credentialsSecret, sha1SaltKey, sha1ServerKeyKey, sha1StoredKeyKey, sha256SaltKey, sha256ServerKeyKey, sha256StoredKeyKey) { + return scramcredentials.ScramCreds{}, scramcredentials.ScramCreds{}, fmt.Errorf("credentials secret did not have all of the required keys") + } + + scramSha1Creds := scramcredentials.ScramCreds{ + IterationCount: scramcredentials.DefaultScramSha1Iterations, + Salt: string(credentialsSecret.Data[sha1SaltKey]), + ServerKey: string(credentialsSecret.Data[sha1ServerKeyKey]), + StoredKey: string(credentialsSecret.Data[sha1StoredKeyKey]), + } + + scramSha256Creds := scramcredentials.ScramCreds{ + IterationCount: scramcredentials.DefaultScramSha256Iterations, + Salt: string(credentialsSecret.Data[sha256SaltKey]), + ServerKey: string(credentialsSecret.Data[sha256ServerKeyKey]), + StoredKey: string(credentialsSecret.Data[sha256StoredKeyKey]), + } + + return scramSha1Creds, scramSha256Creds, nil +} + +// convertMongoDBResourceUsersToAutomationConfigUsers returns a list of users that are able to be set in the AutomationConfig +func convertMongoDBResourceUsersToAutomationConfigUsers(ctx context.Context, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable) ([]automationconfig.MongoDBUser, error) { + var usersWanted []automationconfig.MongoDBUser + for _, u := range mdb.GetAuthUsers() { + if u.Database != constants.ExternalDB { + acUser, err := convertMongoDBUserToAutomationConfigUser(ctx, secretGetUpdateCreateDeleter, mdb.NamespacedName(), mdb.GetOwnerReferences(), u) + if err != nil { + return nil, fmt.Errorf("failed to convert scram user %s to Automation Config user: %s", u.Username, err) + } + usersWanted = append(usersWanted, acUser) + } + } + return usersWanted, nil +} + +// convertMongoDBUserToAutomationConfigUser converts a single user configured in the MongoDB resource and converts it to a user +// that can be added directly to the AutomationConfig. 
+func convertMongoDBUserToAutomationConfigUser(ctx context.Context, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdbNsName types.NamespacedName, ownerRef []metav1.OwnerReference, user authtypes.User) (automationconfig.MongoDBUser, error) { + acUser := automationconfig.MongoDBUser{ + Username: user.Username, + Database: user.Database, + } + for _, role := range user.Roles { + acUser.Roles = append(acUser.Roles, automationconfig.Role{ + Role: role.Name, + Database: role.Database, + }) + } + sha1Creds, sha256Creds, err := ensureScramCredentials(ctx, secretGetUpdateCreateDeleter, user, mdbNsName, ownerRef) + if err != nil { + return automationconfig.MongoDBUser{}, fmt.Errorf("could not ensure scram credentials: %s", err) + } + acUser.AuthenticationRestrictions = []string{} + acUser.Mechanisms = []string{} + acUser.ScramSha1Creds = &sha1Creds + acUser.ScramSha256Creds = &sha256Creds + return acUser, nil +} diff --git a/pkg/authentication/scram/scram_enabler.go b/pkg/authentication/scram/scram_enabler.go new file mode 100644 index 000000000..d02b03066 --- /dev/null +++ b/pkg/authentication/scram/scram_enabler.go @@ -0,0 +1,91 @@ +package scram + +import ( + "errors" + + "github.com/hashicorp/go-multierror" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" +) + +// enableAgentAuthentication updates the provided auth struct and configures scram authentication based on the provided +// values and configuration options. 
+func enableAgentAuthentication(auth *automationconfig.Auth, agentPassword, agentKeyFileContents string, opts authtypes.Options) error { + if err := validateAgentOptions(opts); err != nil { + return err + } + + auth.Disabled = false + auth.AuthoritativeSet = opts.AuthoritativeSet + auth.KeyFile = opts.KeyFile + + // windows file is specified to pass validation, this will never be used + auth.KeyFileWindows = constants.AutomationAgentWindowsKeyFilePath + + auth.AutoAuthMechanisms = make([]string, 0) + if contains.Sha256(opts.AuthMechanisms) { + auth.AutoAuthMechanisms = append(auth.AutoAuthMechanisms, constants.Sha256) + } + if contains.Sha1(opts.AuthMechanisms) { + auth.AutoAuthMechanisms = append(auth.AutoAuthMechanisms, constants.Sha1) + } + + // the username of the MongoDB Agent + auth.AutoUser = opts.AgentName + + // the mechanism used by the Agent + auth.AutoAuthMechanism = opts.AutoAuthMechanism + + // the password for the Agent user + auth.AutoPwd = agentPassword + + // the contents the keyfile should have, this file is owned and managed + // by the agent + auth.Key = agentKeyFileContents + + return nil +} + +func enableClientAuthentication(auth *automationconfig.Auth, opts authtypes.Options, users []automationconfig.MongoDBUser) error { + if err := validateClientOptions(opts); err != nil { + return err + } + + if !contains.Sha256(auth.DeploymentAuthMechanisms) && contains.Sha256(opts.AuthMechanisms) { + auth.DeploymentAuthMechanisms = append(auth.DeploymentAuthMechanisms, constants.Sha256) + } + if !contains.Sha1(auth.DeploymentAuthMechanisms) && contains.Sha1(opts.AuthMechanisms) { + auth.DeploymentAuthMechanisms = append(auth.DeploymentAuthMechanisms, constants.Sha1) + } + + auth.Users = append(auth.Users, users...) + return nil +} + +// validateAgentOptions validates that all the agent required fields have +// a non-empty value. 
+func validateAgentOptions(opts authtypes.Options) error { + var errs error + if opts.AutoAuthMechanism == "" { + errs = multierror.Append(errs, errors.New("AutoAuthMechanism must not be empty")) + } + if opts.AgentName == "" { + errs = multierror.Append(errs, errors.New("AgentName must be specified")) + } + if opts.KeyFile == "" { + errs = multierror.Append(errs, errors.New("KeyFile must be specified")) + } + return errs +} + +// validateClientOptions validates that all the deployment required fields have +// a non-empty value. +func validateClientOptions(opts authtypes.Options) error { + var errs error + if len(opts.AuthMechanisms) == 0 { + errs = multierror.Append(errs, errors.New("at least one AuthMechanism must be specified")) + } + return errs +} diff --git a/pkg/authentication/scram/scram_enabler_test.go b/pkg/authentication/scram/scram_enabler_test.go new file mode 100644 index 000000000..1930f7aed --- /dev/null +++ b/pkg/authentication/scram/scram_enabler_test.go @@ -0,0 +1,110 @@ +package scram + +import ( + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" +) + +func TestScramAutomationConfig(t *testing.T) { + + // Case 1: Both SHA-256 and SHA-1 + auth := automationconfig.Auth{} + opts := authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.Sha256, constants.Sha1}, + AgentName: "mms-automation", + AutoAuthMechanism: constants.Sha256, + } + err := configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + + t.Run("Authentication is correctly configured", func(t *testing.T) { + assert.Equal(t, constants.AgentName, auth.AutoUser) + assert.Equal(t, "keyfilecontents", auth.Key) + 
assert.Equal(t, "password", auth.AutoPwd) + assert.Equal(t, constants.Sha256, auth.AutoAuthMechanism) + assert.Len(t, auth.DeploymentAuthMechanisms, 2) + assert.Len(t, auth.AutoAuthMechanisms, 2) + assert.Equal(t, []string{constants.Sha256, constants.Sha1}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha256, constants.Sha1}, auth.AutoAuthMechanisms) + assert.Equal(t, constants.AutomationAgentKeyFilePathInContainer, auth.KeyFile) + assert.Equal(t, constants.AutomationAgentWindowsKeyFilePath, auth.KeyFileWindows) + }) + t.Run("Subsequent configuration doesn't add to deployment auth mechanisms", func(t *testing.T) { + err := configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + assert.Equal(t, []string{constants.Sha256, constants.Sha1}, auth.DeploymentAuthMechanisms) + }) + + // Case 2: only SHA-256 + auth = automationconfig.Auth{} + opts = authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.Sha256}, + AgentName: "mms-automation", + AutoAuthMechanism: constants.Sha256, + } + err = configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + + t.Run("Authentication is correctly configured", func(t *testing.T) { + assert.Equal(t, constants.Sha256, auth.AutoAuthMechanism) + assert.Len(t, auth.DeploymentAuthMechanisms, 1) + assert.Len(t, auth.AutoAuthMechanisms, 1) + assert.Equal(t, []string{constants.Sha256}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha256}, auth.AutoAuthMechanisms) + assert.Equal(t, constants.AutomationAgentKeyFilePathInContainer, auth.KeyFile) + assert.Equal(t, constants.AutomationAgentWindowsKeyFilePath, auth.KeyFileWindows) + }) + t.Run("Subsequent configuration doesn't add to deployment auth mechanisms", func(t *testing.T) { + err := 
configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + assert.Equal(t, []string{constants.Sha256}, auth.DeploymentAuthMechanisms) + }) + + // Case 3: only SHA-1 + auth = automationconfig.Auth{} + opts = authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.Sha1}, + AgentName: "mms-automation", + AutoAuthMechanism: constants.Sha1, + } + err = configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + + t.Run("Authentication is correctly configured", func(t *testing.T) { + assert.Equal(t, constants.Sha1, auth.AutoAuthMechanism) + assert.Len(t, auth.DeploymentAuthMechanisms, 1) + assert.Len(t, auth.AutoAuthMechanisms, 1) + assert.Equal(t, []string{constants.Sha1}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.Sha1}, auth.AutoAuthMechanisms) + assert.Equal(t, constants.AutomationAgentKeyFilePathInContainer, auth.KeyFile) + assert.Equal(t, constants.AutomationAgentWindowsKeyFilePath, auth.KeyFileWindows) + }) + t.Run("Subsequent configuration doesn't add to deployment auth mechanisms", func(t *testing.T) { + err := configureInAutomationConfig(&auth, "password", "keyfilecontents", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + assert.Equal(t, []string{constants.Sha1}, auth.DeploymentAuthMechanisms) + }) +} + +// configureInAutomationConfig updates the provided auth struct and fully configures Scram authentication. 
+func configureInAutomationConfig(auth *automationconfig.Auth, agentPassword, agentKeyFile string, users []automationconfig.MongoDBUser, opts authtypes.Options) error { + err := enableAgentAuthentication(auth, agentPassword, agentKeyFile, opts) + if err != nil { + return err + } + err = enableClientAuthentication(auth, opts, users) + if err != nil { + return err + } + return nil +} diff --git a/pkg/authentication/scram/scram_test.go b/pkg/authentication/scram/scram_test.go new file mode 100644 index 000000000..dd43ffc9c --- /dev/null +++ b/pkg/authentication/scram/scram_test.go @@ -0,0 +1,320 @@ +package scram + +import ( + "context" + "os" + "reflect" + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/mocks" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" + "github.com/stretchr/testify/assert" +) + +func init() { + logger, err := zap.NewDevelopment() + if err != nil { + os.Exit(1) + } + zap.ReplaceGlobals(logger) +} + +const ( + testSha1Salt = "zEt5uDSnr/l9paFPsQzhAA==" + testSha1ServerKey = "LEm/fv4gM0Y/XizbUoz/hULRnX0=" + testSha1StoredKey = "0HzXK7NtK40HXVn6zOqrNKVl+MY=" + + testSha256Salt = "qRr+7VgicfVcFjwZhu8u5JSE5ZeVBUP1A+lM4A==" + testSha256ServerKey = "C9FIUhP6mqwe/2SJIheGBpOIqlxuq9Nh3fs+t+R/3zk=" + testSha256StoredKey = "7M7dUSY0sHTOXdNnoPSVbXg9Flon1b3t8MINGI8Tst0=" +) + +func TestReadExistingCredentials(t *testing.T) { + ctx := context.Background() + mdbObjectKey := types.NamespacedName{Name: "mdb-0", 
Namespace: "default"} + user := mocks.BuildScramMongoDBUser("mdbuser-0") + t.Run("credentials are successfully generated when all fields are present", func(t *testing.T) { + scramCredsSecret := validScramCredentialsSecret(mdbObjectKey, user.ScramCredentialsSecretName) + scram1Creds, scram256Creds, err := readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) + assert.NoError(t, err) + assertScramCredsCredentialsValidity(t, scram1Creds, scram256Creds) + }) + t.Run("credentials are not generated if a field is missing", func(t *testing.T) { + scramCredsSecret := invalidSecret(mdbObjectKey, user.ScramCredentialsSecretName) + _, _, err := readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, user.ScramCredentialsSecretName) + assert.Error(t, err) + }) + + t.Run("credentials are not generated if the secret does not exist", func(t *testing.T) { + scramCredsSecret := validScramCredentialsSecret(mdbObjectKey, user.ScramCredentialsSecretName) + _, _, err := readExistingCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredsSecret), mdbObjectKey, "different-username") + assert.Error(t, err) + }) +} + +func TestComputeScramCredentials_ComputesSameStoredAndServerKey_WithSameSalt(t *testing.T) { + sha1Salt, sha256SaltKey, err := generate.Salts() + assert.NoError(t, err) + + username := "user-1" + password := "X6oSVAfD1la8fJwhfN" // nolint + + for i := 0; i < 10; i++ { + sha1Creds0, sha256Creds0, err := computeScramShaCredentials(username, password, sha1Salt, sha256SaltKey) + assert.NoError(t, err) + sha1Creds1, sha256Creds1, err := computeScramShaCredentials(username, password, sha1Salt, sha256SaltKey) + assert.NoError(t, err) + + assert.True(t, reflect.DeepEqual(sha1Creds0, sha1Creds1)) + assert.True(t, reflect.DeepEqual(sha256Creds0, sha256Creds1)) + } +} + +func TestEnsureScramCredentials(t *testing.T) { + ctx := 
context.Background() + mdb, user := buildConfigurableAndUser("mdb-0") + t.Run("Fails when there is no password secret, and no credentials secret", func(t *testing.T) { + _, _, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(), user, mdb.NamespacedName(), nil) + assert.Error(t, err) + }) + t.Run("Existing credentials are used when password does not exist, but credentials secret has been created", func(t *testing.T) { + scramCredentialsSecret := validScramCredentialsSecret(mdb.NamespacedName(), user.ScramCredentialsSecretName) + scram1Creds, scram256Creds, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret), user, mdb.NamespacedName(), nil) + assert.NoError(t, err) + assertScramCredsCredentialsValidity(t, scram1Creds, scram256Creds) + }) + t.Run("Changing password results in different credentials being returned", func(t *testing.T) { + newPassword, err := generate.RandomFixedLengthStringOfSize(20) + assert.NoError(t, err) + + differentPasswordSecret := secret.Builder(). + SetName(user.PasswordSecretName). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(user.PasswordSecretKey, newPassword). 
+ Build() + + scramCredentialsSecret := validScramCredentialsSecret(mdb.NamespacedName(), user.ScramCredentialsSecretName) + scram1Creds, scram256Creds, err := ensureScramCredentials(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(scramCredentialsSecret, differentPasswordSecret), user, mdb.NamespacedName(), nil) + assert.NoError(t, err) + assert.NotEqual(t, testSha1Salt, scram1Creds.Salt) + assert.NotEmpty(t, scram1Creds.Salt) + assert.NotEqual(t, testSha1StoredKey, scram1Creds.StoredKey) + assert.NotEmpty(t, scram1Creds.StoredKey) + assert.NotEqual(t, testSha1ServerKey, scram1Creds.ServerKey) + assert.NotEmpty(t, scram1Creds.ServerKey) + assert.Equal(t, 10000, scram1Creds.IterationCount) + + assert.NotEqual(t, testSha256Salt, scram256Creds.Salt) + assert.NotEmpty(t, scram256Creds.Salt) + assert.NotEqual(t, testSha256StoredKey, scram256Creds.StoredKey) + assert.NotEmpty(t, scram256Creds.StoredKey) + assert.NotEqual(t, testSha256ServerKey, scram256Creds.ServerKey) + assert.NotEmpty(t, scram256Creds.ServerKey) + assert.Equal(t, 15000, scram256Creds.IterationCount) + }) +} + +func TestConvertMongoDBUserToAutomationConfigUser(t *testing.T) { + ctx := context.Background() + mdb, user := buildConfigurableAndUser("mdb-0") + + t.Run("When password exists, the user is created in the automation config", func(t *testing.T) { + passwordSecret := secret.Builder(). + SetName(user.PasswordSecretName). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(user.PasswordSecretKey, "TDg_DESiScDrJV6"). 
+ Build() + + acUser, err := convertMongoDBUserToAutomationConfigUser(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(passwordSecret), mdb.NamespacedName(), nil, user) + + assert.NoError(t, err) + assert.Equal(t, user.Username, acUser.Username) + assert.Equal(t, user.Database, "admin") + assert.Equal(t, len(user.Roles), len(acUser.Roles)) + assert.NotNil(t, acUser.ScramSha1Creds) + assert.NotNil(t, acUser.ScramSha256Creds) + for i, acRole := range acUser.Roles { + assert.Equal(t, user.Roles[i].Name, acRole.Role) + assert.Equal(t, user.Roles[i].Database, acRole.Database) + } + }) + + t.Run("If there is no password secret, the creation fails", func(t *testing.T) { + _, err := convertMongoDBUserToAutomationConfigUser(ctx, mocks.NewMockedSecretGetUpdateCreateDeleter(), mdb.NamespacedName(), nil, user) + assert.Error(t, err) + }) +} + +func TestConfigureScram(t *testing.T) { + ctx := context.Background() + t.Run("Should fail if there is no password present for the user", func(t *testing.T) { + mdb, _ := buildConfigurableAndUser("mdb-0") + s := mocks.NewMockedSecretGetUpdateCreateDeleter() + + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.Error(t, err) + }) + + t.Run("Agent Credentials Secret should be created if there are no users", func(t *testing.T) { + mdb := buildConfigurable("mdb-0") + s := mocks.NewMockedSecretGetUpdateCreateDeleter() + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.NoError(t, err) + + passwordSecret, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) + assert.NoError(t, err) + assert.True(t, secret.HasAllKeys(passwordSecret, constants.AgentPasswordKey)) + assert.NotEmpty(t, passwordSecret.Data[constants.AgentPasswordKey]) + + keyfileSecret, err := s.GetSecret(ctx, mdb.GetAgentKeyfileSecretNamespacedName()) + assert.NoError(t, err) + assert.True(t, secret.HasAllKeys(keyfileSecret, constants.AgentKeyfileKey)) + assert.NotEmpty(t, 
keyfileSecret.Data[constants.AgentKeyfileKey]) + }) + + t.Run("Agent Credentials Secret should contain owner reference", func(t *testing.T) { + mdb := buildConfigurable("mdb-0") + s := mocks.NewMockedSecretGetUpdateCreateDeleter() + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.NoError(t, err) + + passwordSecret, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) + assert.NoError(t, err) + + actualRef := passwordSecret.GetOwnerReferences() + expectedRef := []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "mdbc", + Name: "my-ref", + }} + assert.Equal(t, expectedRef, actualRef) + }) + + t.Run("Agent Password Secret is used if it exists", func(t *testing.T) { + mdb := buildConfigurable("mdb-0") + + agentPasswordSecret := secret.Builder(). + SetName(mdb.GetAgentPasswordSecretNamespacedName().Name). + SetNamespace(mdb.GetAgentPasswordSecretNamespacedName().Namespace). + SetField(constants.AgentPasswordKey, "A21Zv5agv3EKXFfM"). + Build() + + s := mocks.NewMockedSecretGetUpdateCreateDeleter(agentPasswordSecret) + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.NoError(t, err) + + ps, err := s.GetSecret(ctx, mdb.GetAgentPasswordSecretNamespacedName()) + assert.NoError(t, err) + assert.True(t, secret.HasAllKeys(ps, constants.AgentPasswordKey)) + assert.NotEmpty(t, ps.Data[constants.AgentPasswordKey]) + assert.Equal(t, "A21Zv5agv3EKXFfM", string(ps.Data[constants.AgentPasswordKey])) + + }) + + t.Run("Agent Keyfile Secret is used if present", func(t *testing.T) { + mdb := buildConfigurable("mdb-0") + + keyfileSecret := secret.Builder(). + SetName(mdb.GetAgentKeyfileSecretNamespacedName().Name). + SetNamespace(mdb.GetAgentKeyfileSecretNamespacedName().Namespace). + SetField(constants.AgentKeyfileKey, "RuPeMaIe2g0SNTTa"). 
+ Build() + + s := mocks.NewMockedSecretGetUpdateCreateDeleter(keyfileSecret) + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.NoError(t, err) + + ks, err := s.GetSecret(ctx, mdb.GetAgentKeyfileSecretNamespacedName()) + assert.NoError(t, err) + assert.True(t, secret.HasAllKeys(ks, constants.AgentKeyfileKey)) + assert.Equal(t, "RuPeMaIe2g0SNTTa", string(ks.Data[constants.AgentKeyfileKey])) + + }) + + t.Run("Agent Credentials Secret should be created", func(t *testing.T) { + mdb := buildConfigurable("mdb-0") + s := mocks.NewMockedSecretGetUpdateCreateDeleter() + auth := automationconfig.Auth{} + err := Enable(ctx, &auth, s, mdb) + assert.NoError(t, err) + }) +} + +func buildConfigurable(name string, users ...authtypes.User) authtypes.Configurable { + return mocks.NewMockConfigurable( + authtypes.Options{ + AuthoritativeSet: false, + KeyFile: "/path/to/keyfile", + AuthMechanisms: []string{constants.Sha256}, + AgentName: constants.AgentName, + AutoAuthMechanism: constants.Sha256, + }, + users, + types.NamespacedName{ + Name: name, + Namespace: "default", + }, + []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "mdbc", + Name: "my-ref", + }}, + ) +} + +func buildConfigurableAndUser(name string) (authtypes.Configurable, authtypes.User) { + mdb := buildConfigurable(name, mocks.BuildScramMongoDBUser(name)) + return mdb, mdb.GetAuthUsers()[0] +} + +func assertScramCredsCredentialsValidity(t *testing.T, scram1Creds, scram256Creds scramcredentials.ScramCreds) { + assert.Equal(t, testSha1Salt, scram1Creds.Salt) + assert.Equal(t, testSha1StoredKey, scram1Creds.StoredKey) + assert.Equal(t, testSha1ServerKey, scram1Creds.ServerKey) + assert.Equal(t, 10000, scram1Creds.IterationCount) + + assert.Equal(t, testSha256Salt, scram256Creds.Salt) + assert.Equal(t, testSha256StoredKey, scram256Creds.StoredKey) + assert.Equal(t, testSha256ServerKey, scram256Creds.ServerKey) + assert.Equal(t, 15000, scram256Creds.IterationCount) +} + +// 
validScramCredentialsSecret returns a secret that has all valid scram credentials +func validScramCredentialsSecret(objectKey types.NamespacedName, scramCredentialsSecretName string) corev1.Secret { + return secret.Builder(). + SetName(scramCredentialsSecretName). + SetNamespace(objectKey.Namespace). + SetField(sha1SaltKey, testSha1Salt). + SetField(sha1StoredKeyKey, testSha1StoredKey). + SetField(sha1ServerKeyKey, testSha1ServerKey). + SetField(sha256SaltKey, testSha256Salt). + SetField(sha256StoredKeyKey, testSha256StoredKey). + SetField(sha256ServerKeyKey, testSha256ServerKey). + Build() +} + +// invalidSecret returns a secret that is incomplete +func invalidSecret(objectKey types.NamespacedName, scramCredentialsSecretName string) corev1.Secret { + return secret.Builder(). + SetName(scramCredentialsSecretName). + SetNamespace(objectKey.Namespace). + SetField(sha1SaltKey, "nxBSYyZZIBZxStyt"). + SetField(sha1StoredKeyKey, "Bs4sePK0cdMy6n"). + SetField(sha1ServerKeyKey, "eP6_p76ql_h8iiH"). 
+ Build() +} diff --git a/pkg/authentication/scramcredentials/scram_credentials.go b/pkg/authentication/scramcredentials/scram_credentials.go new file mode 100644 index 000000000..7d286ba61 --- /dev/null +++ b/pkg/authentication/scramcredentials/scram_credentials.go @@ -0,0 +1,166 @@ +package scramcredentials + +import ( + "crypto/hmac" + "crypto/md5" //nolint + "crypto/sha1" //nolint + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + + "github.com/xdg/stringprep" +) + +const ( + RFC5802MandatedSaltSize = 4 + + clientKeyInput = "Client Key" // specified in RFC 5802 + serverKeyInput = "Server Key" // specified in RFC 5802 + + // using the default MongoDB values for the number of iterations depending on mechanism + DefaultScramSha1Iterations = 10000 + DefaultScramSha256Iterations = 15000 +) + +type ScramCreds struct { + IterationCount int `json:"iterationCount"` + Salt string `json:"salt"` + ServerKey string `json:"serverKey"` + StoredKey string `json:"storedKey"` +} + +func ComputeScramSha256Creds(password string, salt []byte) (ScramCreds, error) { + base64EncodedSalt := base64.StdEncoding.EncodeToString(salt) + return computeScramCredentials(sha256.New, DefaultScramSha256Iterations, base64EncodedSalt, password) +} + +func ComputeScramSha1Creds(username, password string, salt []byte) (ScramCreds, error) { + base64EncodedSalt := base64.StdEncoding.EncodeToString(salt) + password = md5Hex(username + ":mongo:" + password) + return computeScramCredentials(sha1.New, DefaultScramSha1Iterations, base64EncodedSalt, password) +} + +func md5Hex(s string) string { + h := md5.New() // nolint + h.Write([]byte(s)) //nolint + return hex.EncodeToString(h.Sum(nil)) +} + +func generateSaltedPassword(hashConstructor func() hash.Hash, password string, salt []byte, iterationCount int) ([]byte, error) { + preparedPassword, err := stringprep.SASLprep.Prepare(password) + if err != nil { + return nil, fmt.Errorf("could not SASLprep password: %s", err) + } + + result, 
err := hmacIteration(hashConstructor, []byte(preparedPassword), salt, iterationCount) + if err != nil { + return nil, fmt.Errorf("could not run hmacIteration: %s", err) + } + return result, nil +} + +func hmacIteration(hashConstructor func() hash.Hash, input, salt []byte, iterationCount int) ([]byte, error) { + hashSize := hashConstructor().Size() + + // incorrect salt size will pass validation, but the credentials will be invalid. i.e. it will not + // be possible to auth with the password provided to create the credentials. + if len(salt) != hashSize-RFC5802MandatedSaltSize { + return nil, fmt.Errorf("salt should have a size of %d bytes, but instead has a size of %d bytes", hashSize-RFC5802MandatedSaltSize, len(salt)) + } + + startKey := append(salt, 0, 0, 0, 1) + result := make([]byte, hashSize) + + hmacHash := hmac.New(hashConstructor, input) + if _, err := hmacHash.Write(startKey); err != nil { + return nil, fmt.Errorf("error running hmacHash: %s", err) + } + + intermediateDigest := hmacHash.Sum(nil) + + copy(result, intermediateDigest) + + for i := 1; i < iterationCount; i++ { + hmacHash.Reset() + if _, err := hmacHash.Write(intermediateDigest); err != nil { + return nil, fmt.Errorf("error running hmacHash: %s", err) + } + + intermediateDigest = hmacHash.Sum(nil) + + for i := 0; i < len(intermediateDigest); i++ { + result[i] ^= intermediateDigest[i] + } + } + + return result, nil +} + +func generateClientOrServerKey(hashConstructor func() hash.Hash, saltedPassword []byte, input string) ([]byte, error) { + hmacHash := hmac.New(hashConstructor, saltedPassword) + if _, err := hmacHash.Write([]byte(input)); err != nil { + return nil, fmt.Errorf("error running hmacHash: %s", err) + } + + return hmacHash.Sum(nil), nil +} + +func generateStoredKey(hashConstructor func() hash.Hash, clientKey []byte) ([]byte, error) { + h := hashConstructor() + if _, err := h.Write(clientKey); err != nil { + return nil, fmt.Errorf("error hashing: %s", err) + } + return h.Sum(nil), nil 
+} + +func generateSecrets(hashConstructor func() hash.Hash, password string, salt []byte, iterationCount int) (storedKey, serverKey []byte, err error) { + saltedPassword, err := generateSaltedPassword(hashConstructor, password, salt, iterationCount) + if err != nil { + return nil, nil, fmt.Errorf("error generating salted password: %s", err) + } + + clientKey, err := generateClientOrServerKey(hashConstructor, saltedPassword, clientKeyInput) + if err != nil { + return nil, nil, fmt.Errorf("error generating client key: %s", err) + } + + storedKey, err = generateStoredKey(hashConstructor, clientKey) + if err != nil { + return nil, nil, fmt.Errorf("error generating stored key: %s", err) + } + + serverKey, err = generateClientOrServerKey(hashConstructor, saltedPassword, serverKeyInput) + if err != nil { + return nil, nil, fmt.Errorf("error generating server key: %s", err) + } + + return storedKey, serverKey, err +} + +func generateB64EncodedSecrets(hashConstructor func() hash.Hash, password, b64EncodedSalt string, iterationCount int) (storedKey, serverKey string, err error) { + salt, err := base64.StdEncoding.DecodeString(b64EncodedSalt) + if err != nil { + return "", "", fmt.Errorf("error decoding salt: %s", err) + } + + unencodedStoredKey, unencodedServerKey, err := generateSecrets(hashConstructor, password, salt, iterationCount) + if err != nil { + return "", "", fmt.Errorf("error generating secrets: %s", err) + } + + storedKey = base64.StdEncoding.EncodeToString(unencodedStoredKey) + serverKey = base64.StdEncoding.EncodeToString(unencodedServerKey) + return storedKey, serverKey, nil +} + +// password should be encrypted in the case of SCRAM-SHA-1 and unencrypted in the case of SCRAM-SHA-256 +func computeScramCredentials(hashConstructor func() hash.Hash, iterationCount int, base64EncodedSalt string, password string) (ScramCreds, error) { + storedKey, serverKey, err := generateB64EncodedSecrets(hashConstructor, password, base64EncodedSalt, iterationCount) + if err != 
nil { + return ScramCreds{}, fmt.Errorf("error generating SCRAM-SHA keys: %s", err) + } + + return ScramCreds{IterationCount: iterationCount, Salt: base64EncodedSalt, StoredKey: storedKey, ServerKey: serverKey}, nil +} diff --git a/pkg/authentication/scramcredentials/scram_credentials_test.go b/pkg/authentication/scramcredentials/scram_credentials_test.go new file mode 100644 index 000000000..e81234e5b --- /dev/null +++ b/pkg/authentication/scramcredentials/scram_credentials_test.go @@ -0,0 +1,29 @@ +package scramcredentials + +import ( + "crypto/sha1" //nolint + "crypto/sha256" + "hash" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestScramSha1SecretsMatch(t *testing.T) { + assertSecretsMatch(t, sha1.New, "caeec61ba3b15b15b188d29e876514e8", 10, "S3cuk2Rnu/MlbewzxrmmVA==", "sYBa3XlSPKNrgjzhOuEuRlJY4dQ=", "zuAxRSQb3gZkbaB1IGlusK4jy1M=") + assertSecretsMatch(t, sha1.New, "4d9625b297999b3ca786d4a9622d04f1", 10, "kW9KbCQiCOll5Ljd44cjkQ==", "VJ8fFVHkPltibvT//mG/OWw44Hc=", "ceDRsgj9HezpZ4/vkZX8GZNNN50=") + assertSecretsMatch(t, sha1.New, "fd0a78e418dcef39f8c768222810b894", 10, "hhX6xsoID6FeWjXncuNgAg==", "TxgaZJ4cIn+S9EfTcc9IOEG7RGc=", "d6/qjwBs0qkPKfUAjSh5eemsySE=") +} +func TestScramSha256SecretsMatch(t *testing.T) { + assertSecretsMatch(t, sha256.New, "Gy4ZNMr-SYEsEpAEZv", 15000, "ajdf1E1QTsNAQdBEodB4vzQOFuvcw9K6PmouVg==", "/pBk9XBwSm9UyeQmyJ3LfogfHu9Z/XTjGmRhQDHx/4I=", "Avm8mjtMyg659LAyeD4VmuzQb5lxL5iy3dCuzfscfMc=") + assertSecretsMatch(t, sha256.New, "Y9SPYSJYUJB_", 15000, "Oplsu3uju+lYyX4apKb0K6xfHpmFtH99Oyk4Ow==", "oTJhml8KKZUSt9k4tg+tS6D/ygR+a2Xfo8JKjTpQoAI=", "SUfA2+SKL35u665WY5NnJJmA9L5dHu/TnWXX/0nm42Y=") + assertSecretsMatch(t, sha256.New, "157VDZr0h-Pz-wj72", 15000, "P/4xs3anygxu3/l2p35CSBe4Z47IV/FtE/e44A==", "jOb27nFF72SQoY7WUqKXOTR4e8jETXxMS67SONrcbjA=", "3FnslkgUweautAfPRCOEjhS+YbUYUNmdDQUGxB+oaFE=") + assertSecretsMatch(t, sha256.New, "P8z1sDfELCePTNbVqX", 15000, "RPNhenwTHlqW5OE597XpuwvPLaiecPpYFa58Pg==", 
"sJ8UhQRszLNo15cOe62+HLjt2NxmSkJGjdJpclTIMBs=", "CSg02ODAvh9+swUHoimXcDsT9lLp/A5IhQXavXl7+qA=") +} + +func assertSecretsMatch(t *testing.T, hash func() hash.Hash, passwordHash string, iterationCount int, salt, storedKey, serverKey string) { + computedStoredKey, computedServerKey, err := generateB64EncodedSecrets(hash, passwordHash, salt, iterationCount) + assert.NoError(t, err) + assert.Equal(t, computedStoredKey, storedKey) + assert.Equal(t, computedServerKey, serverKey) +} diff --git a/pkg/authentication/x509/x509.go b/pkg/authentication/x509/x509.go new file mode 100644 index 000000000..20297e35f --- /dev/null +++ b/pkg/authentication/x509/x509.go @@ -0,0 +1,208 @@ +package x509 + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "fmt" + "math/big" + "regexp" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" +) + +// Enable will configure all of the required Kubernetes resources for X509 to be enabled. +// The agent password and keyfile contents will be configured and stored in a secret. +// the user credentials will be generated if not present, or existing credentials will be read. 
+func Enable(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { + opts := mdb.GetAuthOptions() + + desiredUsers := convertMongoDBResourceUsersToAutomationConfigUsers(mdb) + + if opts.AutoAuthMechanism == constants.X509 { + if err := ensureAgent(ctx, auth, secretGetUpdateCreateDeleter, mdb, agentCertSecret); err != nil { + return err + } + } + + return enableClientAuthentication(auth, opts, desiredUsers) +} + +func ensureAgent(ctx context.Context, auth *automationconfig.Auth, secretGetUpdateCreateDeleter secret.GetUpdateCreateDeleter, mdb authtypes.Configurable, agentCertSecret types.NamespacedName) error { + generatedContents, err := generate.KeyFileContents() + if err != nil { + return fmt.Errorf("could not generate keyfile contents: %s", err) + } + + // ensure that the agent keyfile secret exists or read existing keyfile. + agentKeyFile, err := secret.EnsureSecretWithKey(ctx, secretGetUpdateCreateDeleter, mdb.GetAgentKeyfileSecretNamespacedName(), mdb.GetOwnerReferences(), constants.AgentKeyfileKey, generatedContents) + if err != nil { + return err + } + + agentCert, err := secret.ReadKey(ctx, secretGetUpdateCreateDeleter, "tls.crt", agentCertSecret) + if err != nil { + return err + } + + agentSubject, err := readAgentSubjectsFromCert(agentCert) + if err != nil { + return err + } + + if !isValidX509Subject(agentSubject) { + return fmt.Errorf("Agent subject: %s is not a valid subject", agentSubject) + } + + return enableAgentAuthentication(auth, agentKeyFile, agentSubject, mdb.GetAuthOptions()) +} + +// convertMongoDBResourceUsersToAutomationConfigUsers returns a list of users that are able to be set in the AutomationConfig +func convertMongoDBResourceUsersToAutomationConfigUsers(mdb authtypes.Configurable) []automationconfig.MongoDBUser { + var usersWanted []automationconfig.MongoDBUser + for _, u := range mdb.GetAuthUsers() { + if 
u.Database == constants.ExternalDB { + acUser := convertMongoDBUserToAutomationConfigUser(u) + usersWanted = append(usersWanted, acUser) + } + } + return usersWanted +} + +// convertMongoDBUserToAutomationConfigUser converts a single user configured in the MongoDB resource and converts it to a user +// that can be added directly to the AutomationConfig. +func convertMongoDBUserToAutomationConfigUser(user authtypes.User) automationconfig.MongoDBUser { + acUser := automationconfig.MongoDBUser{ + Username: user.Username, + Database: user.Database, + } + for _, role := range user.Roles { + acUser.Roles = append(acUser.Roles, automationconfig.Role{ + Role: role.Name, + Database: role.Database, + }) + } + acUser.AuthenticationRestrictions = []string{} + acUser.Mechanisms = []string{} + return acUser +} + +func readAgentSubjectsFromCert(agentCert string) (string, error) { + var rdns pkix.RDNSequence + + block, rest := pem.Decode([]byte(agentCert)) + + if block != nil && block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return "", err + } + + if _, err := asn1.Unmarshal(cert.RawSubject, &rdns); err != nil { + return "", err + } + } else if len(rest) > 0 { + cert, err := x509.ParseCertificate(rest) + if err != nil { + return "", err + } + + if _, err := asn1.Unmarshal(cert.RawSubject, &rdns); err != nil { + return "", err + } + } + + return rdns.String(), nil +} + +func isValidX509Subject(subject string) bool { + expected := []string{"CN", "C", "OU"} + for _, name := range expected { + matched, err := regexp.MatchString(name+`=\w+`, subject) + if err != nil { + continue + } + if !matched { + return false + } + } + return true +} + +func CreateAgentCertificateSecret(key string, invalid bool, agentCertSecret types.NamespacedName) v1.Secret { + agentCert, _, _ := CreateAgentCertificate() + if invalid { + agentCert = "INVALID CERT" + } + + return secret.Builder(). + SetName(agentCertSecret.Name). 
+ SetNamespace(agentCertSecret.Namespace). + SetField(key, agentCert). + Build() +} + +func CreateAgentCertificate() (string, string, error) { + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return "", "", err + } + + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return "", "", err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return "", "", err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Country: []string{"US"}, + Organization: []string{"MongoDB"}, + OrganizationalUnit: []string{"ENG"}, + CommonName: "mms-automation-agent", + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), // cert expires in 10 years + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return "", "", err + } + + caPEM := new(bytes.Buffer) + _ = pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + + caPrivKeyPEM := new(bytes.Buffer) + _ = pem.Encode(caPrivKeyPEM, &pem.Block{ + Type: "PRIVATE KEY", + Bytes: privBytes, + }) + + return caPEM.String(), caPrivKeyPEM.String(), nil +} diff --git a/pkg/authentication/x509/x509_enabler.go b/pkg/authentication/x509/x509_enabler.go new file mode 100644 index 000000000..d21227921 --- /dev/null +++ b/pkg/authentication/x509/x509_enabler.go @@ -0,0 +1,76 @@ +package x509 + +import ( + "errors" + + "github.com/hashicorp/go-multierror" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" +) + +// enableAgentAuthentication updates the provided auth struct and configures x509 authentication based on the provided +// values and configuration options. +func enableAgentAuthentication(auth *automationconfig.Auth, agentKeyFileContents string, agentName string, opts authtypes.Options) error { + if err := validateAgentOptions(opts); err != nil { + return err + } + + auth.Disabled = false + auth.AuthoritativeSet = opts.AuthoritativeSet + auth.KeyFile = opts.KeyFile + + // the contents the keyfile should have, this file is owned and managed + // by the agent + auth.Key = agentKeyFileContents + + // windows file is specified to pass validation, this will never be used + auth.KeyFileWindows = constants.AutomationAgentWindowsKeyFilePath + + auth.AutoAuthMechanisms = []string{constants.X509} + + // the username of the MongoDB Agent + auth.AutoUser = agentName + + // the mechanism used by the Agent + auth.AutoAuthMechanism = constants.X509 + + // the password for the Agent user + auth.AutoPwd = "" + + return nil +} + +func enableClientAuthentication(auth *automationconfig.Auth, opts authtypes.Options, users []automationconfig.MongoDBUser) error { + if err := validateClientOptions(opts); err != nil { + return err + } + + if !contains.X509(auth.DeploymentAuthMechanisms) { + auth.DeploymentAuthMechanisms = append(auth.DeploymentAuthMechanisms, constants.X509) + } + + auth.Users = append(auth.Users, users...) + return nil +} + +// validateAgentOptions validates that all the agent required fields have +// a non-empty value. +func validateAgentOptions(opts authtypes.Options) error { + var errs error + if opts.AutoAuthMechanism == "" { + errs = multierror.Append(errs, errors.New("AutoAuthMechanism must not be empty")) + } + return errs +} + +// validateClientOptions validates that all the deployment required fields have +// a non-empty value. 
+func validateClientOptions(opts authtypes.Options) error { + var errs error + if len(opts.AuthMechanisms) == 0 { + errs = multierror.Append(errs, errors.New("at least one AuthMechanism must be specified")) + } + return errs +} diff --git a/pkg/authentication/x509/x509_enabler_test.go b/pkg/authentication/x509/x509_enabler_test.go new file mode 100644 index 000000000..d0e3a4b33 --- /dev/null +++ b/pkg/authentication/x509/x509_enabler_test.go @@ -0,0 +1,107 @@ +package x509 + +import ( + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" +) + +func TestX509AutomationConfig(t *testing.T) { + t.Run("Only X509", func(t *testing.T) { + auth := automationconfig.Auth{} + opts := authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.X509}, + AutoAuthMechanism: constants.X509, + } + err := configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + + t.Run("Authentication is correctly configured", func(t *testing.T) { + assert.Equal(t, "CN=my-agent,O=MongoDB", auth.AutoUser) + assert.Equal(t, "keyfilecontents", auth.Key) + assert.Equal(t, "", auth.AutoPwd) + assert.Equal(t, constants.X509, auth.AutoAuthMechanism) + assert.Len(t, auth.DeploymentAuthMechanisms, 1) + assert.Len(t, auth.AutoAuthMechanisms, 1) + assert.Equal(t, []string{constants.X509}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.X509}, auth.AutoAuthMechanisms) + assert.Equal(t, constants.AutomationAgentKeyFilePathInContainer, auth.KeyFile) + assert.Equal(t, constants.AutomationAgentWindowsKeyFilePath, auth.KeyFileWindows) + }) + t.Run("Subsequent configuration doesn't add to deployment auth 
mechanisms", func(t *testing.T) { + err := configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + assert.Equal(t, []string{constants.X509}, auth.DeploymentAuthMechanisms) + }) + }) + + t.Run("X509 and SHA-256", func(t *testing.T) { + auth := automationconfig.Auth{} + opts := authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.X509, constants.Sha256}, + AutoAuthMechanism: constants.X509, + } + err := configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + + t.Run("Authentication is correctly configured", func(t *testing.T) { + assert.Equal(t, "CN=my-agent,O=MongoDB", auth.AutoUser) + assert.Equal(t, "keyfilecontents", auth.Key) + assert.Equal(t, "", auth.AutoPwd) + assert.Equal(t, constants.X509, auth.AutoAuthMechanism) + assert.Len(t, auth.DeploymentAuthMechanisms, 1) + assert.Len(t, auth.AutoAuthMechanisms, 1) + assert.Equal(t, []string{constants.X509}, auth.DeploymentAuthMechanisms) + assert.Equal(t, []string{constants.X509}, auth.AutoAuthMechanisms) + assert.Equal(t, constants.AutomationAgentKeyFilePathInContainer, auth.KeyFile) + assert.Equal(t, constants.AutomationAgentWindowsKeyFilePath, auth.KeyFileWindows) + }) + t.Run("Subsequent configuration doesn't add to deployment auth mechanisms", func(t *testing.T) { + err := configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.NoError(t, err) + assert.Equal(t, []string{constants.X509}, auth.DeploymentAuthMechanisms) + }) + }) + + t.Run("Fail validation", func(t *testing.T) { + auth := automationconfig.Auth{} + opts := authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{}, + AutoAuthMechanism: 
constants.X509, + } + err := configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.Error(t, err) + + auth = automationconfig.Auth{} + opts = authtypes.Options{ + AuthoritativeSet: false, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: []string{constants.X509}, + AutoAuthMechanism: "", + } + err = configureInAutomationConfig(&auth, "keyfilecontents", "CN=my-agent,O=MongoDB", []automationconfig.MongoDBUser{}, opts) + assert.Error(t, err) + }) +} + +// configureInAutomationConfig updates the provided auth struct and fully configures X.509 authentication. +func configureInAutomationConfig(auth *automationconfig.Auth, agentKeyFile, agentName string, users []automationconfig.MongoDBUser, opts authtypes.Options) error { + err := enableAgentAuthentication(auth, agentKeyFile, agentName, opts) + if err != nil { + return err + } + err = enableClientAuthentication(auth, opts, users) + if err != nil { + return err + } + return nil +} diff --git a/pkg/authentication/x509/x509_test.go b/pkg/authentication/x509/x509_test.go new file mode 100644 index 000000000..ed4f728fc --- /dev/null +++ b/pkg/authentication/x509/x509_test.go @@ -0,0 +1,272 @@ +package x509 + +import ( + "context" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/mocks" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" +) + +func TestEnable(t *testing.T) { + ctx := context.Background() + t.Run("X509 agent", func(t *testing.T) { + auth := automationconfig.Auth{} + mdb := buildX509Configurable("mdb", 
mocks.BuildX509MongoDBUser("my-user"), mocks.BuildScramMongoDBUser("my-scram-user")) + + agentSecret := CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) + keyfileSecret := secret.Builder(). + SetName(mdb.GetAgentKeyfileSecretNamespacedName().Name). + SetNamespace(mdb.GetAgentKeyfileSecretNamespacedName().Namespace). + SetField(constants.AgentKeyfileKey, "RuPeMaIe2g0SNTTa"). + Build() + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret, keyfileSecret) + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + expected := automationconfig.Auth{ + Users: []automationconfig.MongoDBUser{ + { + Mechanisms: []string{}, + Roles: []automationconfig.Role{ + { + Role: "readWrite", + Database: "admin", + }, + { + Role: "clusterAdmin", + Database: "admin", + }, + }, + Username: "CN=my-user,OU=organizationalunit,O=organization", + Database: "$external", + AuthenticationRestrictions: []string{}, + }, + }, + Disabled: false, + AuthoritativeSet: false, + AutoAuthMechanisms: []string{constants.X509}, + AutoAuthMechanism: constants.X509, + DeploymentAuthMechanisms: []string{constants.X509}, + AutoUser: "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", + Key: "RuPeMaIe2g0SNTTa", + KeyFile: "/path/to/keyfile", + KeyFileWindows: constants.AutomationAgentWindowsKeyFilePath, + AutoPwd: "", + } + + assert.Equal(t, expected, auth) + }) + t.Run("SCRAM agent", func(t *testing.T) { + auth := automationconfig.Auth{} + mdb := buildScramConfigurable("mdb", mocks.BuildX509MongoDBUser("my-user"), mocks.BuildScramMongoDBUser("my-scram-user")) + + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter() + + err := Enable(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) + + expected := automationconfig.Auth{ + Users: []automationconfig.MongoDBUser{{ + Mechanisms: []string{}, + Roles: []automationconfig.Role{ + { + Role: "readWrite", + 
Database: "admin", + }, + { + Role: "clusterAdmin", + Database: "admin", + }, + }, + Username: "CN=my-user,OU=organizationalunit,O=organization", + Database: "$external", + AuthenticationRestrictions: []string{}, + }}, + Disabled: false, + AuthoritativeSet: false, + DeploymentAuthMechanisms: []string{constants.X509}, + } + + assert.Equal(t, expected, auth) + }) +} + +func Test_ensureAgent(t *testing.T) { + ctx := context.Background() + auth := automationconfig.Auth{} + mdb := buildX509Configurable("mdb") + secrets := mocks.NewMockedSecretGetUpdateCreateDeleter() + + err := ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.Error(t, err) + + auth = automationconfig.Auth{} + agentSecret := CreateAgentCertificateSecret("tls.pem", false, mdb.AgentCertificateSecretNamespacedName()) + secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) + + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.Error(t, err) + assert.ErrorContains(t, err, "key \"tls.crt\" not present in the Secret") + + auth = automationconfig.Auth{} + agentSecret = CreateAgentCertificateSecret("tls.crt", true, mdb.AgentCertificateSecretNamespacedName()) + secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) + + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.Error(t, err) + assert.ErrorContains(t, err, "x509: malformed certificate") + + auth = automationconfig.Auth{} + agentSecret = CreateAgentCertificateSecret("tls.crt", false, mdb.AgentCertificateSecretNamespacedName()) + secrets = mocks.NewMockedSecretGetUpdateCreateDeleter(agentSecret) + + err = ensureAgent(ctx, &auth, secrets, mdb, mdb.AgentCertificateSecretNamespacedName()) + assert.NoError(t, err) +} + +func Test_convertMongoDBResourceUsersToAutomationConfigUsers(t *testing.T) { + type args struct { + mdb authtypes.Configurable + } + tests := []struct { + name string + args args + want 
[]automationconfig.MongoDBUser + }{ + { + name: "Only x.509 users", + args: args{mdb: buildX509Configurable("mongodb", mocks.BuildX509MongoDBUser("my-user-1"), mocks.BuildX509MongoDBUser("my-user-2"))}, + want: []automationconfig.MongoDBUser{ + { + Mechanisms: []string{}, + Roles: []automationconfig.Role{ + { + Role: "readWrite", + Database: "admin", + }, + { + Role: "clusterAdmin", + Database: "admin", + }, + }, + Username: "CN=my-user-1,OU=organizationalunit,O=organization", + Database: "$external", + AuthenticationRestrictions: []string{}, + }, + { + Mechanisms: []string{}, + Roles: []automationconfig.Role{ + { + Role: "readWrite", + Database: "admin", + }, + { + Role: "clusterAdmin", + Database: "admin", + }, + }, + Username: "CN=my-user-2,OU=organizationalunit,O=organization", + Database: "$external", + AuthenticationRestrictions: []string{}, + }, + }, + }, + { + name: "Only SCRAM users", + args: args{mdb: buildX509Configurable("mongodb", mocks.BuildScramMongoDBUser("my-user-1"), mocks.BuildScramMongoDBUser("my-user-2"))}, + want: nil, + }, + { + name: "X.509 and SCRAM users", + args: args{mdb: buildX509Configurable("mongodb", mocks.BuildX509MongoDBUser("my-user-1"), mocks.BuildScramMongoDBUser("my-user-2"))}, + want: []automationconfig.MongoDBUser{ + { + Mechanisms: []string{}, + Roles: []automationconfig.Role{ + { + Role: "readWrite", + Database: "admin", + }, + { + Role: "clusterAdmin", + Database: "admin", + }, + }, + Username: "CN=my-user-1,OU=organizationalunit,O=organization", + Database: "$external", + AuthenticationRestrictions: []string{}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertMongoDBResourceUsersToAutomationConfigUsers(tt.args.mdb) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("convertMongoDBResourceUsersToAutomationConfigUsers() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_readAgentSubjectsFromCert(t *testing.T) { + agentCert, _, _ := CreateAgentCertificate() + + 
subjectName, err := readAgentSubjectsFromCert(agentCert) + assert.NoError(t, err) + + assert.Equal(t, "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", subjectName) +} + +func buildX509Configurable(name string, users ...authtypes.User) mocks.MockConfigurable { + return mocks.NewMockConfigurable( + authtypes.Options{ + AuthoritativeSet: false, + KeyFile: "/path/to/keyfile", + AuthMechanisms: []string{constants.X509}, + AutoAuthMechanism: constants.X509, + }, + users, + types.NamespacedName{ + Name: name, + Namespace: "default", + }, + []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "mdbc", + Name: "my-ref", + }}, + ) +} + +func buildScramConfigurable(name string, users ...authtypes.User) mocks.MockConfigurable { + return mocks.NewMockConfigurable( + authtypes.Options{ + AuthoritativeSet: false, + KeyFile: "/path/to/keyfile", + AuthMechanisms: []string{constants.Sha256, constants.X509}, + AgentName: constants.AgentName, + AutoAuthMechanism: constants.Sha256, + }, + users, + types.NamespacedName{ + Name: name, + Namespace: "default", + }, + []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "mdbc", + Name: "my-ref", + }}, + ) +} diff --git a/pkg/automationconfig/automation_config.go b/pkg/automationconfig/automation_config.go index 837c23efd..855985108 100644 --- a/pkg/automationconfig/automation_config.go +++ b/pkg/automationconfig/automation_config.go @@ -1,17 +1,316 @@ package automationconfig import ( - "path" + "bytes" + "encoding/json" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials" + "github.com/spf13/cast" + "github.com/stretchr/objx" + "go.uber.org/zap" +) + +const ( + Mongod ProcessType = "mongod" + DefaultMongoDBDataDir string = "/data" + DefaultDBPort int = 27017 + DefaultAgentLogPath string = "/var/log/mongodb-mms-automation" + DefaultAgentLogFile string = "/var/log/mongodb-mms-automation/automation-agent.log" + DefaultAgentMaxLogFileDurationHours int = 24 +) + +// +kubebuilder:object:generate=true +type 
MemberOptions struct { + Votes *int `json:"votes,omitempty"` + Priority *string `json:"priority,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +func (o *MemberOptions) GetVotes() int { + if o.Votes != nil { + return cast.ToInt(o.Votes) + } + return 1 +} + +func (o *MemberOptions) GetPriority() float32 { + if o.Priority != nil { + return cast.ToFloat32(o.Priority) + } + return 1.0 +} + +func (o *MemberOptions) GetTags() map[string]string { + return o.Tags +} + +type AutomationConfig struct { + Version int `json:"version"` + Processes []Process `json:"processes"` + ReplicaSets []ReplicaSet `json:"replicaSets"` + Auth Auth `json:"auth"` + Prometheus *Prometheus `json:"prometheus,omitempty"` + + // TLSConfig and SSLConfig exist to allow configuration of older agents which accept the "ssl" field rather or "tls" + // only one of these should be set. + TLSConfig *TLS `json:"tls,omitempty"` + SSLConfig *TLS `json:"ssl,omitempty"` + + Versions []MongoDbVersionConfig `json:"mongoDbVersions"` + BackupVersions []BackupVersion `json:"backupVersions"` + MonitoringVersions []MonitoringVersion `json:"monitoringVersions"` + Options Options `json:"options"` + Roles []CustomRole `json:"roles,omitempty"` +} + +func (ac *AutomationConfig) GetProcessByName(name string) *Process { + for i := 0; i < len(ac.Processes); i++ { + if ac.Processes[i].Name == name { + return &ac.Processes[i] + } + } + + return nil +} + +type BackupVersion struct { + BaseUrl string `json:"baseUrl"` +} + +type MonitoringVersion struct { + Hostname string `json:"hostname"` + Name string `json:"name"` + BaseUrl string `json:"baseUrl"` + AdditionalParams map[string]string `json:"additionalParams,omitempty"` +} + +// CrdLogRotate is the crd definition of LogRotate including fields in strings while the agent supports them as float64 +type CrdLogRotate struct { + LogRotate `json:",inline"` + // Maximum size for an individual log file before rotation. 
+ // The string needs to be able to be converted to float64. + // Fractional values of MB are supported. + SizeThresholdMB string `json:"sizeThresholdMB"` + // Maximum percentage of the total disk space these log files should take up. + // The string needs to be able to be converted to float64 + // +optional + PercentOfDiskspace string `json:"percentOfDiskspace,omitempty"` +} + +// AcLogRotate is the internal agent representation of LogRotate +type AcLogRotate struct { + LogRotate `json:",inline"` + // Maximum size for an individual log file before rotation. + SizeThresholdMB float64 `json:"sizeThresholdMB"` + // Maximum percentage of the total disk space these log files should take up. + // +optional + PercentOfDiskspace float64 `json:"percentOfDiskspace,omitempty"` +} + +// LogRotate matches the setting defined here: +// https://www.mongodb.com/docs/ops-manager/current/reference/cluster-configuration/#mongodb-instances +// and https://www.mongodb.com/docs/rapid/reference/command/logRotate/#mongodb-dbcommand-dbcmd.logRotate +// +kubebuilder:object:generate=true +type LogRotate struct { + // maximum hours for an individual log file before rotation + TimeThresholdHrs int `json:"timeThresholdHrs"` + // maximum number of log files to leave uncompressed + // +optional + NumUncompressed int `json:"numUncompressed,omitempty"` + // maximum number of log files to have total + // +optional + NumTotal int `json:"numTotal,omitempty"` + // set to 'true' to have the Automation Agent rotate the audit files along + // with mongodb log files + // +optional + IncludeAuditLogsWithMongoDBLogs bool `json:"includeAuditLogsWithMongoDBLogs,omitempty"` +} + +type Process struct { + Name string `json:"name"` + Disabled bool `json:"disabled"` + HostName string `json:"hostname"` + Args26 objx.Map `json:"args2_6"` + FeatureCompatibilityVersion string `json:"featureCompatibilityVersion"` + ProcessType ProcessType `json:"processType"` + Version string `json:"version"` + AuthSchemaVersion int 
`json:"authSchemaVersion"` + LogRotate *AcLogRotate `json:"logRotate,omitempty"` + AuditLogRotate *AcLogRotate `json:"auditLogRotate,omitempty"` +} + +func (p *Process) SetPort(port int) *Process { + return p.SetArgs26Field("net.port", port) +} + +func (p *Process) GetPort() int { + if p.Args26 == nil { + return 0 + } + + // Args26 map could be manipulated from the code, e.g. via SetPort (e.g. in unit tests) - then it will be as int, + // or it could be deserialized from JSON and then integer in an untyped map will be deserialized as float64. + // It's behavior of https://pkg.go.dev/encoding/json#Unmarshal that is converting JSON integers as float64. + netPortValue := p.Args26.Get("net.port") + if netPortValue.IsFloat64() { + return int(netPortValue.Float64()) + } + + return netPortValue.Int() +} + +func (p *Process) SetStoragePath(storagePath string) *Process { + return p.SetArgs26Field("storage.dbPath", storagePath) +} + +func (p *Process) SetReplicaSetName(replSetName string) *Process { + return p.SetArgs26Field("replication.replSetName", replSetName) +} + +func (p *Process) SetSystemLog(systemLog SystemLog) *Process { + return p.SetArgs26Field("systemLog.path", systemLog.Path). + // since Destination is a go type wrapper around string, we will need to force it back to string otherwise + // SetArgs value boxing takes the upper (Destination) type instead of string. + SetArgs26Field("systemLog.destination", string(systemLog.Destination)). + SetArgs26Field("systemLog.logAppend", systemLog.LogAppend) +} + +// SetLogRotate sets the acLogRotate by converting the CrdLogRotate to an acLogRotate. +func (p *Process) SetLogRotate(lr *CrdLogRotate) *Process { + p.LogRotate = ConvertCrdLogRotateToAC(lr) + return p +} + +// SetAuditLogRotate sets the acLogRotate by converting the CrdLogRotate to an acLogRotate. 
+func (p *Process) SetAuditLogRotate(lr *CrdLogRotate) *Process { + p.AuditLogRotate = ConvertCrdLogRotateToAC(lr) + return p +} + +// ConvertCrdLogRotateToAC converts a CrdLogRotate to an AcLogRotate representation. +func ConvertCrdLogRotateToAC(lr *CrdLogRotate) *AcLogRotate { + if lr == nil { + return &AcLogRotate{} + } + + return &AcLogRotate{ + LogRotate: LogRotate{ + TimeThresholdHrs: lr.TimeThresholdHrs, + NumUncompressed: lr.NumUncompressed, + NumTotal: lr.NumTotal, + IncludeAuditLogsWithMongoDBLogs: lr.IncludeAuditLogsWithMongoDBLogs, + }, + SizeThresholdMB: cast.ToFloat64(lr.SizeThresholdMB), + PercentOfDiskspace: cast.ToFloat64(lr.PercentOfDiskspace), + } +} + +func (p *Process) SetWiredTigerCache(cacheSizeGb *float32) *Process { + if cacheSizeGb == nil { + return p + } + return p.SetArgs26Field("storage.wiredTiger.engineConfig.cacheSizeGB", cacheSizeGb) +} + +// SetArgs26Field should be used whenever any args26 field needs to be set. It ensures +// that the args26 map is non nil and assigns the given value. 
+func (p *Process) SetArgs26Field(fieldName string, value interface{}) *Process { + p.ensureArgs26() + p.Args26.Set(fieldName, value) + return p +} + +func (p *Process) ensureArgs26() { + if p.Args26 == nil { + p.Args26 = objx.New(map[string]interface{}{}) + } +} + +type TLSMode string + +const ( + TLSModeDisabled TLSMode = "disabled" + TLSModeAllowed TLSMode = "allowTLS" + TLSModePreferred TLSMode = "preferTLS" + TLSModeRequired TLSMode = "requireTLS" ) type ProcessType string +type Destination string + const ( - Mongod ProcessType = "mongod" - DefaultMongoDBDataDir = "/data" - DefaultAgentLogPath = "/var/log/mongodb-mms-automation" + File Destination = "file" + Syslog Destination = "syslog" ) +type SystemLog struct { + Destination Destination `json:"destination"` + Path string `json:"path"` + LogAppend bool `json:"logAppend"` +} + +type WiredTiger struct { + EngineConfig EngineConfig `json:"engineConfig"` +} + +type EngineConfig struct { + CacheSizeGB float32 `json:"cacheSizeGB"` +} + +// ReplSetForceConfig setting enables us to force reconfigure automation agent when the MongoDB deployment +// is in a broken state - for ex: doesn't have a primary. 
+// More info: https://www.mongodb.com/docs/ops-manager/current/reference/api/automation-config/automation-config-parameters/#replica-sets +type ReplSetForceConfig struct { + CurrentVersion int64 `json:"currentVersion"` +} + +type ReplicaSet struct { + Id string `json:"_id"` + Members []ReplicaSetMember `json:"members"` + ProtocolVersion string `json:"protocolVersion"` + NumberArbiters int `json:"numberArbiters"` + Force *ReplSetForceConfig `json:"force,omitempty"` + Settings map[string]interface{} `json:"settings,omitempty"` +} + +type ReplicaSetMember struct { + Id int `json:"_id"` + Host string `json:"host"` + ArbiterOnly bool `json:"arbiterOnly"` + Horizons ReplicaSetHorizons `json:"horizons,omitempty"` + // this is duplicated here instead of using MemberOptions because type of priority + // is different in AC from the CR(CR don't support float) - hence all the members are declared + // separately + Votes *int `json:"votes,omitempty"` + Priority *float32 `json:"priority,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +type ReplicaSetHorizons map[string]string + +// newReplicaSetMember returns a ReplicaSetMember. +func newReplicaSetMember(name string, id int, horizons ReplicaSetHorizons, isArbiter bool, isVotingMember bool) ReplicaSetMember { + // ensure that the number of voting members in the replica set is not more than 7 + // as this is the maximum number of voting members. + votes := 0 + priority := float32(0.0) + + if isVotingMember { + votes = 1 + priority = 1 + } + + return ReplicaSetMember{ + Id: id, + Host: name, + ArbiterOnly: isArbiter, + Horizons: horizons, + Votes: &votes, + Priority: &priority, + } +} + type Auth struct { // Users is a list which contains the desired users at the project level. 
Users []MongoDBUser `json:"usersWanted,omitempty"` @@ -21,8 +320,7 @@ type Auth struct { // AutoAuthMechanisms is a list of auth mechanisms the Automation Agent is able to use AutoAuthMechanisms []string `json:"autoAuthMechanisms,omitempty"` - // AutoAuthMechanism is the currently active agent authentication mechanism. This is a read only - // field + // AutoAuthMechanism is the currently active agent authentication mechanism. This is a read only field AutoAuthMechanism string `json:"autoAuthMechanism"` // DeploymentAuthMechanisms is a list of possible auth mechanisms that can be used within deployments DeploymentAuthMechanisms []string `json:"deploymentAuthMechanisms,omitempty"` @@ -36,136 +334,114 @@ type Auth struct { KeyFileWindows string `json:"keyfileWindows,omitempty"` // AutoPwd is a required field when going from `Disabled=false` to `Disabled=true` AutoPwd string `json:"autoPwd,omitempty"` + // UsersDeleted is an array of DeletedUser objects that define the authenticated users to be deleted from specified databases + UsersDeleted []DeletedUser `json:"usersDeleted,omitempty"` } -func DisabledAuth() Auth { - return Auth{ - Users: make([]MongoDBUser, 0), - AutoAuthMechanisms: make([]string, 0), - DeploymentAuthMechanisms: make([]string, 0), - AutoAuthMechanism: "MONGODB-CR", - Disabled: true, - } -} - -type MongoDBUser struct { +type DeletedUser struct { + // User is the username that should be deleted + User string `json:"user,omitempty"` + // Dbs is the array of database names from which the authenticated user should be deleted + Dbs []string `json:"dbs,omitempty"` } -type Process struct { - Name string `json:"name"` - HostName string `json:"hostname"` - Args26 Args26 `json:"args2_6"` - FeatureCompatibilityVersion string `json:"featureCompatibilityVersion"` - ProcessType ProcessType `json:"processType"` - Version string `json:"version"` - AuthSchemaVersion int `json:"authSchemaVersion"` - SystemLog SystemLog `json:"systemLog"` - WiredTiger WiredTiger 
`json:"wiredTiger"` +type Prometheus struct { + Enabled bool `json:"enabled"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + PasswordHash string `json:"passwordHash,omitempty"` + PasswordSalt string `json:"passwordSalt,omitempty"` + Scheme string `json:"scheme"` + TLSPemPath string `json:"tlsPemPath"` + TLSPemPassword string `json:"tlsPemPassword"` + Mode string `json:"mode"` + ListenAddress string `json:"listenAddress"` + MetricsPath string `json:"metricsPath"` } -type SystemLog struct { - Destination string `json:"destination"` - Path string `json:"path"` -} - -func newProcess(name, hostName, version, replSetName string, opts ...func(process *Process)) Process { - p := Process{ - Name: name, - HostName: hostName, - FeatureCompatibilityVersion: "4.0", - ProcessType: Mongod, - Version: version, - SystemLog: SystemLog{ - Destination: "file", - Path: path.Join(DefaultAgentLogPath, "/mongodb.log"), - }, - AuthSchemaVersion: 5, - Args26: Args26{ - Net: Net{ - Port: 27017, - }, - Storage: Storage{ - DBPath: DefaultMongoDBDataDir, - }, - Replication: Replication{ReplicaSetName: replSetName}, - }, - } - - for _, opt := range opts { - opt(&p) +func NewDefaultPrometheus(username string) Prometheus { + return Prometheus{ + Enabled: true, + Username: username, + Scheme: "http", + Mode: "opsManager", + ListenAddress: "0.0.0.0:9216", + MetricsPath: "/metrics", } - - return p } -type Replication struct { - ReplicaSetName string `json:"replSetName"` +type CustomRole struct { + Role string `json:"role"` + DB string `json:"db"` + Privileges []Privilege `json:"privileges"` + Roles []Role `json:"roles"` + AuthenticationRestrictions []AuthenticationRestriction `json:"authenticationRestrictions,omitempty"` } -type Storage struct { - DBPath string `json:"dbPath"` +type Privilege struct { + Resource Resource `json:"resource"` + Actions []string `json:"actions"` } -type WiredTiger struct { - EngineConfig EngineConfig `json:"engineConfig"` +type 
Resource struct { + DB *string `json:"db,omitempty"` + Collection *string `json:"collection,omitempty"` + AnyResource bool `json:"anyResource,omitempty"` + Cluster bool `json:"cluster,omitempty"` } -type EngineConfig struct { - CacheSizeGB float32 `json:"cacheSizeGB"` +type AuthenticationRestriction struct { + ClientSource []string `json:"clientSource"` + ServerAddress []string `json:"serverAddress"` } -type LogRotate struct { - SizeThresholdMB int `json:"sizeThresholdMB"` - TimeThresholdHrs int `json:"timeThresholdHrs"` -} +type MongoDBUser struct { + Mechanisms []string `json:"mechanisms"` + Roles []Role `json:"roles"` + Username string `json:"user"` + Database string `json:"db"` + AuthenticationRestrictions []string `json:"authenticationRestrictions"` -type Args26 struct { - Net Net `json:"net"` - Security Security `json:"security"` - Storage Storage `json:"storage"` - Replication Replication `json:"replication"` + // ScramShaCreds are generated by the operator. + ScramSha256Creds *scramcredentials.ScramCreds `json:"scramSha256Creds,omitempty"` + ScramSha1Creds *scramcredentials.ScramCreds `json:"scramSha1Creds,omitempty"` } -type Net struct { - Port int `json:"port"` +type Role struct { + Role string `json:"role"` + Database string `json:"db"` } -type Security struct { - ClusterAuthMode string `json:"clusterAuthMode,omitempty"` +func disabledAuth() Auth { + return Auth{ + Users: make([]MongoDBUser, 0), + AutoAuthMechanisms: make([]string, 0), + DeploymentAuthMechanisms: make([]string, 0), + AutoAuthMechanism: "MONGODB-CR", + Disabled: true, + } } -type ReplicaSet struct { - Id string `json:"_id"` - Members []ReplicaSetMember `json:"members"` - ProtocolVersion string `json:"protocolVersion"` -} +type ClientCertificateMode string -type ReplicaSetMember struct { - Id int `json:"_id"` - Host string `json:"host"` - Priority int `json:"priority"` - ArbiterOnly bool `json:"arbiterOnly"` - Votes int `json:"votes"` -} +const ( + ClientCertificateModeOptional 
ClientCertificateMode = "OPTIONAL" + ClientCertificateModeRequired ClientCertificateMode = "REQUIRE" +) -func newReplicaSetMember(p Process, id int) ReplicaSetMember { - return ReplicaSetMember{ - Id: id, - Host: p.Name, - Priority: 1, - ArbiterOnly: false, - Votes: 1, - } +type TLS struct { + CAFilePath string `json:"CAFilePath"` + AutoPEMKeyFilePath string `json:"autoPEMKeyFilePath,omitempty"` + ClientCertificateMode ClientCertificateMode `json:"clientCertificateMode"` } -type AutomationConfig struct { - Version int `json:"version"` - Processes []Process `json:"processes"` - ReplicaSets []ReplicaSet `json:"replicaSets"` - Auth Auth `json:"auth"` +type ToolsVersion struct { + Version string `json:"version"` + URLs map[string]map[string]string `json:"urls"` +} - Versions []MongoDbVersionConfig `json:"mongoDbVersions"` - Options Options `json:"options"` +type Options struct { + DownloadBase string `json:"downloadBase"` } type VersionManifest struct { @@ -173,27 +449,6 @@ type VersionManifest struct { Versions []MongoDbVersionConfig `json:"versions"` } -// BuildsForVersion returns the MongoDbVersionConfig containing all of the version informatioon -// for the given mongodb version provided -func (v VersionManifest) BuildsForVersion(version string) MongoDbVersionConfig { - var builds []BuildConfig - for _, versionConfig := range v.Versions { - if versionConfig.Name != version { - continue - } - builds = versionConfig.Builds - break - } - return MongoDbVersionConfig{ - Name: version, - Builds: builds, - } -} - -type Options struct { - DownloadBase string `json:"downloadBase"` -} - type BuildConfig struct { Platform string `json:"platform"` Url string `json:"url"` @@ -210,3 +465,49 @@ type MongoDbVersionConfig struct { Name string `json:"name"` Builds []BuildConfig `json:"builds"` } + +// AreEqual returns whether the given AutomationConfigs have the same contents. +// the comparison does not take the version into account. 
+func AreEqual(ac0, ac1 AutomationConfig) (bool, error) { + // Here we compare the bytes of the two automationconfigs, + // we can't use reflect.DeepEqual() as it treats nil entries as different from empty ones, + // and in the AutomationConfig Struct we use omitempty to set empty field to nil + // The agent requires the nil value we provide, otherwise the agent attempts to configure authentication. + ac0.Version = ac1.Version + ac0Bytes, err := json.Marshal(ac0) + if err != nil { + return false, err + } + + ac1Bytes, err := json.Marshal(ac1) + if err != nil { + return false, err + } + return bytes.Equal(ac0Bytes, ac1Bytes), nil +} + +func FromBytes(acBytes []byte) (AutomationConfig, error) { + ac := AutomationConfig{} + if err := json.Unmarshal(acBytes, &ac); err != nil { + return AutomationConfig{}, err + } + return ac, nil +} + +func ConfigureAgentConfiguration(systemLog *SystemLog, logRotate *CrdLogRotate, auditLR *CrdLogRotate, p *Process) { + if systemLog != nil { + p.SetSystemLog(*systemLog) + } + + if logRotate != nil { + if systemLog == nil { + zap.S().Warn("Configuring LogRotate without systemLog will not work") + } + if systemLog != nil && systemLog.Destination == Syslog { + zap.S().Warn("Configuring LogRotate with systemLog.Destination = Syslog will not work") + } + p.SetLogRotate(logRotate) + p.SetAuditLogRotate(auditLR) + } + +} diff --git a/pkg/automationconfig/automation_config_builder.go b/pkg/automationconfig/automation_config_builder.go index 71562c8c9..3091734dc 100644 --- a/pkg/automationconfig/automation_config_builder.go +++ b/pkg/automationconfig/automation_config_builder.go @@ -2,63 +2,164 @@ package automationconfig import ( "fmt" + "reflect" + "strings" + + "github.com/blang/semver" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/versions" + + "k8s.io/utils/ptr" ) type Topology string const ( - ReplicaSetTopology Topology = "ReplicaSet" + ReplicaSetTopology Topology = "ReplicaSet" + maxVotingMembers int = 7 + 
arbitersStartingIndex int = 100 ) -type Builder struct { - processes []Process - replicaSets []ReplicaSet - version int - auth Auth - members int - domain string - name string - fcv string - topology Topology - mongodbVersion string +type Modification func(*AutomationConfig) + +func NOOP() Modification { + return func(config *AutomationConfig) {} +} +type Builder struct { + processes []Process + replicaSets []ReplicaSet + replicaSetHorizons []ReplicaSetHorizons + members int + arbiters int + domain string + arbiterDomain string + name string + fcv string + topology Topology + isEnterprise bool + mongodbVersion string + previousAC AutomationConfig // MongoDB installable versions - versions []MongoDbVersionConfig + versions []MongoDbVersionConfig + backupVersions []BackupVersion + monitoringVersions []MonitoringVersion + options Options + processModifications []func(int, *Process) + modifications []Modification + auth *Auth + cafilePath string + sslConfig *TLS + tlsConfig *TLS + dataDir string + port int + memberOptions []MemberOptions + forceReconfigureToVersion *int64 + replicaSetId *string + settings map[string]interface{} } func NewBuilder() *Builder { return &Builder{ - processes: []Process{}, - replicaSets: []ReplicaSet{}, - versions: []MongoDbVersionConfig{}, + processes: []Process{}, + replicaSets: []ReplicaSet{}, + versions: []MongoDbVersionConfig{}, + modifications: []Modification{}, + backupVersions: []BackupVersion{}, + monitoringVersions: []MonitoringVersion{}, + processModifications: []func(int, *Process){}, + tlsConfig: nil, + sslConfig: nil, } } +func (b *Builder) SetMemberOptions(memberOptions []MemberOptions) *Builder { + b.memberOptions = memberOptions + return b +} + +func (b *Builder) SetOptions(options Options) *Builder { + b.options = options + return b +} + +func (b *Builder) IsEnterprise(isEnterprise bool) *Builder { + b.isEnterprise = isEnterprise + return b +} + func (b *Builder) SetTopology(topology Topology) *Builder { b.topology = 
topology return b } +func (b *Builder) SetReplicaSetHorizons(horizons []ReplicaSetHorizons) *Builder { + b.replicaSetHorizons = horizons + return b +} + +func (b *Builder) SetTLSConfig(tlsConfig TLS) *Builder { + b.tlsConfig = &tlsConfig + return b +} + +func (b *Builder) SetSSLConfig(sslConfig TLS) *Builder { + b.sslConfig = &sslConfig + return b +} + func (b *Builder) SetMembers(members int) *Builder { b.members = members return b } +func (b *Builder) SetArbiters(arbiters int) *Builder { + b.arbiters = arbiters + return b +} + func (b *Builder) SetDomain(domain string) *Builder { b.domain = domain return b } +func (b *Builder) SetArbiterDomain(domain string) *Builder { + b.arbiterDomain = domain + return b +} + func (b *Builder) SetName(name string) *Builder { b.name = name return b } +func (b *Builder) SetDataDir(dataDir string) *Builder { + b.dataDir = dataDir + return b +} + +// Deprecated: ports should be set via ProcessModification or Modification +func (b *Builder) SetPort(port int) *Builder { + b.port = port + return b +} + func (b *Builder) SetFCV(fcv string) *Builder { b.fcv = fcv return b } +func (b *Builder) SetCAFilePath(caFilePath string) *Builder { + b.cafilePath = caFilePath + return b +} + +func (b *Builder) AddVersions(versions []MongoDbVersionConfig) *Builder { + for _, v := range versions { + b.AddVersion(v) + } + return b +} + func (b *Builder) AddVersion(version MongoDbVersionConfig) *Builder { for idx := range version.Builds { if version.Builds[idx].Modules == nil { @@ -74,48 +175,318 @@ func (b *Builder) SetMongoDBVersion(version string) *Builder { return b } -func (b *Builder) SetAutomationConfigVersion(version int) *Builder { - b.version = version +func (b *Builder) SetBackupVersions(versions []BackupVersion) *Builder { + b.backupVersions = versions return b } -func (b *Builder) Build() AutomationConfig { - hostnames := make([]string, b.members) +func (b *Builder) SetMonitoringVersions(versions []MonitoringVersion) *Builder { + 
b.monitoringVersions = versions + return b +} + +func (b *Builder) SetPreviousAutomationConfig(previousAC AutomationConfig) *Builder { + b.previousAC = previousAC + return b +} + +func (b *Builder) SetAuth(auth Auth) *Builder { + b.auth = &auth + return b +} + +func (b *Builder) SetReplicaSetId(id *string) *Builder { + b.replicaSetId = id + return b +} + +func (b *Builder) SetSettings(settings map[string]interface{}) *Builder { + b.settings = settings + return b +} + +func (b *Builder) SetForceReconfigureToVersion(version int64) *Builder { + b.forceReconfigureToVersion = &version + return b +} + +func (b *Builder) AddProcessModification(f func(int, *Process)) *Builder { + b.processModifications = append(b.processModifications, f) + return b +} + +func (b *Builder) AddModifications(mod ...Modification) *Builder { + b.modifications = append(b.modifications, mod...) + return b +} + +func (b *Builder) setFeatureCompatibilityVersionIfUpgradeIsHappening() error { + // If we are upgrading, we can't increase featureCompatibilityVersion + // as that will make the agent never reach goal state + if len(b.previousAC.Processes) > 0 && b.fcv == "" { + + // Create a x.y.0 version from FCV x.y + previousFCV := b.previousAC.Processes[0].FeatureCompatibilityVersion + previousFCVsemver, err := semver.Make(fmt.Sprintf("%s.0", previousFCV)) + if err != nil { + return fmt.Errorf("can't compute semver version from previous FeatureCompatibilityVersion %s", previousFCV) + } + + currentVersionSemver, err := semver.Make(b.mongodbVersion) + if err != nil { + return fmt.Errorf("current MongoDB version is not a valid semver version: %s", b.mongodbVersion) + } + + // We would increase FCV here. 
+ // Note: in theory this will also catch upgrade like 4.2.0 -> 4.2.1 but we don't care about those + // as they would not change the FCV + if currentVersionSemver.GT(previousFCVsemver) { + b.fcv = previousFCV + } + } + return nil +} + +func (b *Builder) Build() (AutomationConfig, error) { + if err := b.setFeatureCompatibilityVersionIfUpgradeIsHappening(); err != nil { + return AutomationConfig{}, fmt.Errorf("can't build the automation config: %s", err) + } + + hostnames := make([]string, 0, b.members+b.arbiters) + + // Create hostnames for data-bearing nodes. They start from 0 for i := 0; i < b.members; i++ { - hostnames[i] = fmt.Sprintf("%s-%d.%s", b.name, i, b.domain) + hostnames = append(hostnames, fmt.Sprintf("%s-%d.%s", b.name, i, b.domain)) + } + + // Create hostnames for arbiters. They are added right after the regular members + for i := 0; i < b.arbiters; i++ { + // Arbiters will be in b.name-arb-svc service + hostnames = append(hostnames, fmt.Sprintf("%s-arb-%d.%s", b.name, i, b.arbiterDomain)) + } + + members := make([]ReplicaSetMember, b.members+b.arbiters) + processes := make([]Process, b.members+b.arbiters) + + if b.fcv != "" { + _, err := semver.Make(fmt.Sprintf("%s.0", b.fcv)) + + if err != nil { + return AutomationConfig{}, fmt.Errorf("invalid feature compatibility version: %s", err) + } + } + + if err := b.setFeatureCompatibilityVersionIfUpgradeIsHappening(); err != nil { + return AutomationConfig{}, fmt.Errorf("can't build the automation config: %s", err) + } + + dataDir := DefaultMongoDBDataDir + if b.dataDir != "" { + dataDir = b.dataDir + } + + fcv := versions.CalculateFeatureCompatibilityVersion(b.mongodbVersion) + if len(b.fcv) > 0 { + fcv = b.fcv + } + + mongoDBVersion := b.mongodbVersion + if b.isEnterprise { + if !strings.HasSuffix(mongoDBVersion, "-ent") { + mongoDBVersion = mongoDBVersion + "-ent" + } } - members := make([]ReplicaSetMember, b.members) - processes := make([]Process, b.members) for i, h := range hostnames { - process := 
newProcess(toHostName(b.name, i), h, b.mongodbVersion, b.name, withFCV(b.fcv)) - processes[i] = process - members[i] = newReplicaSetMember(process, i) + // Arbiters start counting from b.members and up + isArbiter := i >= b.members + replicaSetIndex := i + processIndex := i + + if isArbiter { + processIndex = i - b.members + // The arbiter's index will start on `arbitersStartingIndex` and increase + // from there. These ids must be kept constant if the data-bearing nodes + // change indexes, if for instance, they are scaled up and down. + // + replicaSetIndex = arbitersStartingIndex + processIndex + } + + // TODO: Replace with a Builder for Process. + process := &Process{ + Name: toProcessName(b.name, processIndex, isArbiter), + HostName: h, + FeatureCompatibilityVersion: fcv, + ProcessType: Mongod, + Version: mongoDBVersion, + AuthSchemaVersion: 5, + } + + // ports should be change via ProcessModification or Modification + // left for backwards compatibility, to be removed in the future + if b.port != 0 { + process.SetPort(b.port) + } + process.SetStoragePath(dataDir) + process.SetReplicaSetName(b.name) + + for _, mod := range b.processModifications { + mod(i, process) + } + + // ensure it has port set + if process.GetPort() == 0 { + process.SetPort(DefaultDBPort) + } + + processes[i] = *process + + var horizon ReplicaSetHorizons + if b.replicaSetHorizons != nil && i < len(b.replicaSetHorizons) { + horizon = b.replicaSetHorizons[i] + } + + // Arbiters can't be non-voting members + // If there are more than 7 (maxVotingMembers) members on this Replica Set + // those that lose right to vote should be the data-bearing nodes, not the + // arbiters. + isVotingMember := isArbiter || i < (maxVotingMembers-b.arbiters) + + // TODO: Replace with a Builder for ReplicaSetMember. 
+ members[i] = newReplicaSetMember(process.Name, replicaSetIndex, horizon, isArbiter, isVotingMember) + + if len(b.memberOptions) > i { + // override the member options if explicitly specified in the spec + members[i].Votes = b.memberOptions[i].Votes + members[i].Priority = ptr.To(b.memberOptions[i].GetPriority()) + members[i].Tags = b.memberOptions[i].Tags + } } - return AutomationConfig{ - Version: b.version, + if b.auth == nil { + disabled := disabledAuth() + b.auth = &disabled + } + + dummyConfig := buildDummyMongoDbVersionConfig(mongoDBVersion) + if !versionsContain(b.versions, dummyConfig) { + b.versions = append(b.versions, dummyConfig) + } + + var replSetForceConfig *ReplSetForceConfig + if b.forceReconfigureToVersion != nil { + replSetForceConfig = &ReplSetForceConfig{CurrentVersion: *b.forceReconfigureToVersion} + } + + replicaSetId := b.name + if b.replicaSetId != nil { + replicaSetId = *b.replicaSetId + } + + currentAc := AutomationConfig{ + Version: b.previousAC.Version, Processes: processes, ReplicaSets: []ReplicaSet{ { - Id: b.name, + Id: replicaSetId, Members: members, ProtocolVersion: "1", + NumberArbiters: b.arbiters, + Force: replSetForceConfig, + Settings: b.settings, }, }, - Versions: b.versions, - Options: Options{DownloadBase: "/var/lib/mongodb-mms-automation"}, - Auth: DisabledAuth(), + MonitoringVersions: b.monitoringVersions, + BackupVersions: b.backupVersions, + Versions: b.versions, + Options: b.options, + Auth: *b.auth, + TLSConfig: &TLS{ + ClientCertificateMode: ClientCertificateModeOptional, + CAFilePath: b.cafilePath, + }, + } + + if b.tlsConfig != nil { + currentAc.TLSConfig = b.tlsConfig + } + + if b.sslConfig != nil { + currentAc.SSLConfig = b.sslConfig + } + + // Apply all modifications + for _, modification := range b.modifications { + modification(¤tAc) + } + + areEqual, err := AreEqual(b.previousAC, currentAc) + if err != nil { + return AutomationConfig{}, err + } + + if !areEqual { + currentAc.Version++ } + + return 
currentAc, nil } -func toHostName(name string, index int) string { +func toProcessName(name string, index int, isArbiter bool) string { + if isArbiter { + return fmt.Sprintf("%s-arb-%d", name, index) + } return fmt.Sprintf("%s-%d", name, index) } -// Process functional options -func withFCV(fcv string) func(*Process) { - return func(process *Process) { - process.FeatureCompatibilityVersion = fcv +func versionsContain(versions []MongoDbVersionConfig, version MongoDbVersionConfig) bool { + for _, v := range versions { + if reflect.DeepEqual(v, version) { + return true + } + } + return false +} + +// buildDummyMongoDbVersionConfig create a MongoDbVersionConfig which +// will be valid for any version of MongoDB. This is used as a default if no +// versions are manually specified. +func buildDummyMongoDbVersionConfig(version string) MongoDbVersionConfig { + versionConfig := MongoDbVersionConfig{ + Name: version, + Builds: []BuildConfig{ + { + Platform: "linux", + Architecture: "amd64", + Flavor: "rhel", + Modules: []string{}, + }, + { + Platform: "linux", + Architecture: "amd64", + Flavor: "ubuntu", + Modules: []string{}, + }, + { + Platform: "linux", + Architecture: "aarch64", + Flavor: "ubuntu", + Modules: []string{}, + }, + { + Platform: "linux", + Architecture: "aarch64", + Flavor: "rhel", + Modules: []string{}, + }, + }, + } + + // if we are using an enterprise version of MongoDB, we need to add the enterprise string to the modules array. 
+ if strings.HasSuffix(version, "-ent") { + for i := range versionConfig.Builds { + versionConfig.Builds[i].Modules = append(versionConfig.Builds[i].Modules, "enterprise") + } } + return versionConfig } diff --git a/pkg/automationconfig/automation_config_secret.go b/pkg/automationconfig/automation_config_secret.go new file mode 100644 index 000000000..9ca6ed469 --- /dev/null +++ b/pkg/automationconfig/automation_config_secret.go @@ -0,0 +1,86 @@ +package automationconfig + +import ( + "context" + "encoding/json" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ConfigKey = "cluster-config.json" + +// ReadFromSecret returns the AutomationConfig present in the given Secret. If the Secret is not +// found, it is not considered an error and an empty AutomationConfig is returned. +func ReadFromSecret(ctx context.Context, secretGetter secret.Getter, secretNsName types.NamespacedName) (AutomationConfig, error) { + acSecret, err := secretGetter.GetSecret(ctx, secretNsName) + if err != nil { + if secret.SecretNotExist(err) { + err = nil + } + return AutomationConfig{}, err + } + return FromBytes(acSecret.Data[ConfigKey]) +} + +// EnsureSecret makes sure that the AutomationConfig secret exists with the desired config. +// if the desired config is the same as the current contents, no change is made. +// The most recent AutomationConfig is returned. If no change is made, it will return the existing one, if there +// is a change, the new AutomationConfig is returned. 
+func EnsureSecret(ctx context.Context, secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomationConfig AutomationConfig) (AutomationConfig, error) { + existingSecret, err := secretGetUpdateCreator.GetSecret(ctx, secretNsName) + if err != nil { + if secret.SecretNotExist(err) { + return createNewAutomationConfigSecret(ctx, secretGetUpdateCreator, secretNsName, owner, desiredAutomationConfig) + } + return AutomationConfig{}, err + } + + acBytes, err := json.Marshal(desiredAutomationConfig) + if err != nil { + return AutomationConfig{}, err + } + if existingAcBytes, ok := existingSecret.Data[ConfigKey]; !ok { + // the secret exists but the key is not present. We can update the secret + existingSecret.Data[ConfigKey] = acBytes + } else { + // the secret already exists, we should check to see if we're making any changes. + existingAutomationConfig, err := FromBytes(existingAcBytes) + if err != nil { + return AutomationConfig{}, err + } + // we are attempting to update with the same version, no change is required. + areEqual, err := AreEqual(desiredAutomationConfig, existingAutomationConfig) + if err != nil { + return AutomationConfig{}, err + } + if areEqual { + return existingAutomationConfig, nil + } + existingSecret.Data[ConfigKey] = acBytes + } + + existingSecret.Name = secretNsName.Name + existingSecret.Namespace = secretNsName.Namespace + return desiredAutomationConfig, secretGetUpdateCreator.UpdateSecret(ctx, existingSecret) +} + +func createNewAutomationConfigSecret(ctx context.Context, secretGetUpdateCreator secret.GetUpdateCreator, secretNsName types.NamespacedName, owner []metav1.OwnerReference, desiredAutomation AutomationConfig) (AutomationConfig, error) { + acBytes, err := json.Marshal(desiredAutomation) + if err != nil { + return AutomationConfig{}, err + } + + newSecret := secret.Builder(). + SetName(secretNsName.Name). + SetNamespace(secretNsName.Namespace). 
+ SetField(ConfigKey, string(acBytes)). + SetOwnerReferences(owner). + Build() + + if err := secretGetUpdateCreator.CreateSecret(ctx, newSecret); err != nil { + return AutomationConfig{}, err + } + return desiredAutomation, nil +} diff --git a/pkg/automationconfig/automation_config_secret_test.go b/pkg/automationconfig/automation_config_secret_test.go new file mode 100644 index 000000000..ed9a4af77 --- /dev/null +++ b/pkg/automationconfig/automation_config_secret_test.go @@ -0,0 +1,180 @@ +package automationconfig + +import ( + "context" + "encoding/json" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestEnsureSecret(t *testing.T) { + ctx := context.Background() + secretNsName := types.NamespacedName{Name: "ac-secret", Namespace: "test-namespace"} + desiredAutomationConfig, err := newAutomationConfig() + assert.NoError(t, err) + + t.Run("When the secret exists, but does not have the correct key, it is created correctly", func(t *testing.T) { + + s := secret.Builder(). + SetName(secretNsName.Name). + SetNamespace(secretNsName.Namespace). 
+ Build() + + secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} + + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + assert.NoError(t, err) + assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") + + acSecret, err := secretGetUpdateCreator.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + + assert.Contains(t, acSecret.Data, ConfigKey, "The secret of the given name should have been updated with the config.") + + }) + + t.Run("test LogRotate marshal and unmarshal", func(t *testing.T) { + ctx := context.Background() + + desiredAutomationConfig, err = NewBuilder().SetMembers(3).AddProcessModification(func(i_ int, p *Process) { + lr := &CrdLogRotate{ + SizeThresholdMB: "0.001", + LogRotate: LogRotate{ + TimeThresholdHrs: 1, + NumUncompressed: 1, + NumTotal: 1, + IncludeAuditLogsWithMongoDBLogs: false, + }, + PercentOfDiskspace: "1", + } + p.SetLogRotate(lr) + p.SetAuditLogRotate(lr) + }).Build() + assert.NoError(t, err) + + s := secret.Builder(). + SetName(secretNsName.Name). + SetNamespace(secretNsName.Namespace). 
+ Build() + + secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} + + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + assert.NoError(t, err) + assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") + + bytes := s.Data[ConfigKey] + acFromBytes, err := FromBytes(bytes) + assert.NoError(t, err) + assert.Equal(t, 0.001, acFromBytes.Processes[0].LogRotate.SizeThresholdMB) + assert.Equal(t, 0.001, acFromBytes.Processes[0].AuditLogRotate.SizeThresholdMB) + assert.Equal(t, float64(1), acFromBytes.Processes[0].LogRotate.PercentOfDiskspace) + assert.Equal(t, float64(1), acFromBytes.Processes[0].AuditLogRotate.PercentOfDiskspace) + }) + + t.Run("test LogRotate marshal and unmarshal if not set", func(t *testing.T) { + ctx := context.Background() + + desiredAutomationConfig, err = NewBuilder().SetMembers(3).AddProcessModification(func(i_ int, p *Process) {}).Build() + assert.NoError(t, err) + + s := secret.Builder(). + SetName(secretNsName.Name). + SetNamespace(secretNsName.Namespace). 
+ Build() + + secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &s} + + ac, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, desiredAutomationConfig) + assert.NoError(t, err) + assert.Equal(t, desiredAutomationConfig, ac, "The config should be returned if there is not one currently.") + + bytes := s.Data[ConfigKey] + acFromBytes, err := FromBytes(bytes) + assert.NoError(t, err) + assert.NotEqual(t, &AcLogRotate{}, acFromBytes.Processes[0].LogRotate) + assert.Nil(t, acFromBytes.Processes[0].LogRotate) + }) + + t.Run("When the existing Automation Config is different the Automation Config Changes", func(t *testing.T) { + + oldAc, err := newAutomationConfig() + assert.NoError(t, err) + existingSecret, err := newAutomationConfigSecret(oldAc, secretNsName) + assert.NoError(t, err) + + secretGetUpdateCreator := &mockSecretGetUpdateCreator{secret: &existingSecret} + + newAc, err := newAutomationConfigBuilder().SetDomain("different-domain").Build() + assert.NoError(t, err) + + res, err := EnsureSecret(ctx, secretGetUpdateCreator, secretNsName, []metav1.OwnerReference{}, newAc) + assert.NoError(t, err) + assert.Equal(t, newAc, res) + + }) + +} +func newAutomationConfig() (AutomationConfig, error) { + return NewBuilder().Build() +} + +func newAutomationConfigBuilder() *Builder { + return NewBuilder().SetName("test-name").SetMembers(3).SetDomain("some-domain") +} + +func newAutomationConfigSecret(ac AutomationConfig, nsName types.NamespacedName) (corev1.Secret, error) { + acBytes, err := json.Marshal(ac) + if err != nil { + return corev1.Secret{}, err + } + + return secret.Builder(). + SetName(nsName.Name). + SetNamespace(nsName.Namespace). + SetField(ConfigKey, string(acBytes)). 
+ Build(), nil + +} + +type mockSecretGetUpdateCreator struct { + secret *corev1.Secret +} + +func (m *mockSecretGetUpdateCreator) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { + if m.secret != nil { + if objectKey.Name == m.secret.Name && objectKey.Namespace == m.secret.Namespace { + return *m.secret, nil + } + } + return corev1.Secret{}, notFoundError() +} + +func (m *mockSecretGetUpdateCreator) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + m.secret = &secret + return nil +} + +func (m *mockSecretGetUpdateCreator) CreateSecret(ctx context.Context, secret corev1.Secret) error { + if m.secret == nil { + m.secret = &secret + return nil + } + return alreadyExistsError() +} + +// notFoundError returns an error which returns true for "errors.IsNotFound" +func notFoundError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} +} + +func alreadyExistsError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists}} +} diff --git a/pkg/automationconfig/automation_config_test.go b/pkg/automationconfig/automation_config_test.go index 7a9bb9079..19b3bcfe8 100644 --- a/pkg/automationconfig/automation_config_test.go +++ b/pkg/automationconfig/automation_config_test.go @@ -1,9 +1,12 @@ package automationconfig import ( + "encoding/json" "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" ) @@ -25,55 +28,253 @@ func defaultMongoDbVersion(version string) MongoDbVersionConfig { } func TestBuildAutomationConfig(t *testing.T) { - - ac := NewBuilder(). + builder := NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). SetFCV("4.0"). 
- Build() + SetForceReconfigureToVersion(-1) + + ac, err := builder.Build() + assert.NoError(t, err) assert.Len(t, ac.Processes, 3) assert.Equal(t, 1, ac.Version) for i, p := range ac.Processes { assert.Equal(t, Mongod, p.ProcessType) assert.Equal(t, fmt.Sprintf("my-rs-%d.my-ns.svc.cluster.local", i), p.HostName) - assert.Equal(t, DefaultMongoDBDataDir, p.Args26.Storage.DBPath) - assert.Equal(t, "my-rs", p.Args26.Replication.ReplicaSetName, "replication should be configured based on the replica set name provided") - assert.Equal(t, toHostName("my-rs", i), p.Name) + assert.Equal(t, DefaultMongoDBDataDir, p.Args26.Get("storage.dbPath").Data()) + assert.Equal(t, "my-rs", p.Args26.Get("replication.replSetName").Data()) + assert.Equal(t, toProcessName("my-rs", i, false), p.Name) assert.Equal(t, "4.2.0", p.Version) assert.Equal(t, "4.0", p.FeatureCompatibilityVersion) } + assert.Empty(t, ac.TLSConfig.CAFilePath, "the config shouldn't have a trusted CA") + assert.Len(t, ac.ReplicaSets, 1) rs := ac.ReplicaSets[0] assert.Equal(t, rs.Id, "my-rs", "The name provided should be configured to be the rs id") assert.Len(t, rs.Members, 3, "there should be the number of replicas provided") + require.NotNil(t, rs.Force) + assert.Equal(t, ReplSetForceConfig{CurrentVersion: -1}, *rs.Force) for i, member := range rs.Members { - assert.Equal(t, 1, member.Votes) + assert.Equal(t, 1, *member.Votes) assert.False(t, member.ArbiterOnly) assert.Equal(t, i, member.Id) assert.Equal(t, ac.Processes[i].Name, member.Host) } + + builder.SetForceReconfigureToVersion(1) + ac, err = builder.Build() + assert.NoError(t, err) + rs = ac.ReplicaSets[0] + require.NotNil(t, rs.Force) + assert.Equal(t, ReplSetForceConfig{CurrentVersion: 1}, *rs.Force) } -func TestMongoDbVersions(t *testing.T) { +func TestBuildAutomationConfigArbiters(t *testing.T) { + // Test no arbiter (field specified) + numArbiters := 0 + numMembers := 4 + ac, err := NewBuilder(). + SetMembers(numMembers). + SetArbiters(numArbiters). 
+ Build() + + assert.NoError(t, err) + + rs := ac.ReplicaSets[0] + for _, member := range rs.Members { + assert.False(t, member.ArbiterOnly, "No member should be an arbiter") + } + + // Test no arbiter (field NOT specified) + ac, err = NewBuilder(). + SetMembers(numMembers). + Build() + + assert.NoError(t, err) + + rs = ac.ReplicaSets[0] + for _, member := range rs.Members { + assert.False(t, member.ArbiterOnly, "No member should be an arbiter") + } + + // Test only one arbiter + numArbiters = 1 + numMembers = 4 + ac, err = NewBuilder(). + SetMembers(numMembers). + SetArbiters(numArbiters). + Build() + + assert.NoError(t, err) + + rs = ac.ReplicaSets[0] + assert.Len(t, rs.Members, numMembers+numArbiters) + assert.False(t, rs.Members[0].ArbiterOnly) + assert.False(t, rs.Members[1].ArbiterOnly) + assert.False(t, rs.Members[2].ArbiterOnly) + assert.False(t, rs.Members[3].ArbiterOnly) + assert.True(t, rs.Members[4].ArbiterOnly) + + // Test with multiple arbiters + numArbiters = 2 + numMembers = 4 + ac, err = NewBuilder(). + SetMembers(numMembers). + SetArbiters(numArbiters). + Build() + + assert.NoError(t, err) + + rs = ac.ReplicaSets[0] + for i, member := range rs.Members { + if i < numMembers { + assert.False(t, member.ArbiterOnly, "First members should not be arbiters") + } else { + assert.True(t, member.ArbiterOnly, "Last members should be arbiters") + assert.Equal(t, member.Id, 100+i-numMembers) + } + } + + // Test arbiters should be able to vote + numArbiters = 2 + numMembers = 10 + ac, err = NewBuilder(). + SetMembers(numMembers). + SetArbiters(numArbiters). 
+ Build() + + assert.NoError(t, err) + + m := ac.ReplicaSets[0].Members + + // First 5 data-bearing nodes have votes + assert.Equal(t, 1, *m[0].Votes) + assert.Equal(t, 1, *m[1].Votes) + assert.Equal(t, 1, *m[2].Votes) + assert.Equal(t, 1, *m[3].Votes) + assert.Equal(t, 1, *m[4].Votes) + + // From 6th data-bearing nodes, they won'thave any votes + assert.Equal(t, 0, *m[5].Votes) + assert.Equal(t, 0, *m[6].Votes) + assert.Equal(t, 0, *m[7].Votes) + assert.Equal(t, 0, *m[8].Votes) + assert.Equal(t, 0, *m[9].Votes) + + // Arbiters always have votes + assert.Equal(t, 1, *m[10].Votes) + assert.Equal(t, 1, *m[11].Votes) +} + +func TestReplicaSetMultipleHorizonsScaleDown(t *testing.T) { + var expected ReplicaSetHorizons + + horizons := []ReplicaSetHorizons{ + { + "internal": "test-horizon-0", + "external": "test-horizon-0", + }, + { + "internal": "test-horizon-1", + "external": "test-horizon-1", + }, + { + "internal": "test-horizon-2", + "external": "test-horizon-2", + }, + } + ac, err := NewBuilder(). + SetName("my-rs"). + SetDomain("my-ns.svc.cluster.local"). + SetMongoDBVersion("4.2.0"). + SetMembers(4). + SetReplicaSetHorizons(horizons). + Build() + + assert.NoError(t, err) + + for i, member := range ac.ReplicaSets[0].Members { + if i >= len(horizons) { + expected = nil + } else { + expected = ReplicaSetHorizons{ + "internal": fmt.Sprintf("test-horizon-%d", i), + "external": fmt.Sprintf("test-horizon-%d", i), + } + } + assert.Equal(t, expected, member.Horizons) + } +} + +func TestReplicaSetHorizonsScaleDown(t *testing.T) { + var expected ReplicaSetHorizons + + horizons := []ReplicaSetHorizons{ + {"horizon": "test-horizon-0"}, + {"horizon": "test-horizon-1"}, + {"horizon": "test-horizon-2"}, + } + ac, err := NewBuilder(). + SetName("my-rs"). + SetDomain("my-ns.svc.cluster.local"). + SetMongoDBVersion("4.2.0"). + SetMembers(4). + SetReplicaSetHorizons(horizons). 
+ Build() + + assert.NoError(t, err) + + for i, member := range ac.ReplicaSets[0].Members { + if i >= len(horizons) { + expected = nil + } else { + expected = ReplicaSetHorizons{"horizon": fmt.Sprintf("test-horizon-%d", i)} + } + assert.Equal(t, expected, member.Horizons) + } +} + +func TestReplicaSetHorizons(t *testing.T) { + ac, err := NewBuilder(). + SetName("my-rs"). + SetDomain("my-ns.svc.cluster.local"). + SetMongoDBVersion("4.2.0"). + SetMembers(3). + SetReplicaSetHorizons([]ReplicaSetHorizons{ + {"horizon": "test-horizon-0"}, + {"horizon": "test-horizon-1"}, + {"horizon": "test-horizon-2"}, + }). + Build() + + assert.NoError(t, err) + + for i, member := range ac.ReplicaSets[0].Members { + assert.NotEmpty(t, member.Horizons) + assert.Contains(t, member.Horizons, "horizon") + assert.Equal(t, fmt.Sprintf("test-horizon-%d", i), member.Horizons["horizon"]) + } +} - ac := NewBuilder(). +func TestMongoDbVersions(t *testing.T) { + ac, err := NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). AddVersion(defaultMongoDbVersion("4.2.0")). Build() + assert.NoError(t, err) assert.Len(t, ac.Processes, 3) - assert.Len(t, ac.Versions, 1) + assert.Len(t, ac.Versions, 2) assert.Len(t, ac.Versions[0].Builds, 1) // TODO: be able to pass amount of builds @@ -90,87 +291,259 @@ func TestMongoDbVersions(t *testing.T) { }, ) - ac = NewBuilder(). + ac, err = NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). AddVersion(defaultMongoDbVersion("4.2.0")). AddVersion(version2). Build() + assert.NoError(t, err) assert.Len(t, ac.Processes, 3) - assert.Len(t, ac.Versions, 2) + assert.Len(t, ac.Versions, 3) assert.Len(t, ac.Versions[0].Builds, 1) assert.Len(t, ac.Versions[1].Builds, 2) } func TestHasOptions(t *testing.T) { - ac := NewBuilder(). + ac, err := NewBuilder(). SetName("my-rs"). 
SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). + SetOptions(Options{DownloadBase: "/var/lib/mongodb-mms-automation"}). Build() + assert.NoError(t, err) assert.Equal(t, ac.Options.DownloadBase, "/var/lib/mongodb-mms-automation") } func TestModulesNotNil(t *testing.T) { // We make sure the .Modules is initialized as an empty list of strings // or it will dumped as null attribute in json. - ac := NewBuilder(). + ac, err := NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). AddVersion(defaultMongoDbVersion("4.3.2")). Build() + assert.NoError(t, err) assert.NotNil(t, ac.Versions[0].Builds[0].Modules) } func TestProcessHasPortSetToDefault(t *testing.T) { - ac := NewBuilder(). + ac, err := NewBuilder(). SetName("my-rs"). SetDomain("my-ns.svc.cluster.local"). SetMongoDBVersion("4.2.0"). - SetAutomationConfigVersion(1). SetMembers(3). AddVersion(defaultMongoDbVersion("4.3.2")). Build() + assert.NoError(t, err) assert.Len(t, ac.Processes, 3) - assert.Equal(t, ac.Processes[0].Args26.Net.Port, 27017) - assert.Equal(t, ac.Processes[1].Args26.Net.Port, 27017) - assert.Equal(t, ac.Processes[2].Args26.Net.Port, 27017) -} - -func TestVersionManifest_BuildsForVersion(t *testing.T) { - vm := VersionManifest{ - Updated: 0, - Versions: []MongoDbVersionConfig{ - defaultMongoDbVersion("4.2.0"), - defaultMongoDbVersion("4.2.3"), - defaultMongoDbVersion("4.2.4"), - }, + for _, process := range ac.Processes { + assert.Equal(t, 27017, process.Args26.Get("net.port").Data()) + } +} + +func TestPortsAfterMarshalling(t *testing.T) { + ac, err := NewBuilder(). + SetName("my-rs"). + SetMembers(2). + AddProcessModification(func(i int, process *Process) { + process.SetPort((i + 1) * 1000) + }). 
+ Build() + assert.NoError(t, err) + + require.Len(t, ac.Processes, 2) + // ac built in-memory has ports stored as ints + assert.Equal(t, 1000, ac.Processes[0].Args26.Get("net.port").Int()) + assert.Equal(t, 1000, ac.Processes[0].GetPort()) + assert.Equal(t, 2000, ac.Processes[1].Args26.Get("net.port").Int()) + assert.Equal(t, 2000, ac.Processes[1].GetPort()) + + bytes, err := json.Marshal(&ac) + require.NoError(t, err) + acDeserialized := AutomationConfig{} + require.NoError(t, json.Unmarshal(bytes, &acDeserialized)) + + require.Len(t, acDeserialized.Processes, 2) + // ac after deserialization has ports stored as float64 + assert.Equal(t, 1000., acDeserialized.Processes[0].Args26.Get("net.port").Float64()) + assert.Equal(t, 1000, acDeserialized.Processes[0].GetPort()) + assert.Equal(t, 2000., acDeserialized.Processes[1].Args26.Get("net.port").Float64()) + assert.Equal(t, 2000, acDeserialized.Processes[1].GetPort()) +} + +func TestModifications(t *testing.T) { + incrementVersion := func(config *AutomationConfig) { + config.Version += 1 } - version := vm.BuildsForVersion("4.2.0") - assert.Len(t, version.Builds, 1) - assert.Equal(t, defaultMongoDbVersion("4.2.0"), version) + ac, err := NewBuilder(). + AddModifications(incrementVersion, incrementVersion, incrementVersion). + AddModifications(NOOP()). 
+ Build() + + assert.NoError(t, err) + assert.Equal(t, 4, ac.Version) +} + +func TestMongoDBVersionsConfig(t *testing.T) { + + t.Run("Dummy Config is used when no versions are set", func(t *testing.T) { + ac, err := NewBuilder().SetMongoDBVersion("4.4.2").Build() + assert.NoError(t, err) + + versions := ac.Versions + assert.Len(t, versions, 1) + v := versions[0] + dummyConfig := buildDummyMongoDbVersionConfig("4.4.2") + assert.Equal(t, v, dummyConfig) + }) + + t.Run("Dummy Config is not used when versions are set", func(t *testing.T) { + ac, err := NewBuilder().SetMongoDBVersion("4.4.2").AddVersion(MongoDbVersionConfig{ + Name: "4.4.2", + Builds: []BuildConfig{ + { + Platform: "linux", + Url: "url", + GitVersion: "gitVersion", + Architecture: "arch", + Flavor: "flavor", + MinOsVersion: "minOs", + MaxOsVersion: "maxOs", + }, + }, + }).Build() + + assert.NoError(t, err) + + versions := ac.Versions + assert.Len(t, versions, 2) + v := versions[0] + dummyConfig := buildDummyMongoDbVersionConfig("4.4.2") + assert.NotEqual(t, v, dummyConfig) + + b := versions[0].Builds[0] + assert.Equal(t, "linux", b.Platform) + assert.Equal(t, "url", b.Url) + assert.Equal(t, "gitVersion", b.GitVersion) + assert.Equal(t, "arch", b.Architecture) + assert.Equal(t, "minOs", b.MinOsVersion) + assert.Equal(t, "maxOs", b.MaxOsVersion) + + }) - version = vm.BuildsForVersion("4.2.3") - assert.Len(t, version.Builds, 1) - assert.Equal(t, defaultMongoDbVersion("4.2.3"), version) +} + +func TestAreEqual(t *testing.T) { + t.Run("Automation Configs with same values are equal", func(t *testing.T) { + + areEqual, err := AreEqual( + createAutomationConfig("name0", "mdbVersion0", "domain0", Options{DownloadBase: "downloadBase0"}, Auth{Disabled: true}, 5, 2), + createAutomationConfig("name0", "mdbVersion0", "domain0", Options{DownloadBase: "downloadBase0"}, Auth{Disabled: true}, 5, 2), + ) + + assert.NoError(t, err) + assert.True(t, areEqual) + }) + + t.Run("Automation Configs with same values but different 
version are equal", func(t *testing.T) { + + areEqual, err := AreEqual( + createAutomationConfig("name0", "mdbVersion0", "domain0", Options{DownloadBase: "downloadBase0"}, Auth{Disabled: true}, 5, 2), + createAutomationConfig("name0", "mdbVersion0", "domain0", Options{DownloadBase: "downloadBase0"}, Auth{Disabled: true}, 5, 10), + ) + + assert.NoError(t, err) + assert.True(t, areEqual) + }) + + t.Run("Automation Configs with different values are not equal", func(t *testing.T) { + + areEqual, err := AreEqual( + createAutomationConfig("name0", "differentVersion", "domain0", Options{DownloadBase: "downloadBase1"}, Auth{Disabled: false}, 2, 2), + createAutomationConfig("name0", "mdbVersion0", "domain0", Options{DownloadBase: "downloadBase0"}, Auth{Disabled: true}, 5, 2), + ) + + assert.NoError(t, err) + assert.False(t, areEqual) + }) + + t.Run("Automation Configs with nil and zero values are not equal", func(t *testing.T) { + votes := 1 + priority := "0.0" + firstBuilder := NewBuilder().SetName("name0").SetMongoDBVersion("mdbVersion0").SetOptions(Options{DownloadBase: "downloadBase0"}).SetDomain("domain0").SetMembers(2).SetAuth(Auth{Disabled: true}) + firstBuilder.SetMemberOptions([]MemberOptions{MemberOptions{Votes: &votes, Priority: &priority}}) + firstAc, _ := firstBuilder.Build() + firstAc.Version = 2 + secondBuilder := NewBuilder().SetName("name0").SetMongoDBVersion("mdbVersion0").SetOptions(Options{DownloadBase: "downloadBase0"}).SetDomain("domain0").SetMembers(2).SetAuth(Auth{Disabled: true}) + secondBuilder.SetMemberOptions([]MemberOptions{MemberOptions{Votes: &votes, Priority: nil}}) + secondAc, _ := secondBuilder.Build() + secondAc.Version = 2 + + areEqual, err := AreEqual(firstAc, secondAc) + assert.NoError(t, err) + assert.False(t, areEqual) + }) +} + +func TestValidateFCV(t *testing.T) { + _, err := NewBuilder().SetFCV("4.2.4").Build() + + assert.Error(t, err) +} - version = vm.BuildsForVersion("4.2.4") - assert.Len(t, version.Builds, 1) - assert.Equal(t, 
defaultMongoDbVersion("4.2.4"), version) +func TestEnterpriseVersion(t *testing.T) { + //given + mongoDBVersion := "6.0.5" + expectedVersionInTheAutomationConfig := mongoDBVersion + "-ent" - version = vm.BuildsForVersion("4.2.1") - assert.Empty(t, version.Builds) + //when + ac, err := NewBuilder().SetMongoDBVersion(mongoDBVersion).SetMembers(1).IsEnterprise(true).Build() + + //then + assert.NoError(t, err) + assert.Equal(t, expectedVersionInTheAutomationConfig, ac.Processes[0].Version) + assert.Equal(t, "enterprise", ac.Versions[0].Builds[0].Modules[0]) + assert.Equal(t, "enterprise", ac.Versions[0].Builds[1].Modules[0]) +} + +func createAutomationConfig(name, mongodbVersion, domain string, opts Options, auth Auth, members, acVersion int) AutomationConfig { + ac, _ := NewBuilder(). + SetName(name). + SetMongoDBVersion(mongodbVersion). + SetOptions(opts). + SetDomain(domain). + SetMembers(members). + SetAuth(auth). + Build() + + ac.Version = acVersion + return ac +} + +func TestReplicaSetId(t *testing.T) { + id := "rs0" + ac, err := NewBuilder(). + SetName("my-rs"). + SetDomain("my-ns.svc.cluster.local"). + SetMongoDBVersion("4.2.0"). + SetMembers(3). + AddVersion(defaultMongoDbVersion("4.3.2")). + SetReplicaSetId(&id). + Build() + + assert.NoError(t, err) + assert.Len(t, ac.ReplicaSets, 1) + rs := ac.ReplicaSets[0] + assert.Equal(t, rs.Id, id, "The provided id should be used") } diff --git a/pkg/automationconfig/zz_generated.deepcopy.go b/pkg/automationconfig/zz_generated.deepcopy.go new file mode 100644 index 000000000..723f24ba9 --- /dev/null +++ b/pkg/automationconfig/zz_generated.deepcopy.go @@ -0,0 +1,70 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package automationconfig + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogRotate) DeepCopyInto(out *LogRotate) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogRotate. +func (in *LogRotate) DeepCopy() *LogRotate { + if in == nil { + return nil + } + out := new(LogRotate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MemberOptions) DeepCopyInto(out *MemberOptions) { + *out = *in + if in.Votes != nil { + in, out := &in.Votes, &out.Votes + *out = new(int) + **out = **in + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberOptions. 
+func (in *MemberOptions) DeepCopy() *MemberOptions { + if in == nil { + return nil + } + out := new(MemberOptions) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/add_mongodb.go b/pkg/controller/add_mongodb.go deleted file mode 100644 index cf5263a39..000000000 --- a/pkg/controller/add_mongodb.go +++ /dev/null @@ -1,10 +0,0 @@ -package controller - -import ( - "github.com/mongodb/mongodb-kubernetes-operator/pkg/controller/mongodb" -) - -func init() { - // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, mongodb.Add) -} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go deleted file mode 100644 index 7c069f3ee..000000000 --- a/pkg/controller/controller.go +++ /dev/null @@ -1,18 +0,0 @@ -package controller - -import ( - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// AddToManagerFuncs is a list of functions to add all Controllers to the Manager -var AddToManagerFuncs []func(manager.Manager) error - -// AddToManager adds all Controllers to the Manager -func AddToManager(m manager.Manager) error { - for _, f := range AddToManagerFuncs { - if err := f(m); err != nil { - return err - } - } - return nil -} diff --git a/pkg/controller/mongodb/mongodb_controller.go b/pkg/controller/mongodb/mongodb_controller.go deleted file mode 100644 index 618f162f7..000000000 --- a/pkg/controller/mongodb/mongodb_controller.go +++ /dev/null @@ -1,434 +0,0 @@ -package mongodb - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "time" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/controller/predicates" - - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" - mdbClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" - 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const ( - AutomationConfigKey = "automation-config" - agentName = "mongodb-agent" - mongodbName = "mongod" - agentImageEnvVariable = "AGENT_IMAGE" - versionManifestFilePath = "/usr/local/version_manifest.json" - readinessProbePath = "/var/lib/mongodb-mms-automation/probes/readinessprobe" - agentHealthStatusFilePath = "/var/log/mongodb-mms-automation/agent-health-status.json" - clusterFilePath = "/var/lib/automation/config/automation-config" -) - -// Add creates a new MongoDB Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr, readVersionManifestFromDisk)) -} - -// ManifestProvider is a function which returns the VersionManifest which -// contains the list of all available MongoDB versions -type ManifestProvider func() (automationconfig.VersionManifest, error) - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager, manifestProvider ManifestProvider) reconcile.Reconciler { - mgrClient := mgr.GetClient() - return &ReplicaSetReconciler{ - client: mdbClient.NewClient(mgrClient), - scheme: mgr.GetScheme(), - manifestProvider: manifestProvider, - log: zap.S(), - } -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("replicaset-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource MongoDB - err = c.Watch(&source.Kind{Type: &mdbv1.MongoDB{}}, &handler.EnqueueRequestForObject{}, predicates.OnlyOnSpecChange()) - if err != nil { - return err - } - return nil -} - -// blank assignment to verify that ReplicaSetReconciler implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReplicaSetReconciler{} - -// ReplicaSetReconciler reconciles a MongoDB ReplicaSet -type ReplicaSetReconciler struct { - // This client, initialized using mgr.Client() above, is a split client - // that reads objects from the cache and writes to the apiserver - client mdbClient.Client - scheme *runtime.Scheme - manifestProvider func() (automationconfig.VersionManifest, error) - log *zap.SugaredLogger -} - -// Reconcile reads that state of the cluster for a MongoDB object and makes changes based on the state read -// and what is in the MongoDB.Spec -// Note: -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, 
otherwise upon completion it will remove the work from the queue. -func (r *ReplicaSetReconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { - r.log = zap.S().With("ReplicaSet", request.NamespacedName) - r.log.Info("Reconciling MongoDB") - - // TODO: generalize preparation for resource - // Fetch the MongoDB instance - mdb := mdbv1.MongoDB{} - err := r.client.Get(context.TODO(), request.NamespacedName, &mdb) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - return reconcile.Result{}, nil - } - r.log.Errorf("error reconciling MongoDB resource: %s", err) - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - // TODO: Read current automation config version from config map - if err := r.ensureAutomationConfig(mdb); err != nil { - r.log.Infof("error creating automation config config map: %s", err) - return reconcile.Result{}, err - } - - svc := buildService(mdb) - if err = r.client.CreateOrUpdate(&svc); err != nil { - r.log.Infof("The service already exists... 
moving forward: %s", err) - } - - if err := r.createOrUpdateStatefulSet(mdb); err != nil { - r.log.Infof("Error creating/updating StatefulSet: %+v", err) - return reconcile.Result{}, err - } - - if ready, err := r.isStatefulSetReady(mdb); err != nil { - r.log.Infof("error checking StatefulSet status: %+v", err) - return reconcile.Result{}, err - } else if !ready { - r.log.Infof("StatefulSet %s/%s is not yet ready, retrying in 10 seconds", mdb.Namespace, mdb.Name) - return reconcile.Result{RequeueAfter: time.Second * 10}, nil - } - - if err := r.resetStatefulSetUpdateStrategy(mdb); err != nil { - r.log.Infof("error resetting StatefulSet UpdateStrategyType: %+v", err) - return reconcile.Result{}, err - } - - if err := r.setAnnotation(types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, mdbv1.LastVersionAnnotationKey, mdb.Spec.Version); err != nil { - r.log.Infof("Error setting annotation: %+v", err) - return reconcile.Result{}, err - } - - if err := r.updateStatusSuccess(&mdb); err != nil { - r.log.Infof("Error updating the status of the MongoDB resource: %+v", err) - return reconcile.Result{}, err - } - - r.log.Info("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status", mdb.Status) - return reconcile.Result{}, nil -} - -// resetStatefulSetUpdateStrategy ensures the stateful set is configured back to using RollingUpdateStatefulSetStrategyType -// and does not keep using OnDelete -func (r *ReplicaSetReconciler) resetStatefulSetUpdateStrategy(mdb mdbv1.MongoDB) error { - if !mdb.ChangingVersion() { - return nil - } - // if we changed the version, we need to reset the UpdatePolicy back to OnUpdate - sts := &appsv1.StatefulSet{} - return r.client.GetAndUpdate(types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, sts, func() { - sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType - }) -} - -// isStatefulSetReady checks to see if the stateful set corresponding to the given MongoDB resource -// is 
currently in the ready state -func (r *ReplicaSetReconciler) isStatefulSetReady(mdb mdbv1.MongoDB) (bool, error) { - set := appsv1.StatefulSet{} - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &set); err != nil { - return false, fmt.Errorf("error getting StatefulSet: %s", err) - } - return statefulset.IsReady(set), nil -} - -func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDB) error { - sts, err := buildStatefulSet(mdb) - if err != nil { - return fmt.Errorf("error building StatefulSet: %s", err) - } - if err = r.client.CreateOrUpdate(&sts); err != nil { - return fmt.Errorf("error creating/updating StatefulSet: %s", err) - } - - r.log.Debugf("Waiting for StatefulSet %s/%s to reach ready state", mdb.Namespace, mdb.Name) - set := appsv1.StatefulSet{} - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &set); err != nil { - return fmt.Errorf("error getting StatefulSet: %s", err) - } - return nil -} - -// setAnnotation updates the monogdb resource with the given namespaced name and sets the annotation -// "key" with the provided value "val" -func (r ReplicaSetReconciler) setAnnotation(nsName types.NamespacedName, key, val string) error { - mdb := mdbv1.MongoDB{} - return r.client.GetAndUpdate(nsName, &mdb, func() { - if mdb.Annotations == nil { - mdb.Annotations = map[string]string{} - } - mdb.Annotations[key] = val - }) -} - -// updateStatusSuccess should be called after a successful reconciliation -// the resource's status is updated to reflect to the state, and any other cleanup -// operators should be performed here -func (r ReplicaSetReconciler) updateStatusSuccess(mdb *mdbv1.MongoDB) error { - newMdb := &mdbv1.MongoDB{} - if err := r.client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, newMdb); err != nil { - return fmt.Errorf("error getting resource: %+v", err) - } - newMdb.UpdateSuccess() - if 
err := r.client.Status().Update(context.TODO(), newMdb); err != nil { - return fmt.Errorf("error updating status: %+v", err) - } - return nil -} - -func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDB) error { - cm, err := r.buildAutomationConfigConfigMap(mdb) - if err != nil { - return err - } - if err := r.client.CreateOrUpdate(&cm); err != nil { - return err - } - return nil -} - -func buildAutomationConfig(mdb mdbv1.MongoDB, mdbVersionConfig automationconfig.MongoDbVersionConfig) automationconfig.AutomationConfig { - domain := getDomain(mdb.ServiceName(), mdb.Namespace, "") - return automationconfig.NewBuilder(). - SetTopology(automationconfig.ReplicaSetTopology). - SetName(mdb.Name). - SetDomain(domain). - SetMembers(mdb.Spec.Members). - SetMongoDBVersion(mdb.Spec.Version). - SetAutomationConfigVersion(1). // TODO: Correctly set the version - SetFCV(mdb.GetFCV()). - AddVersion(mdbVersionConfig). - Build() -} - -func readVersionManifestFromDisk() (automationconfig.VersionManifest, error) { - bytes, err := ioutil.ReadFile(versionManifestFilePath) - if err != nil { - return automationconfig.VersionManifest{}, err - } - return versionManifestFromBytes(bytes) -} - -func versionManifestFromBytes(bytes []byte) (automationconfig.VersionManifest, error) { - versionManifest := automationconfig.VersionManifest{} - if err := json.Unmarshal(bytes, &versionManifest); err != nil { - return automationconfig.VersionManifest{}, err - } - return versionManifest, nil -} - -// buildService creates a Service that will be used for the Replica Set StatefulSet -// that allows all the members of the STS to see each other. -// TODO: Make sure this Service is as minimal as posible, to not interfere with -// future implementations and Service Discovery mechanisms we might implement. -func buildService(mdb mdbv1.MongoDB) corev1.Service { - label := make(map[string]string) - label["app"] = mdb.ServiceName() - return service.Builder(). - SetName(mdb.ServiceName()). 
- SetNamespace(mdb.Namespace). - SetSelector(label). - SetServiceType(corev1.ServiceTypeClusterIP). - SetClusterIP("None"). - SetPort(27017). - Build() -} - -func (r ReplicaSetReconciler) buildAutomationConfigConfigMap(mdb mdbv1.MongoDB) (corev1.ConfigMap, error) { - manifest, err := r.manifestProvider() - if err != nil { - return corev1.ConfigMap{}, fmt.Errorf("error reading version manifest from disk: %+v", err) - } - ac := buildAutomationConfig(mdb, manifest.BuildsForVersion(mdb.Spec.Version)) - acBytes, err := json.Marshal(ac) - if err != nil { - return corev1.ConfigMap{}, err - } - - return configmap.Builder(). - SetName(mdb.ConfigMapName()). - SetNamespace(mdb.Namespace). - SetField(AutomationConfigKey, string(acBytes)). - Build(), nil -} - -// buildContainers constructs the mongodb-agent container as well as the -// mongod container. -func buildContainers(mdb mdbv1.MongoDB) []corev1.Container { - agentCommand := []string{ - "agent/mongodb-agent", - "-cluster=" + clusterFilePath, - "-skipMongoStart", - "-noDaemonize", - "-healthCheckFilePath=" + agentHealthStatusFilePath, - "-serveStatusPort=5000", - } - - readinessProbe := defaultReadinessProbe() - agentContainer := corev1.Container{ - Name: agentName, - Image: os.Getenv(agentImageEnvVariable), - ImagePullPolicy: corev1.PullAlways, - Resources: resourcerequirements.Defaults(), - Command: agentCommand, - ReadinessProbe: &readinessProbe, - } - - mongoDbCommand := []string{ - "/bin/sh", - "-c", - `while [ ! 
-f /data/automation-mongod.conf ]; do sleep 3 ; done ; sleep 2; mongod -f /data/automation-mongod.conf`, - } - mongodbContainer := corev1.Container{ - Name: mongodbName, - Image: fmt.Sprintf("mongo:%s", mdb.Spec.Version), - Command: mongoDbCommand, - Resources: resourcerequirements.Defaults(), - } - return []corev1.Container{agentContainer, mongodbContainer} -} - -func defaultReadinessProbe() corev1.Probe { - return corev1.Probe{ - Handler: corev1.Handler{ - Exec: &corev1.ExecAction{Command: []string{readinessProbePath}}, - }, - // Setting the failure threshold to quite big value as the agent may spend some time to reach the goal - FailureThreshold: 240, - // The agent may be not on time to write the status file right after the container is created - we need to wait - // for some time - InitialDelaySeconds: 5, - } -} - -// getUpdateStrategyType returns the type of RollingUpgradeStrategy that the StatefulSet -// should be configured with -func getUpdateStrategyType(mdb mdbv1.MongoDB) appsv1.StatefulSetUpdateStrategyType { - if !mdb.ChangingVersion() { - return appsv1.RollingUpdateStatefulSetStrategyType - } - return appsv1.OnDeleteStatefulSetStrategyType -} - -// buildStatefulSet takes a MongoDB resource and converts it into -// the corresponding stateful set -func buildStatefulSet(mdb mdbv1.MongoDB) (appsv1.StatefulSet, error) { - labels := map[string]string{ - "app": mdb.ServiceName(), - } - - podSpecTemplate := corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: buildContainers(mdb), - }, - } - - builder := statefulset.NewBuilder(). - SetPodTemplateSpec(podSpecTemplate). - SetNamespace(mdb.Namespace). - SetName(mdb.Name). - SetReplicas(mdb.Spec.Members). - SetLabels(labels). - SetMatchLabels(labels). - SetServiceName(mdb.ServiceName()). - SetUpdateStrategy(getUpdateStrategyType(mdb)) - - // TODO: Add this section to architecture document. 
- // The design of the multi-container and the different volumes mounted to them is as follows: - // There will be two volumes mounted: - // 1. "data-volume": Access to /data for both agent and mongod. Shared data is required because - // agent writes automation-mongod.conf file in it and reads certain lock files from there. - // 2. "automation-config": This is /var/lib/automation/config that holds the automation config - // mounted from a ConfigMap. This is only required in the Agent container. - dataVolume, dataVolumeClaim := buildDataVolumeClaim() - builder. - AddVolumeMount(mongodbName, dataVolume). - AddVolumeMount(agentName, dataVolume). - AddVolumeClaimTemplates(dataVolumeClaim) - // the automation config is only mounted, as read only, on the agent container - automationConfigVolume := statefulset.CreateVolumeFromConfigMap("automation-config", "example-mongodb-config") - automationConfigVolumeMount := statefulset.CreateVolumeMount("automation-config", "/var/lib/automation/config", statefulset.WithReadOnly(true)) - builder. - AddVolume(automationConfigVolume). 
- AddVolumeMount(agentName, automationConfigVolumeMount) - - return builder.Build() -} - -func buildDataVolumeClaim() (corev1.VolumeMount, []corev1.PersistentVolumeClaim) { - dataVolume := statefulset.CreateVolumeMount("data-volume", "/data") - dataVolumeClaim := []corev1.PersistentVolumeClaim{{ - ObjectMeta: metav1.ObjectMeta{ - Name: "data-volume", - }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ - Requests: resourcerequirements.BuildDefaultStorageRequirements(), - }, - }, - }} - - return dataVolume, dataVolumeClaim -} - -func getDomain(service, namespace, clusterName string) string { - if clusterName == "" { - clusterName = "cluster.local" - } - return fmt.Sprintf("%s.%s.svc.%s", service, namespace, clusterName) -} diff --git a/pkg/controller/mongodb/reconcilliation_assertions.go b/pkg/controller/mongodb/reconcilliation_assertions.go deleted file mode 100644 index dc1ffcb55..000000000 --- a/pkg/controller/mongodb/reconcilliation_assertions.go +++ /dev/null @@ -1,14 +0,0 @@ -package mongodb - -import ( - "github.com/stretchr/testify/assert" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "testing" - "time" -) - -func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err error) { - assert.NoError(t, err) - assert.Equal(t, false, result.Requeue) - assert.Equal(t, time.Duration(0), result.RequeueAfter) -} diff --git a/pkg/controller/mongodb/replicaset_controller_test.go b/pkg/controller/mongodb/replicaset_controller_test.go deleted file mode 100644 index 8948f1e29..000000000 --- a/pkg/controller/mongodb/replicaset_controller_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package mongodb - -import ( - "context" - "os" - "reflect" - "testing" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" - - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func init() { - os.Setenv("AGENT_IMAGE", "agent-image") -} - -func newTestReplicaSet() mdbv1.MongoDB { - return mdbv1.MongoDB{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-rs", - Namespace: "my-ns", - Annotations: map[string]string{}, - }, - Spec: mdbv1.MongoDBSpec{ - Members: 3, - Version: "4.2.2", - }, - } -} - -func mockManifestProvider(version string) func() (automationconfig.VersionManifest, error) { - return func() (automationconfig.VersionManifest, error) { - return automationconfig.VersionManifest{ - Updated: 0, - Versions: []automationconfig.MongoDbVersionConfig{ - { - Name: version, - Builds: []automationconfig.BuildConfig{{ - Platform: "platform", - Url: "url", - GitVersion: "gitVersion", - Architecture: "arch", - Flavor: "flavor", - MinOsVersion: "0", - MaxOsVersion: "10", - Modules: []string{}, - }}, - }}, - }, nil - } -} - -func TestKubernetesResources_AreCreated(t *testing.T) { - // TODO: Create builder/yaml fixture of some type to construct MDB objects for unit tests - mdb := newTestReplicaSet() - - mgr := client.NewManager(&mdb) - r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) - - res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) - assertReconciliationSuccessful(t, res, err) - - cm := corev1.ConfigMap{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ConfigMapName(), Namespace: mdb.Namespace}, &cm) - assert.NoError(t, err) - assert.Equal(t, mdb.Namespace, cm.Namespace) - assert.Equal(t, mdb.ConfigMapName(), cm.Name) - assert.Contains(t, cm.Data, 
AutomationConfigKey) - assert.NotEmpty(t, cm.Data[AutomationConfigKey]) -} - -func TestStatefulSet_IsCorrectlyConfigured(t *testing.T) { - mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) - res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) - assertReconciliationSuccessful(t, res, err) - - sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) - assert.NoError(t, err) - - assert.Len(t, sts.Spec.Template.Spec.Containers, 2) - - agentContainer := sts.Spec.Template.Spec.Containers[0] - assert.Equal(t, agentName, agentContainer.Name) - assert.Equal(t, os.Getenv(agentImageEnvVariable), agentContainer.Image) - expectedProbe := defaultReadinessProbe() - assert.True(t, reflect.DeepEqual(&expectedProbe, agentContainer.ReadinessProbe)) - - mongodbContainer := sts.Spec.Template.Spec.Containers[1] - assert.Equal(t, mongodbName, mongodbContainer.Name) - assert.Equal(t, "mongo:4.2.2", mongodbContainer.Image) - - assert.Equal(t, resourcerequirements.Defaults(), agentContainer.Resources) -} - -func TestChangingVersion_ResultsInRollingUpdateStrategyType(t *testing.T) { - mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - mgrClient := mgr.GetClient() - r := newReconciler(mgr, mockManifestProvider(mdb.Spec.Version)) - res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) - assertReconciliationSuccessful(t, res, err) - - // fetch updated resource after first reconciliation - _ = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &mdb) - - sts := appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) - assert.NoError(t, err) - assert.Equal(t, 
appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) - - mdbRef := &mdb - mdbRef.Spec.Version = "4.2.3" - - _ = mgrClient.Update(context.TODO(), &mdb) - - res, err = r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) - assertReconciliationSuccessful(t, res, err) - - sts = appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) - assert.NoError(t, err) - - assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type, - "The StatefulSet should have be re-configured to use RollingUpdates after it reached the ready state") -} - -func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { - t.Run("On No Version Change, Same Version", func(t *testing.T) { - mdb := newTestReplicaSet() - mdb.Spec.Version = "4.0.0" - mdb.Annotations[mdbv1.LastVersionAnnotationKey] = "4.0.0" - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) - assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) - }) - t.Run("On No Version Change, First Version", func(t *testing.T) { - mdb := newTestReplicaSet() - mdb.Spec.Version = "4.0.0" - delete(mdb.Annotations, mdbv1.LastVersionAnnotationKey) - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) - assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) - }) - t.Run("On Version Change", func(t *testing.T) { - mdb := newTestReplicaSet() - mdb.Spec.Version = "4.0.0" - mdb.Annotations[mdbv1.LastVersionAnnotationKey] = "4.2.0" - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) - assert.Equal(t, appsv1.OnDeleteStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) - }) -} diff --git a/pkg/helm/helm.go b/pkg/helm/helm.go new file mode 100644 index 000000000..35392f7f2 --- /dev/null +++ b/pkg/helm/helm.go @@ -0,0 +1,62 @@ +package helm + +import ( + "fmt" + "os/exec" 
+ "strings" +) + +// Uninstall uninstalls a helm chart of the given name. There is no error in the case +// of the helm chart not existing. +func Uninstall(chartName string, namespace string) error { + helmArgs := []string{"uninstall", chartName, "-n", namespace} + return executeHelmCommand(helmArgs, isNotFoundMessage) +} + +// DependencyUpdate downloads dependencies for a Chart. +func DependencyUpdate(chartPath string) error { + helmArgs := []string{"dependency", "update", chartPath} + return executeHelmCommand(helmArgs, nil) +} + +// Install a helm chart at the given path with the given name and the provided set arguments. +func Install(chartPath, chartName string, flags map[string]string, templateValues map[string]string) error { + helmArgs := []string{"install"} + helmArgs = append(helmArgs, chartName, chartPath) + for flagKey, flagValue := range flags { + helmArgs = append(helmArgs, fmt.Sprintf("--%s", flagKey)) + if flagValue != "" { + helmArgs = append(helmArgs, flagValue) + } + } + helmArgs = append(helmArgs, mapToHelmValuesArg(templateValues)...) + return executeHelmCommand(helmArgs, nil) +} + +func isNotFoundMessage(s string) bool { + return strings.Contains(s, "not found") +} + +// executeHelmCommand accepts a list of arguments that should be passed to the helm command +// and a predicate that when returning true, indicates that the error message should be ignored. +func executeHelmCommand(args []string, messagePredicate func(string) bool) error { + cmd := exec.Command("helm", args...) + output, err := cmd.CombinedOutput() + if err != nil { + if messagePredicate != nil && messagePredicate(string(output)) { + return nil + } + return fmt.Errorf("error executing command: %s %s", err, output) + } + return nil +} + +// mapToHelmValuesArg accepts a map of string to string and returns a list of arguments +// that can be passed to a shell helm command. 
+func mapToHelmValuesArg(m map[string]string) []string { + var args []string + for k, v := range m { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + return args +} diff --git a/pkg/kube/annotations/annotations.go b/pkg/kube/annotations/annotations.go new file mode 100644 index 000000000..44f5e9695 --- /dev/null +++ b/pkg/kube/annotations/annotations.go @@ -0,0 +1,84 @@ +package annotations + +import ( + "context" + "encoding/json" + "strings" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Versioned interface { + client.Object + GetMongoDBVersionForAnnotation() string + NamespacedName() types.NamespacedName + IsChangingVersion() bool +} + +type patchValue struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +const ( + LastAppliedMongoDBVersion = "mongodb.com/v1.lastAppliedMongoDBVersion" +) + +func GetAnnotation(object client.Object, key string) string { + value, ok := object.GetAnnotations()[key] + if !ok { + return "" + } + return value +} + +// SetAnnotations updates the objects.Annotation with the supplied annotation and does the same with the object backed in kubernetes. 
+func SetAnnotations(ctx context.Context, object client.Object, annotations map[string]string, kubeClient client.Client) error { + currentObject := object.DeepCopyObject().(client.Object) + err := kubeClient.Get(ctx, types.NamespacedName{Name: object.GetName(), Namespace: object.GetNamespace()}, currentObject) + if err != nil { + return err + } + + // If the object has no annotations, we first need to create an empty entry in + // metadata.annotations, otherwise the server will reject our request + var payload []patchValue + if currentObject.GetAnnotations() == nil || len(currentObject.GetAnnotations()) == 0 { + payload = append(payload, patchValue{ + Op: "replace", + Path: "/metadata/annotations", + Value: map[string]interface{}{}, + }) + } + + for key, val := range annotations { + payload = append(payload, patchValue{ + Op: "replace", + // every "/" in the value needs to be replaced with ~1 when patching + Path: "/metadata/annotations/" + strings.Replace(key, "/", "~1", 1), + Value: val, + }) + } + + data, err := json.Marshal(payload) + if err != nil { + return err + } + + patch := client.RawPatch(types.JSONPatchType, data) + if err = kubeClient.Patch(ctx, currentObject, patch); err != nil { + return err + } + object.SetAnnotations(currentObject.GetAnnotations()) + return nil +} + +func UpdateLastAppliedMongoDBVersion(ctx context.Context, mdb Versioned, kubeClient client.Client) error { + annotations := map[string]string{ + LastAppliedMongoDBVersion: mdb.GetMongoDBVersionForAnnotation(), + } + + return SetAnnotations(ctx, mdb, annotations, kubeClient) +} diff --git a/pkg/kube/client/client.go b/pkg/kube/client/client.go index b68e4ea05..640e23373 100644 --- a/pkg/kube/client/client.go +++ b/pkg/kube/client/client.go @@ -2,10 +2,17 @@ package client import ( "context" - "reflect" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/pod" + + 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -18,46 +25,164 @@ func NewClient(c k8sClient.Client) Client { type Client interface { k8sClient.Client - CreateOrUpdate(obj runtime.Object) error - GetAndUpdate(nsName types.NamespacedName, obj runtime.Object, updateFunc func()) error + KubernetesSecretClient + // TODO: remove this function, add mongodb package which has GetAndUpdate function + GetAndUpdate(ctx context.Context, nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error + configmap.GetUpdateCreateDeleter + service.GetUpdateCreateDeleter + statefulset.GetUpdateCreateDeleter + pod.Getter } -type client struct { - k8sClient.Client +type KubernetesSecretClient interface { + secret.GetUpdateCreateDeleter } -// CreateOrUpdate will either Create the runtime.Object if it doesn't exist, or Update it -// if it does -func (c client) CreateOrUpdate(obj runtime.Object) error { - objCopy := obj.DeepCopyObject() - err := c.Get(context.TODO(), namespacedNameFromObject(obj), objCopy) - if err != nil { - if errors.IsNotFound(err) { - return c.Create(context.TODO(), obj) - } - return err - } - return c.Update(context.TODO(), obj) +type client struct { + k8sClient.Client } // GetAndUpdate fetches the most recent version of the runtime.Object with the provided // nsName and applies the update function. 
The update function should update "obj" from // an outer scope -func (c client) GetAndUpdate(nsName types.NamespacedName, obj runtime.Object, updateFunc func()) error { - err := c.Get(context.TODO(), nsName, obj) +func (c client) GetAndUpdate(ctx context.Context, nsName types.NamespacedName, obj k8sClient.Object, updateFunc func()) error { + err := c.Get(ctx, nsName, obj) if err != nil { return err } // apply the function on the most recent version of the resource updateFunc() - return c.Update(context.TODO(), obj) + return c.Update(ctx, obj) +} + +// GetConfigMap provides a thin wrapper and client.client to access corev1.ConfigMap types +func (c client) GetConfigMap(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.ConfigMap, error) { + cm := corev1.ConfigMap{} + if err := c.Get(ctx, objectKey, &cm); err != nil { + return corev1.ConfigMap{}, err + } + return cm, nil +} + +// UpdateConfigMap provides a thin wrapper and client.Client to update corev1.ConfigMap types +func (c client) UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { + return c.Update(ctx, &cm) +} + +// CreateConfigMap provides a thin wrapper and client.Client to create corev1.ConfigMap types +func (c client) CreateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { + return c.Create(ctx, &cm) +} + +// DeleteConfigMap deletes the configmap of the given object key +func (c client) DeleteConfigMap(ctx context.Context, key k8sClient.ObjectKey) error { + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + return c.Delete(ctx, &cm) +} + +// GetPod provides a thin wrapper and client.client to access corev1.Pod types. 
+func (c client) GetPod(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Pod, error) { + p := corev1.Pod{} + if err := c.Get(ctx, objectKey, &p); err != nil { + return corev1.Pod{}, err + } + return p, nil +} + +// GetSecret provides a thin wrapper and client.Client to access corev1.Secret types +func (c client) GetSecret(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Secret, error) { + s := corev1.Secret{} + if err := c.Get(ctx, objectKey, &s); err != nil { + return corev1.Secret{}, err + } + return s, nil +} + +// UpdateSecret provides a thin wrapper and client.Client to update corev1.Secret types +func (c client) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + return c.Update(ctx, &secret) +} + +// CreateSecret provides a thin wrapper and client.Client to create corev1.Secret types +func (c client) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.Create(ctx, &secret) +} + +// DeleteSecret provides a thin wrapper and client.Client to delete corev1.Secret types +func (c client) DeleteSecret(ctx context.Context, key k8sClient.ObjectKey) error { + s := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + return c.Delete(ctx, &s) +} + +// GetService provides a thin wrapper and client.Client to access corev1.Service types +func (c client) GetService(ctx context.Context, objectKey k8sClient.ObjectKey) (corev1.Service, error) { + s := corev1.Service{} + if err := c.Get(ctx, objectKey, &s); err != nil { + return corev1.Service{}, err + } + return s, nil +} + +// UpdateService provides a thin wrapper and client.Client to update corev1.Service types +func (c client) UpdateService(ctx context.Context, service corev1.Service) error { + return c.Update(ctx, &service) +} + +// CreateService provides a thin wrapper and client.Client to create corev1.Service types +func (c client) CreateService(ctx context.Context, service corev1.Service) error { + return 
c.Create(ctx, &service) +} + +// DeleteService provides a thin wrapper around client.Client to delete corev1.Service types +func (c client) DeleteService(ctx context.Context, objectKey k8sClient.ObjectKey) error { + svc := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: objectKey.Name, + Namespace: objectKey.Namespace, + }, + } + return c.Delete(ctx, &svc) +} + +// GetStatefulSet provides a thin wrapper and client.Client to access appsv1.StatefulSet types +func (c client) GetStatefulSet(ctx context.Context, objectKey k8sClient.ObjectKey) (appsv1.StatefulSet, error) { + sts := appsv1.StatefulSet{} + if err := c.Get(ctx, objectKey, &sts); err != nil { + return appsv1.StatefulSet{}, err + } + return sts, nil +} + +// UpdateStatefulSet provides a thin wrapper and client.Client to update appsv1.StatefulSet types +// the updated StatefulSet is returned +func (c client) UpdateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) (appsv1.StatefulSet, error) { + stsToUpdate := &sts + err := c.Update(ctx, stsToUpdate) + return *stsToUpdate, err +} + +// CreateStatefulSet provides a thin wrapper and client.Client to create appsv1.StatefulSet types +func (c client) CreateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) error { + return c.Create(ctx, &sts) } -func namespacedNameFromObject(obj runtime.Object) types.NamespacedName { - ns := reflect.ValueOf(obj).Elem().FieldByName("Namespace").String() - name := reflect.ValueOf(obj).Elem().FieldByName("Name").String() - return types.NamespacedName{ - Name: name, - Namespace: ns, +// DeleteStatefulSet provides a thin wrapper and client.Client to delete appsv1.StatefulSet types +func (c client) DeleteStatefulSet(ctx context.Context, objectKey k8sClient.ObjectKey) error { + sts := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: objectKey.Name, + Namespace: objectKey.Namespace, + }, } + return c.Delete(ctx, &sts) } diff --git a/pkg/kube/client/client_test.go b/pkg/kube/client/client_test.go index 
c42a28c07..083df075b 100644 --- a/pkg/kube/client/client_test.go +++ b/pkg/kube/client/client_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "k8s.io/apimachinery/pkg/types" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -11,20 +13,21 @@ import ( ) func TestChangingName_CreatesNewObject(t *testing.T) { + ctx := context.Background() cm := configmap.Builder(). SetName("some-name"). SetNamespace("some-namespace"). Build() client := NewClient(NewMockedClient()) - err := client.CreateOrUpdate(&cm) + err := configmap.CreateOrUpdate(ctx, client, cm) assert.NoError(t, err) newCm := corev1.ConfigMap{} - objectKey, err := k8sClient.ObjectKeyFromObject(&cm) + objectKey := k8sClient.ObjectKeyFromObject(&cm) assert.NoError(t, err) - err = client.Get(context.TODO(), objectKey, &newCm) + err = client.Get(ctx, objectKey, &newCm) assert.NoError(t, err) assert.Equal(t, newCm.Name, "some-name") @@ -32,33 +35,52 @@ func TestChangingName_CreatesNewObject(t *testing.T) { newCm.Name = "new-name" - objectKey, _ = k8sClient.ObjectKeyFromObject(&newCm) - _ = client.CreateOrUpdate(&newCm) + objectKey = k8sClient.ObjectKeyFromObject(&newCm) + _ = configmap.CreateOrUpdate(ctx, client, newCm) - _ = client.Get(context.TODO(), objectKey, &newCm) + _ = client.Get(ctx, objectKey, &newCm) assert.Equal(t, newCm.Name, "new-name") assert.Equal(t, newCm.Namespace, "some-namespace") } func TestAddingDataField_ModifiesExistingObject(t *testing.T) { + ctx := context.Background() cm := configmap.Builder(). SetName("some-name"). SetNamespace("some-namespace"). 
Build() client := NewClient(NewMockedClient()) - err := client.CreateOrUpdate(&cm) + err := configmap.CreateOrUpdate(ctx, client, cm) assert.NoError(t, err) cm.Data["new-field"] = "value" - _ = client.CreateOrUpdate(&cm) + _ = configmap.CreateOrUpdate(ctx, client, cm) newCm := corev1.ConfigMap{} - objectKey, err := k8sClient.ObjectKeyFromObject(&newCm) + objectKey := k8sClient.ObjectKeyFromObject(&newCm) assert.NoError(t, err) - _ = client.Get(context.TODO(), objectKey, &newCm) + _ = client.Get(ctx, objectKey, &newCm) assert.Contains(t, cm.Data, "new-field") assert.Equal(t, cm.Data["new-field"], "value") } + +func TestDeleteConfigMap(t *testing.T) { + ctx := context.Background() + cm := configmap.Builder(). + SetName("config-map"). + SetNamespace("default"). + Build() + + client := NewClient(NewMockedClient()) + err := client.CreateConfigMap(ctx, cm) + assert.NoError(t, err) + + err = client.DeleteConfigMap(ctx, types.NamespacedName{Name: "config-map", Namespace: "default"}) + assert.NoError(t, err) + + _, err = client.GetConfigMap(ctx, types.NamespacedName{Name: "config-map", Namespace: "default"}) + assert.Equal(t, err, notFoundError()) +} diff --git a/pkg/kube/client/mocked_client.go b/pkg/kube/client/mocked_client.go index e18831889..f4a8e499e 100644 --- a/pkg/kube/client/mocked_client.go +++ b/pkg/kube/client/mocked_client.go @@ -2,18 +2,129 @@ package client import ( "context" - "reflect" - + "encoding/json" + "fmt" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "reflect" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "strings" ) -// mockedClient dynamically creates maps to store instances of runtime.Object +var ( + _ k8sClient.Client = mockedClient{} + _ k8sClient.StatusWriter = mockedStatusWriter{} +) + +type patchValue struct { + Op 
string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +// mockedClient dynamically creates maps to store instances of k8sClient.Object type mockedClient struct { - backingMap map[reflect.Type]map[k8sClient.ObjectKey]runtime.Object + backingMap map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object +} + +func (m mockedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + panic("not implemented") +} + +func (m mockedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + panic("not implemented") +} + +func (m mockedClient) Create(_ context.Context, obj k8sClient.Object, _ ...k8sClient.CreateOption) error { + relevantMap := m.ensureMapFor(obj) + objKey := k8sClient.ObjectKeyFromObject(obj) + if _, ok := relevantMap[objKey]; ok { + return alreadyExistsError() + } + + switch v := obj.(type) { + case *appsv1.StatefulSet: + makeStatefulSetReady(v) + } + + relevantMap[objKey] = obj + return nil +} + +func (m mockedClient) Update(_ context.Context, obj k8sClient.Object, _ ...k8sClient.UpdateOption) error { + relevantMap := m.ensureMapFor(obj) + objKey := k8sClient.ObjectKeyFromObject(obj) + if _, ok := relevantMap[objKey]; !ok { + return errors.NewNotFound(schema.GroupResource{}, obj.GetName()) + } + relevantMap[objKey] = obj + return nil +} + +func (m mockedClient) Patch(_ context.Context, obj k8sClient.Object, patch k8sClient.Patch, _ ...k8sClient.PatchOption) error { + if patch.Type() != types.JSONPatchType { + return fmt.Errorf("patch types different from JSONPatchType are not yet implemented") + } + relevantMap := m.ensureMapFor(obj) + objKey := k8sClient.ObjectKeyFromObject(obj) + var patches []patchValue + data, err := patch.Data(obj) + if err != nil { + return err + } + err = json.Unmarshal(data, &patches) + if err != nil { + return err + } + objectAnnotations := obj.GetAnnotations() + for _, patch := range patches { + if patch.Op != "replace" { + return fmt.Errorf("patch operations 
different from \"replace\" are not yet implemented") + } + if !strings.HasPrefix(patch.Path, "/metadata/annotations") { + return fmt.Errorf("patch that modify something different from annotations are not yet implemented") + } + if patch.Path == "/metadata/annotations" { + objectAnnotations = map[string]string{} + continue + } + pathElements := strings.SplitAfterN(patch.Path, "/metadata/annotations/", 2) + finalPatchPath := strings.Replace(pathElements[1], "~1", "/", 1) + switch val := patch.Value.(type) { + case string: + objectAnnotations[finalPatchPath] = val + default: + return fmt.Errorf("patch operations with values that are not strings are not implemented yet: %+v", pathElements[1]) + } + } + obj.SetAnnotations(objectAnnotations) + relevantMap[objKey] = obj + return nil +} + +type mockedStatusWriter struct { + parent mockedClient +} + +func (m mockedStatusWriter) Create(ctx context.Context, obj k8sClient.Object, _ k8sClient.Object, _ ...k8sClient.SubResourceCreateOption) error { + return m.parent.Create(ctx, obj) +} + +func (m mockedStatusWriter) Update(ctx context.Context, obj k8sClient.Object, _ ...k8sClient.SubResourceUpdateOption) error { + return m.parent.Update(ctx, obj) +} + +func (m mockedStatusWriter) Patch(ctx context.Context, obj k8sClient.Object, patch k8sClient.Patch, _ ...k8sClient.SubResourcePatchOption) error { + return m.parent.Patch(ctx, obj, patch) +} + +func (m mockedClient) SubResource(string) k8sClient.SubResourceClient { + panic("implement me") } // notFoundError returns an error which returns true for "errors.IsNotFound" @@ -26,82 +137,64 @@ func alreadyExistsError() error { } func NewMockedClient() k8sClient.Client { - return &mockedClient{backingMap: map[reflect.Type]map[k8sClient.ObjectKey]runtime.Object{}} + return &mockedClient{backingMap: map[reflect.Type]map[k8sClient.ObjectKey]k8sClient.Object{}} } -func (m *mockedClient) ensureMapFor(obj runtime.Object) map[k8sClient.ObjectKey]runtime.Object { +func (m mockedClient) 
ensureMapFor(obj k8sClient.Object) map[k8sClient.ObjectKey]k8sClient.Object { t := reflect.TypeOf(obj) if _, ok := m.backingMap[t]; !ok { - m.backingMap[t] = map[k8sClient.ObjectKey]runtime.Object{} + m.backingMap[t] = map[k8sClient.ObjectKey]k8sClient.Object{} } return m.backingMap[t] } -func (m *mockedClient) Get(_ context.Context, key k8sClient.ObjectKey, obj runtime.Object) error { +func (m mockedClient) Get(_ context.Context, key k8sClient.ObjectKey, obj k8sClient.Object, _ ...k8sClient.GetOption) error { relevantMap := m.ensureMapFor(obj) if val, ok := relevantMap[key]; ok { - v := reflect.ValueOf(obj).Elem() - v.Set(reflect.ValueOf(val).Elem()) + if currSts, ok := val.(*appsv1.StatefulSet); ok { + // TODO: this currently doesn't work with additional mongodb config + // just doing it for StatefulSets for now + objCopy := currSts.DeepCopyObject() + v := reflect.ValueOf(obj).Elem() + v.Set(reflect.ValueOf(objCopy).Elem()) + } else { + v := reflect.ValueOf(obj).Elem() + v.Set(reflect.ValueOf(val).Elem()) + } return nil } return notFoundError() } -func (m *mockedClient) Create(_ context.Context, obj runtime.Object, _ ...k8sClient.CreateOption) error { - relevantMap := m.ensureMapFor(obj) - objKey, err := k8sClient.ObjectKeyFromObject(obj) - if err != nil { - return err - } - if _, ok := relevantMap[objKey]; ok { - return alreadyExistsError() - } - - switch v := obj.(type) { - case *appsv1.StatefulSet: - makeStatefulSetReady(v) - } - - relevantMap[objKey] = obj - return nil -} - -// makeStatefulSetReady configures the statefulset to be in the running state. +// makeStatefulSetReady configures the StatefulSet to be in the running state. 
func makeStatefulSetReady(set *appsv1.StatefulSet) { set.Status.UpdatedReplicas = *set.Spec.Replicas set.Status.ReadyReplicas = *set.Spec.Replicas } -func (m *mockedClient) List(_ context.Context, _ runtime.Object, _ ...k8sClient.ListOption) error { +func (m mockedClient) List(_ context.Context, _ k8sClient.ObjectList, _ ...k8sClient.ListOption) error { return nil } -func (m *mockedClient) Delete(_ context.Context, _ runtime.Object, _ ...k8sClient.DeleteOption) error { +func (m mockedClient) Delete(_ context.Context, obj k8sClient.Object, _ ...k8sClient.DeleteOption) error { + relevantMap := m.ensureMapFor(obj) + objKey := k8sClient.ObjectKeyFromObject(obj) + delete(relevantMap, objKey) return nil } -func (m *mockedClient) Update(_ context.Context, obj runtime.Object, _ ...k8sClient.UpdateOption) error { - relevantMap := m.ensureMapFor(obj) - objKey, err := k8sClient.ObjectKeyFromObject(obj) - if err != nil { - return err - } - switch v := obj.(type) { - case *appsv1.StatefulSet: - makeStatefulSetReady(v) - } - relevantMap[objKey] = obj +func (m mockedClient) DeleteAllOf(_ context.Context, _ k8sClient.Object, _ ...k8sClient.DeleteAllOfOption) error { return nil } -func (m *mockedClient) Patch(_ context.Context, _ runtime.Object, _ k8sClient.Patch, _ ...k8sClient.PatchOption) error { - return nil +func (m mockedClient) Status() k8sClient.StatusWriter { + return mockedStatusWriter{parent: m} } -func (m *mockedClient) DeleteAllOf(_ context.Context, _ runtime.Object, _ ...k8sClient.DeleteAllOfOption) error { +func (m mockedClient) RESTMapper() meta.RESTMapper { return nil } -func (m *mockedClient) Status() k8sClient.StatusWriter { - return m +func (m mockedClient) Scheme() *runtime.Scheme { + return nil } diff --git a/pkg/kube/client/mocked_client_test.go b/pkg/kube/client/mocked_client_test.go index 9eb310fd4..870b85380 100644 --- a/pkg/kube/client/mocked_client_test.go +++ b/pkg/kube/client/mocked_client_test.go @@ -2,31 +2,35 @@ package client import ( "context" + 
"testing" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "testing" ) func TestMockedClient(t *testing.T) { + ctx := context.Background() mockedClient := NewMockedClient() cm := configmap.Builder(). SetName("cm-name"). SetNamespace("cm-namespace"). - SetField("field-1", "value-1"). + SetDataField("field-1", "value-1"). + SetData(map[string]string{"key-2": "field-2"}). Build() - err := mockedClient.Create(context.TODO(), &cm) + err := mockedClient.Create(ctx, &cm) assert.NoError(t, err) newCm := corev1.ConfigMap{} - err = mockedClient.Get(context.TODO(), types.NamespacedName{Name: "cm-name", Namespace: "cm-namespace"}, &newCm) + err = mockedClient.Get(ctx, types.NamespacedName{Name: "cm-name", Namespace: "cm-namespace"}, &newCm) assert.NoError(t, err) assert.Equal(t, "cm-namespace", newCm.Namespace) assert.Equal(t, "cm-name", newCm.Name) + assert.Equal(t, newCm.Data, map[string]string{"field-1": "value-1", "key-2": "field-2"}) svc := service.Builder(). SetName("svc-name"). @@ -34,11 +38,11 @@ func TestMockedClient(t *testing.T) { SetServiceType("service-type"). 
Build() - err = mockedClient.Create(context.TODO(), &svc) + err = mockedClient.Create(ctx, &svc) assert.NoError(t, err) newSvc := corev1.Service{} - err = mockedClient.Get(context.TODO(), types.NamespacedName{Name: "svc-name", Namespace: "svc-namespace"}, &newSvc) + err = mockedClient.Get(ctx, types.NamespacedName{Name: "svc-name", Namespace: "svc-namespace"}, &newSvc) assert.NoError(t, err) assert.Equal(t, "svc-namespace", newSvc.Namespace) assert.Equal(t, "svc-name", newSvc.Name) diff --git a/pkg/kube/client/mocked_manager.go b/pkg/kube/client/mocked_manager.go index 25a257cd6..2f9a3d30c 100644 --- a/pkg/kube/client/mocked_manager.go +++ b/pkg/kube/client/mocked_manager.go @@ -2,7 +2,11 @@ package client import ( "context" + "net/http" + "sigs.k8s.io/controller-runtime/pkg/config" + "time" + "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" @@ -17,21 +21,33 @@ import ( // MockedManager exists to unit test the reconciliation loops and wrap the mocked client type MockedManager struct { - client k8sClient.Client + Client Client } -func NewManager(obj runtime.Object) *MockedManager { +func NewManager(ctx context.Context, obj k8sClient.Object) *MockedManager { c := NewMockedClient() if obj != nil { - _ = c.Create(context.TODO(), obj) + _ = c.Create(ctx, obj) } - return &MockedManager{client: c} + return &MockedManager{Client: NewClient(c)} +} + +func NewManagerWithClient(c k8sClient.Client) *MockedManager { + return &MockedManager{Client: NewClient(c)} +} + +func (m *MockedManager) GetHTTPClient() *http.Client { + panic("implement me") } func (m *MockedManager) Add(_ manager.Runnable) error { return nil } +func (m *MockedManager) Elected() <-chan struct{} { + return nil +} + // SetFields will set any dependencies on an object for which the object has implemented the inject // interface - e.g. inject.Client. 
func (m *MockedManager) SetFields(interface{}) error { @@ -40,7 +56,7 @@ func (m *MockedManager) SetFields(interface{}) error { // Start starts all registered Controllers and blocks until the Stop channel is closed. // Returns an error if there is an error starting any controller. -func (m *MockedManager) Start(<-chan struct{}) error { +func (m *MockedManager) Start(context.Context) error { return nil } @@ -57,8 +73,7 @@ func (m *MockedManager) GetScheme() *runtime.Scheme { // GetAdmissionDecoder returns the runtime.Decoder based on the scheme. func (m *MockedManager) GetAdmissionDecoder() admission.Decoder { // just returning nothing - d, _ := admission.NewDecoder(runtime.NewScheme()) - return *d + return admission.NewDecoder(runtime.NewScheme()) } // GetAPIReader returns the client reader @@ -68,7 +83,7 @@ func (m *MockedManager) GetAPIReader() k8sClient.Reader { // GetClient returns a client configured with the Config func (m *MockedManager) GetClient() k8sClient.Client { - return m.client + return m.Client } func (m *MockedManager) GetEventRecorderFor(_ string) record.EventRecorder { @@ -95,7 +110,11 @@ func (m *MockedManager) GetRESTMapper() meta.RESTMapper { return nil } -func (m *MockedManager) GetWebhookServer() *webhook.Server { +func (m *MockedManager) GetWebhookServer() webhook.Server { + return nil +} + +func (m *MockedManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { return nil } @@ -108,3 +127,14 @@ func (m *MockedManager) AddHealthzCheck(name string, check healthz.Checker) erro func (m *MockedManager) AddReadyzCheck(name string, check healthz.Checker) error { return nil } + +func (m *MockedManager) GetLogger() logr.Logger { + return logr.Logger{} +} + +func (m *MockedManager) GetControllerOptions() config.Controller { + var duration = time.Duration(0) + return config.Controller{ + CacheSyncTimeout: duration, + } +} diff --git a/pkg/kube/configmap/configmap.go b/pkg/kube/configmap/configmap.go new file mode 100644 index 
000000000..36f38d469 --- /dev/null +++ b/pkg/kube/configmap/configmap.go @@ -0,0 +1,144 @@ +package configmap + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Getter interface { + GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) +} + +type Updater interface { + UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error +} + +type Creator interface { + CreateConfigMap(ctx context.Context, cm corev1.ConfigMap) error +} + +type Deleter interface { + DeleteConfigMap(ctx context.Context, key client.ObjectKey) error +} + +type GetUpdater interface { + Getter + Updater +} + +type GetUpdateCreator interface { + Getter + Updater + Creator +} + +type GetUpdateCreateDeleter interface { + Getter + Updater + Creator + Deleter +} + +const ( + lineSeparator = "\n" + keyValueSeparator = "=" +) + +// ReadKey accepts a ConfigMap Getter, the object of the ConfigMap to get, and the key within +// the config map to read. It returns the string value, and an error if one occurred. 
+func ReadKey(ctx context.Context, getter Getter, key string, objectKey client.ObjectKey) (string, error) { + data, err := ReadData(ctx, getter, objectKey) + if err != nil { + return "", err + } + if val, ok := data[key]; ok { + return val, nil + } + return "", fmt.Errorf("key \"%s\" not present in ConfigMap %s/%s", key, objectKey.Namespace, objectKey.Name) +} + +// ReadData extracts the contents of the Data field in a given config map +func ReadData(ctx context.Context, getter Getter, key client.ObjectKey) (map[string]string, error) { + cm, err := getter.GetConfigMap(ctx, key) + if err != nil { + return nil, err + } + return cm.Data, nil +} + +// UpdateField updates the sets "key" to the given "value" +func UpdateField(ctx context.Context, getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { + cm, err := getUpdater.GetConfigMap(ctx, objectKey) + if err != nil { + return err + } + cm.Data[key] = value + return getUpdater.UpdateConfigMap(ctx, cm) +} + +// CreateOrUpdate creates the given ConfigMap if it doesn't exist, +// or updates it if it does. +func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, cm corev1.ConfigMap) error { + if err := getUpdateCreator.UpdateConfigMap(ctx, cm); err != nil { + if apiErrors.IsNotFound(err) { + return getUpdateCreator.CreateConfigMap(ctx, cm) + } else { + return err + } + } + return nil +} + +// filelikePropertiesToMap converts a file-like field in a ConfigMap to a map[string]string. 
+func filelikePropertiesToMap(s string) (map[string]string, error) { + keyValPairs := map[string]string{} + s = strings.TrimRight(s, lineSeparator) + for _, keyValPair := range strings.Split(s, lineSeparator) { + splittedPair := strings.Split(keyValPair, keyValueSeparator) + if len(splittedPair) != 2 { + return nil, fmt.Errorf("%s is not a valid key-value pair", keyValPair) + } + keyValPairs[splittedPair[0]] = splittedPair[1] + } + return keyValPairs, nil +} + +// ReadFileLikeField reads a ConfigMap with file-like properties and returns the value inside one of the fields. +func ReadFileLikeField(ctx context.Context, getter Getter, objectKey client.ObjectKey, externalKey string, internalKey string) (string, error) { + cmData, err := ReadData(ctx, getter, objectKey) + if err != nil { + return "", err + } + mappingString, ok := cmData[externalKey] + if !ok { + return "", fmt.Errorf("key %s is not present in ConfigMap %s", externalKey, objectKey) + } + mapping, err := filelikePropertiesToMap(mappingString) + if err != nil { + return "", err + } + value, ok := mapping[internalKey] + if !ok { + return "", fmt.Errorf("key %s is not present in the %s field of ConfigMap %s", internalKey, externalKey, objectKey) + } + return value, nil +} + +// Exists return whether a configmap with the given namespaced name exists +func Exists(ctx context.Context, cmGetter Getter, nsName types.NamespacedName) (bool, error) { + _, err := cmGetter.GetConfigMap(ctx, nsName) + + if err != nil { + if apiErrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/pkg/kube/configmap/configmap_builder.go b/pkg/kube/configmap/configmap_builder.go index 5341158bd..7c93d9331 100644 --- a/pkg/kube/configmap/configmap_builder.go +++ b/pkg/kube/configmap/configmap_builder.go @@ -10,6 +10,7 @@ type builder struct { name string namespace string ownerReferences []metav1.OwnerReference + labels map[string]string } func (b *builder) SetName(name string) *builder 
{ @@ -22,7 +23,7 @@ func (b *builder) SetNamespace(namespace string) *builder { return b } -func (b *builder) SetField(key, value string) *builder { +func (b *builder) SetDataField(key, value string) *builder { b.data[key] = value return b } @@ -32,12 +33,29 @@ func (b *builder) SetOwnerReferences(ownerReferences []metav1.OwnerReference) *b return b } +func (b *builder) SetLabels(labels map[string]string) *builder { + newLabels := make(map[string]string) + for k, v := range labels { + newLabels[k] = v + } + b.labels = newLabels + return b +} + +func (b *builder) SetData(data map[string]string) *builder { + for k, v := range data { + b.SetDataField(k, v) + } + return b +} + func (b builder) Build() corev1.ConfigMap { return corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: b.name, Namespace: b.namespace, OwnerReferences: b.ownerReferences, + Labels: b.labels, }, Data: b.data, } @@ -47,5 +65,6 @@ func Builder() *builder { return &builder{ data: map[string]string{}, ownerReferences: []metav1.OwnerReference{}, + labels: map[string]string{}, } } diff --git a/pkg/kube/configmap/configmap_test.go b/pkg/kube/configmap/configmap_test.go new file mode 100644 index 000000000..1d731573a --- /dev/null +++ b/pkg/kube/configmap/configmap_test.go @@ -0,0 +1,168 @@ +package configmap + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type configMapGetter struct { + cm corev1.ConfigMap +} + +func (c configMapGetter) GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) { + if c.cm.Name == objectKey.Name && c.cm.Namespace == objectKey.Namespace { + return c.cm, nil + } + return corev1.ConfigMap{}, notFoundError() +} + +func newGetter(cm corev1.ConfigMap) Getter { + return configMapGetter{ + cm: cm, + } +} + +func 
TestReadKey(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("key1", "value1"). + SetDataField("key2", "value2"). + Build(), + ) + + value, err := ReadKey(ctx, getter, "key1", nsName("namespace", "name")) + assert.Equal(t, "value1", value) + assert.NoError(t, err) + + value, err = ReadKey(ctx, getter, "key2", nsName("namespace", "name")) + assert.Equal(t, "value2", value) + assert.NoError(t, err) + + _, err = ReadKey(ctx, getter, "key3", nsName("namespace", "name")) + assert.Error(t, err) +} + +func TestReadData(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("key1", "value1"). + SetDataField("key2", "value2"). + Build(), + ) + + data, err := ReadData(ctx, getter, nsName("namespace", "name")) + assert.NoError(t, err) + + assert.Contains(t, data, "key1") + assert.Contains(t, data, "key2") + + assert.Equal(t, "value1", data["key1"]) + assert.Equal(t, "value2", data["key2"]) +} + +func TestReadFileLikeField(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("key1", "value1=1\nvalue2=2"). + Build(), + ) + + data, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key1", "value1") + assert.NoError(t, err) + + assert.Equal(t, "1", data) +} + +func TestReadFileLikeField_InvalidExternalKey(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("key1", "value1=1\nvalue2=2"). 
+ Build(), + ) + + _, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key2", "value1") + assert.Error(t, err) + assert.Equal(t, "key key2 is not present in ConfigMap namespace/name", err.Error()) +} + +func TestReadFileLikeField_InvalidInternalKey(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("key1", "value1=1\nvalue2=2"). + Build(), + ) + + _, err := ReadFileLikeField(ctx, getter, nsName("namespace", "name"), "key1", "value3") + assert.Error(t, err) + assert.Equal(t, "key value3 is not present in the key1 field of ConfigMap namespace/name", err.Error()) +} + +type configMapGetUpdater struct { + cm corev1.ConfigMap +} + +func (c configMapGetUpdater) GetConfigMap(ctx context.Context, objectKey client.ObjectKey) (corev1.ConfigMap, error) { + if c.cm.Name == objectKey.Name && c.cm.Namespace == objectKey.Namespace { + return c.cm, nil + } + return corev1.ConfigMap{}, notFoundError() +} + +func (c *configMapGetUpdater) UpdateConfigMap(ctx context.Context, cm corev1.ConfigMap) error { + c.cm = cm + return nil +} + +func newGetUpdater(cm corev1.ConfigMap) GetUpdater { + return &configMapGetUpdater{ + cm: cm, + } +} + +func TestUpdateField(t *testing.T) { + ctx := context.Background() + getUpdater := newGetUpdater( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetDataField("field1", "value1"). + SetDataField("field2", "value2"). 
+ Build(), + ) + err := UpdateField(ctx, getUpdater, nsName("namespace", "name"), "field1", "newValue") + assert.NoError(t, err) + val, _ := ReadKey(ctx, getUpdater, "field1", nsName("namespace", "name")) + assert.Equal(t, "newValue", val) + val2, _ := ReadKey(ctx, getUpdater, "field2", nsName("namespace", "name")) + assert.Equal(t, "value2", val2) +} + +func nsName(namespace, name string) types.NamespacedName { + return types.NamespacedName{Name: name, Namespace: namespace} +} + +func notFoundError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} +} diff --git a/pkg/kube/container/container_test.go b/pkg/kube/container/container_test.go new file mode 100644 index 000000000..a61a0be15 --- /dev/null +++ b/pkg/kube/container/container_test.go @@ -0,0 +1,230 @@ +package container + +import ( + "fmt" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/lifecycle" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" +) + +func TestContainer(t *testing.T) { + c := New( + WithName("name"), + WithImage("image"), + WithImagePullPolicy(corev1.PullAlways), + WithPorts([]corev1.ContainerPort{{Name: "port-1", ContainerPort: int32(1000)}}), + WithSecurityContext(corev1.SecurityContext{ + RunAsGroup: int64Ref(100), + RunAsNonRoot: boolRef(true), + }), + WithLifecycle(lifecycle.Apply( + lifecycle.WithPrestopCommand([]string{"pre-stop-command"}), + )), + WithReadinessProbe(probes.Apply( + probes.WithExecCommand([]string{"exec"}), + probes.WithFailureThreshold(10), + probes.WithPeriodSeconds(5), + )), + WithLivenessProbe(probes.Apply( + probes.WithExecCommand([]string{"liveness-exec"}), + probes.WithFailureThreshold(15), + probes.WithPeriodSeconds(10), + )), + WithStartupProbe( + 
probes.Apply( + probes.WithExecCommand([]string{"startup-exec"}), + probes.WithFailureThreshold(20), + probes.WithPeriodSeconds(30), + ), + ), + WithResourceRequirements(resourcerequirements.Defaults()), + WithCommand([]string{"container-cmd"}), + WithEnvs( + []corev1.EnvVar{ + { + Name: "env-1", + Value: "env-1-value", + }, + }..., + ), + ) + + assert.Equal(t, "name", c.Name) + assert.Equal(t, "image", c.Image) + assert.Equal(t, corev1.PullAlways, c.ImagePullPolicy) + + assert.Len(t, c.Ports, 1) + assert.Equal(t, int32(1000), c.Ports[0].ContainerPort) + assert.Equal(t, "port-1", c.Ports[0].Name) + + securityContext := c.SecurityContext + assert.Equal(t, int64Ref(100), securityContext.RunAsGroup) + assert.Equal(t, boolRef(true), securityContext.RunAsNonRoot) + + readinessProbe := c.ReadinessProbe + assert.Equal(t, int32(10), readinessProbe.FailureThreshold) + assert.Equal(t, int32(5), readinessProbe.PeriodSeconds) + assert.Equal(t, "exec", readinessProbe.Exec.Command[0]) + + liveNessProbe := c.LivenessProbe + assert.Equal(t, int32(15), liveNessProbe.FailureThreshold) + assert.Equal(t, int32(10), liveNessProbe.PeriodSeconds) + assert.Equal(t, "liveness-exec", liveNessProbe.Exec.Command[0]) + + startupProbe := c.StartupProbe + assert.Equal(t, int32(20), startupProbe.FailureThreshold) + assert.Equal(t, int32(30), startupProbe.PeriodSeconds) + assert.Equal(t, "startup-exec", startupProbe.Exec.Command[0]) + + assert.Equal(t, c.Resources, resourcerequirements.Defaults()) + + assert.Len(t, c.Command, 1) + assert.Equal(t, "container-cmd", c.Command[0]) + + lifeCycle := c.Lifecycle + assert.NotNil(t, lifeCycle) + assert.NotNil(t, lifeCycle.PreStop) + assert.NotNil(t, lifeCycle.PreStop.Exec) + assert.Equal(t, "pre-stop-command", lifeCycle.PreStop.Exec.Command[0]) + + assert.Len(t, c.Env, 1) + assert.Equal(t, "env-1", c.Env[0].Name) + assert.Equal(t, "env-1-value", c.Env[0].Value) +} + +func TestMergeEnvs(t *testing.T) { + existing := []corev1.EnvVar{ + { + Name: "C_env", + 
Value: "C_value", + }, + { + Name: "B_env", + Value: "B_value", + }, + { + Name: "A_env", + Value: "A_value", + }, + { + Name: "F_env", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "f_key", + }, + }, + }, + } + + desired := []corev1.EnvVar{ + { + Name: "D_env", + Value: "D_value", + }, + { + Name: "E_env", + Value: "E_value", + }, + { + Name: "C_env", + Value: "C_value_new", + }, + { + Name: "B_env", + Value: "B_value_new", + }, + { + Name: "A_env", + Value: "A_value", + }, + } + + merged := envvar.MergeWithOverride(existing, desired) // nolint:forbidigo + + t.Run("EnvVars should be sorted", func(t *testing.T) { + assert.Equal(t, "A_env", merged[0].Name) + assert.Equal(t, "B_env", merged[1].Name) + assert.Equal(t, "C_env", merged[2].Name) + assert.Equal(t, "D_env", merged[3].Name) + assert.Equal(t, "E_env", merged[4].Name) + assert.Equal(t, "F_env", merged[5].Name) + }) + + t.Run("EnvVars of same name are updated", func(t *testing.T) { + assert.Equal(t, "B_env", merged[1].Name) + assert.Equal(t, "B_value_new", merged[1].Value) + }) + + t.Run("Existing EnvVars are not touched", func(t *testing.T) { + envVar := merged[5] + assert.NotNil(t, envVar.ValueFrom) + assert.Equal(t, "f_key", envVar.ValueFrom.SecretKeyRef.Key) + }) +} + +func TestWithVolumeMounts(t *testing.T) { + c := New( + WithVolumeMounts( + []corev1.VolumeMount{ + { + Name: "name-0", + MountPath: "mount-path-0", + SubPath: "sub-path-0", + }, + { + Name: "name-1", + MountPath: "mount-path-1", + SubPath: "sub-path-1", + }, + { + Name: "name-2", + MountPath: "mount-path-2", + SubPath: "sub-path-2", + }, + }, + ), + ) + + newVolumeMounts := []corev1.VolumeMount{ + { + Name: "name-0", + MountPath: "mount-path-0", + SubPath: "sub-path-0", + }, + { + Name: "name-4", + MountPath: "mount-path-4", + SubPath: "sub-path-4", + }, + { + Name: "name-3", + MountPath: "mount-path-3", + SubPath: "sub-path-3", + }, + } + + WithVolumeMounts(newVolumeMounts)(&c) + + assert.Len(t, 
c.VolumeMounts, 5, "duplicates should have been removed") + for i, v := range c.VolumeMounts { + assert.Equal(t, fmt.Sprintf("name-%d", i), v.Name, "Volumes should be sorted but were not!") + assert.Equal(t, fmt.Sprintf("mount-path-%d", i), v.MountPath, "Volumes should be sorted but were not!") + assert.Equal(t, fmt.Sprintf("sub-path-%d", i), v.SubPath, "Volumes should be sorted but were not!") + } + +} + +func boolRef(b bool) *bool { + return &b +} + +func int64Ref(i int64) *int64 { + return &i +} diff --git a/pkg/kube/container/container_util.go b/pkg/kube/container/container_util.go new file mode 100644 index 000000000..cd40c2cb5 --- /dev/null +++ b/pkg/kube/container/container_util.go @@ -0,0 +1,14 @@ +package container + +import corev1 "k8s.io/api/core/v1" + +// GetByName returns a container with the given name from the slice of containers. +// nil is returned if the container does not exist. +func GetByName(name string, containers []corev1.Container) *corev1.Container { + for i, c := range containers { + if c.Name == name { + return &containers[i] + } + } + return nil +} diff --git a/pkg/kube/container/containers.go b/pkg/kube/container/containers.go new file mode 100644 index 000000000..687befc5b --- /dev/null +++ b/pkg/kube/container/containers.go @@ -0,0 +1,210 @@ +package container + +import ( + "sort" + "strings" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/lifecycle" + corev1 "k8s.io/api/core/v1" +) + +type Modification func(*corev1.Container) + +// Apply returns a function which applies a series of Modification functions to a *corev1.Container +func Apply(modifications ...Modification) Modification { + return func(container *corev1.Container) { + for _, mod := range modifications { + mod(container) + } + } +} + +// New returns a concrete corev1.Container instance which has been modified based on the provided +// modifications +func New(mods ...Modification) 
corev1.Container { + c := corev1.Container{} + for _, mod := range mods { + mod(&c) + } + return c +} + +// NOOP is a valid Modification which applies no changes +func NOOP() Modification { + return func(container *corev1.Container) {} +} + +// WithName sets the container name +func WithName(name string) Modification { + return func(container *corev1.Container) { + container.Name = name + } +} + +// WithImage sets the container image +func WithImage(image string) Modification { + return func(container *corev1.Container) { + container.Image = image + } +} + +// WithImagePullPolicy sets the container pullPolicy +func WithImagePullPolicy(pullPolicy corev1.PullPolicy) Modification { + return func(container *corev1.Container) { + container.ImagePullPolicy = pullPolicy + } +} + +// WithWorkDir sets the container Working Directory +func WithWorkDir(workDir string) Modification { + return func(container *corev1.Container) { + container.WorkingDir = workDir + } +} + +// WithReadinessProbe modifies the container's Readiness Probe +func WithReadinessProbe(probeFunc func(*corev1.Probe)) Modification { + return func(container *corev1.Container) { + if container.ReadinessProbe == nil { + container.ReadinessProbe = &corev1.Probe{} + } + probeFunc(container.ReadinessProbe) + } +} + +// WithLivenessProbe modifies the container's Liveness Probe +func WithLivenessProbe(livenessProbeFunc func(*corev1.Probe)) Modification { + return func(container *corev1.Container) { + if container.LivenessProbe == nil { + container.LivenessProbe = &corev1.Probe{} + } + livenessProbeFunc(container.LivenessProbe) + } +} + +// WithStartupProbe modifies the container's Startup Probe +func WithStartupProbe(startupProbeFunc func(*corev1.Probe)) Modification { + return func(container *corev1.Container) { + if container.StartupProbe == nil { + container.StartupProbe = &corev1.Probe{} + } + startupProbeFunc(container.StartupProbe) + } +} + +// WithResourceRequirements sets the container's Resources +func 
WithResourceRequirements(resources corev1.ResourceRequirements) Modification { + return func(container *corev1.Container) { + container.Resources = resources + } +} + +// WithCommand sets the containers Command +func WithCommand(cmd []string) Modification { + return func(container *corev1.Container) { + container.Command = cmd + } +} + +// WithArgs sets the containers Args +func WithArgs(args []string) Modification { + return func(container *corev1.Container) { + container.Args = args + } +} + +// WithLifecycle applies the lifecycle Modification to this container's +// Lifecycle +func WithLifecycle(lifeCycleMod lifecycle.Modification) Modification { + return func(container *corev1.Container) { + if container.Lifecycle == nil { + container.Lifecycle = &corev1.Lifecycle{} + } + lifeCycleMod(container.Lifecycle) + } +} + +// WithEnvs ensures all of the provided envs exist in the container +func WithEnvs(envs ...corev1.EnvVar) Modification { + return func(container *corev1.Container) { + container.Env = envvar.MergeWithOverride(container.Env, envs) // nolint:forbidigo + } +} + +// WithVolumeMounts sets the VolumeMounts +func WithVolumeMounts(volumeMounts []corev1.VolumeMount) Modification { + volumesMountsCopy := make([]corev1.VolumeMount, len(volumeMounts)) + copy(volumesMountsCopy, volumeMounts) + return func(container *corev1.Container) { + merged := map[string]corev1.VolumeMount{} + for _, ex := range container.VolumeMounts { + merged[volumeMountToString(ex)] = ex + } + for _, des := range volumesMountsCopy { + merged[volumeMountToString(des)] = des + } + + var final []corev1.VolumeMount + for _, v := range merged { + final = append(final, v) + } + sort.SliceStable(final, func(i, j int) bool { + a := final[i] + b := final[j] + return volumeMountToString(a) < volumeMountToString(b) + }) + container.VolumeMounts = final + } +} + +func RemoveVolumeMount(volumeMount string) Modification { + return func(container *corev1.Container) { + index := 0 + found := false + for 
i := range container.VolumeMounts { + if container.VolumeMounts[i].Name == volumeMount { + index = i + found = true + } + } + + if found { + container.VolumeMounts = append(container.VolumeMounts[:index], container.VolumeMounts[index+1:]...) + } + } +} + +func volumeMountToString(v corev1.VolumeMount) string { + return strings.Join([]string{v.Name, v.MountPath, v.SubPath}, "-") +} + +// WithPWithVolumeDevice sets the container's VolumeDevices +func WithVolumeDevices(devices []corev1.VolumeDevice) Modification { + return func(container *corev1.Container) { + container.VolumeDevices = devices + } +} + +// WithPorts sets the container's Ports +func WithPorts(ports []corev1.ContainerPort) Modification { + return func(container *corev1.Container) { + container.Ports = ports + } +} + +// WithSecurityContext sets the container's SecurityContext +func WithSecurityContext(context corev1.SecurityContext) Modification { + return func(container *corev1.Container) { + container.SecurityContext = &context + } +} + +// DefaultSecurityContext returns the default container security context with: +// - readOnlyRootFilesystem set to true +func DefaultSecurityContext() corev1.SecurityContext { + readOnlyRootFilesystem := true + allowPrivilegeEscalation := false + return corev1.SecurityContext{ReadOnlyRootFilesystem: &readOnlyRootFilesystem, AllowPrivilegeEscalation: &allowPrivilegeEscalation} +} diff --git a/pkg/kube/lifecycle/lifecyle.go b/pkg/kube/lifecycle/lifecyle.go new file mode 100644 index 000000000..63f2f4b05 --- /dev/null +++ b/pkg/kube/lifecycle/lifecyle.go @@ -0,0 +1,27 @@ +package lifecycle + +import corev1 "k8s.io/api/core/v1" + +type Modification func(lifecycle *corev1.Lifecycle) + +// Apply returns a function which applies a series of Modification functions to a *corev1.Lifecycle +func Apply(modifications ...Modification) Modification { + return func(lifecycle *corev1.Lifecycle) { + for _, mod := range modifications { + mod(lifecycle) + } + } +} + +// 
WithPrestopCommand sets the LifeCycles PreStop Exec Command +func WithPrestopCommand(preStopCmd []string) Modification { + return func(lc *corev1.Lifecycle) { + if lc.PreStop == nil { + lc.PreStop = &corev1.LifecycleHandler{} + } + if lc.PreStop.Exec == nil { + lc.PreStop.Exec = &corev1.ExecAction{} + } + lc.PreStop.Exec.Command = preStopCmd + } +} diff --git a/pkg/kube/persistentvolumeclaim/pvc.go b/pkg/kube/persistentvolumeclaim/pvc.go new file mode 100644 index 000000000..36231b66b --- /dev/null +++ b/pkg/kube/persistentvolumeclaim/pvc.go @@ -0,0 +1,64 @@ +package persistentvolumeclaim + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Modification func(claim *corev1.PersistentVolumeClaim) + +// Apply returns a function which applies a series of Modification functions to a *corev1.PersistentVolumeClaim +func Apply(funcs ...Modification) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + for _, f := range funcs { + f(claim) + } + } +} + +// NOOP is a valid Modification which applies no changes +func NOOP() Modification { + return func(claim *corev1.PersistentVolumeClaim) {} +} + +// WithName sets the PersistentVolumeClaim's name +func WithName(name string) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Name = name + } +} + +// WithAccessModes sets the PersistentVolumeClaim's AccessModes +func WithAccessModes(accessMode corev1.PersistentVolumeAccessMode) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{accessMode} + } +} + +// WithResourceRequests sets the PersistentVolumeClaim's Resource Requests +func WithResourceRequests(requests corev1.ResourceList) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Spec.Resources.Requests = requests + } +} + +// WithLabelSelector sets the PersistentVolumeClaim's LevelSelector +func WithLabelSelector(selector 
*metav1.LabelSelector) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Spec.Selector = selector + } +} + +// WithStorageClassName sets the PersistentVolumeClaim's storage class name +func WithStorageClassName(storageClassName string) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Spec.StorageClassName = &storageClassName + } +} + +// WithLabels sets the PersistentVolumeClaim's labels +func WithLabels(labels map[string]string) Modification { + return func(claim *corev1.PersistentVolumeClaim) { + claim.Labels = labels + } +} diff --git a/pkg/kube/pod/pod.go b/pkg/kube/pod/pod.go index d5696912e..7b991a694 100644 --- a/pkg/kube/pod/pod.go +++ b/pkg/kube/pod/pod.go @@ -1,82 +1,12 @@ package pod import ( - "bufio" "context" - "fmt" - "io" - "time" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - typedCorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" ) -type Streamer interface { - Stream() (io.ReadCloser, error) -} - -// CoreV1FollowStreamer returns a Streamer that will stream the logs to -// the given pod -func CoreV1FollowStreamer(pod corev1.Pod, corev1Interface typedCorev1.CoreV1Interface) Streamer { - return corev1Interface. - Pods(pod.Namespace). - GetLogs(pod.Name, &corev1.PodLogOptions{ - Follow: true, - }) -} - -// GetLogs will follow the logs of the provided pod to the given io.Writer until the pod has -// been terminated or has completed. 
-func GetLogs(writer io.Writer, streamer Streamer) error { - podLogs, err := streamer.Stream() - - if err != nil { - return fmt.Errorf("error in opening stream: %v", err) - } - - defer podLogs.Close() - - sc := bufio.NewScanner(podLogs) - for sc.Scan() { - if _, err := fmt.Fprintln(writer, sc.Text()); err != nil { - return err - } - } - - if sc.Err() != nil { - return fmt.Errorf("error from scanner: %+v", sc.Err()) - } - - return nil -} - -type Poller interface { - Poll(interval, timeout time.Duration, condition wait.ConditionFunc) error -} - -type waitPoller struct{} - -func (p waitPoller) Poll(interval, timeout time.Duration, condition wait.ConditionFunc) error { - return wait.Poll(interval, timeout, condition) -} - -// WaitForPhase waits for a pdo with the given namespacedName to exist, checking every interval with and using -// the provided timeout. The pod itself is returned and any error that occurred. -func WaitForPhase(c client.Client, namespacedName types.NamespacedName, interval, timeout time.Duration, podPhase corev1.PodPhase) (corev1.Pod, error) { - return waitForPhase(c, namespacedName, interval, timeout, podPhase, waitPoller{}) -} - -func waitForPhase(c client.Client, namespacedName types.NamespacedName, interval, timeout time.Duration, podPhase corev1.PodPhase, poller Poller) (corev1.Pod, error) { - pod := corev1.Pod{} - err := poller.Poll(interval, timeout, func() (done bool, err error) { - if err := c.Get(context.TODO(), namespacedName, &pod); err != nil { - return false, err - } - return pod.Status.Phase == podPhase, nil - }) - return pod, err +type Getter interface { + GetPod(ctx context.Context, objectKey client.ObjectKey) (corev1.Pod, error) } diff --git a/pkg/kube/pod/pod_test.go b/pkg/kube/pod/pod_test.go deleted file mode 100644 index 50b75dfbd..000000000 --- a/pkg/kube/pod/pod_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package pod - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - "time" - - 
"github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" -) - -type mockPoller struct{} - -func (m mockPoller) Poll(interval, timeout time.Duration, condition wait.ConditionFunc) error { - elapsedTime := time.Duration(0) - for timeout >= elapsedTime { - done, err := condition() - if err != nil { - return fmt.Errorf("error in condition func: %+v", err) - } - elapsedTime += interval - if done { - return nil - } - } - return fmt.Errorf("timed out") -} - -func TestWaitForPhase(t *testing.T) { - mockedClient := client.NewClient(client.NewMockedClient()) - testPod := newPod(corev1.PodRunning) - err := mockedClient.Update(context.TODO(), &testPod) - assert.NoError(t, err) - _, err = waitForPhase( - mockedClient, - types.NamespacedName{Name: testPod.Name, Namespace: testPod.Namespace}, - time.Second*5, - time.Minute*5, - corev1.PodRunning, - mockPoller{}, - ) - assert.NoError(t, err) - - testPod = newPod(corev1.PodFailed) - _ = mockedClient.Update(context.TODO(), &testPod) - _, err = waitForPhase(mockedClient, - types.NamespacedName{Name: testPod.Name, Namespace: testPod.Namespace}, - time.Second*5, - time.Minute*5, - corev1.PodRunning, - mockPoller{}, - ) - assert.Error(t, err) -} - -func newPod(phase corev1.PodPhase) corev1.Pod { - return corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "test-namespace", - }, - - Spec: corev1.PodSpec{}, - Status: corev1.PodStatus{ - Phase: phase, - }, - } -} - -type mockStreamer struct { - logs string -} - -func (m mockStreamer) Stream() (io.ReadCloser, error) { - return ioutil.NopCloser(strings.NewReader(m.logs)), nil -} - -func TestGetLogs(t *testing.T) { - tests := []struct { - expected string - }{ - {expected: "Hello World"}, - {expected: "Line 1\nLine2\nLine3"}, - {expected: "Some other log line."}, - } - for 
_, tt := range tests { - var b bytes.Buffer - err := GetLogs(&b, mockStreamer{logs: tt.expected}) - assert.NoError(t, err) - assert.Equal(t, tt.expected+"\n", b.String()) - } -} diff --git a/pkg/kube/podtemplatespec/podspec_template.go b/pkg/kube/podtemplatespec/podspec_template.go new file mode 100644 index 000000000..f908a214a --- /dev/null +++ b/pkg/kube/podtemplatespec/podspec_template.go @@ -0,0 +1,309 @@ +package podtemplatespec + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Modification func(*corev1.PodTemplateSpec) + +const ( + notFound = -1 + ManagedSecurityContextEnv = "MANAGED_SECURITY_CONTEXT" +) + +func New(templateMods ...Modification) corev1.PodTemplateSpec { + podTemplateSpec := corev1.PodTemplateSpec{} + for _, templateMod := range templateMods { + templateMod(&podTemplateSpec) + } + return podTemplateSpec +} + +// Apply returns a function which applies a series of Modification functions to a *corev1.PodTemplateSpec +func Apply(templateMods ...Modification) Modification { + return func(template *corev1.PodTemplateSpec) { + for _, f := range templateMods { + f(template) + } + } +} + +// NOOP is a valid Modification which applies no changes +func NOOP() Modification { + return func(spec *corev1.PodTemplateSpec) {} +} + +// WithContainer applies the modifications to the container with the provided name +func WithContainer(name string, containerfunc func(*corev1.Container)) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + idx := findIndexByName(name, podTemplateSpec.Spec.Containers) + if idx == notFound { + // if we are attempting to modify a container that does not exist, we will add a new one + podTemplateSpec.Spec.Containers = append(podTemplateSpec.Spec.Containers, 
corev1.Container{Name: name}) + idx = len(podTemplateSpec.Spec.Containers) - 1 + } + c := &podTemplateSpec.Spec.Containers[idx] + containerfunc(c) + } +} + +// WithContainerByIndex applies the modifications to the container with the provided index +// if the index is out of range, a new container is added to accept these changes. +func WithContainerByIndex(index int, funcs ...func(container *corev1.Container)) func(podTemplateSpec *corev1.PodTemplateSpec) { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + if index >= len(podTemplateSpec.Spec.Containers) { + podTemplateSpec.Spec.Containers = append(podTemplateSpec.Spec.Containers, corev1.Container{}) + } + c := &podTemplateSpec.Spec.Containers[index] + for _, f := range funcs { + f(c) + } + } +} + +// WithInitContainer applies the modifications to the init container with the provided name +func WithInitContainer(name string, containerfunc func(*corev1.Container)) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + idx := findIndexByName(name, podTemplateSpec.Spec.InitContainers) + if idx == notFound { + // if we are attempting to modify a container that does not exist, we will add a new one + podTemplateSpec.Spec.InitContainers = append(podTemplateSpec.Spec.InitContainers, corev1.Container{Name: name}) + idx = len(podTemplateSpec.Spec.InitContainers) - 1 + } + c := &podTemplateSpec.Spec.InitContainers[idx] + containerfunc(c) + } +} + +// WithInitContainerByIndex applies the modifications to the container with the provided index +// if the index is out of range, a new container is added to accept these changes. 
+func WithInitContainerByIndex(index int, funcs ...func(container *corev1.Container)) func(podTemplateSpec *corev1.PodTemplateSpec) { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + if index >= len(podTemplateSpec.Spec.InitContainers) { + podTemplateSpec.Spec.InitContainers = append(podTemplateSpec.Spec.InitContainers, corev1.Container{}) + } + c := &podTemplateSpec.Spec.InitContainers[index] + for _, f := range funcs { + f(c) + } + } +} + +// WithPodLabels sets the PodTemplateSpec's Labels +func WithPodLabels(labels map[string]string) Modification { + if labels == nil { + labels = map[string]string{} + } + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.ObjectMeta.Labels = labels + } +} + +// WithServiceAccount sets the PodTemplateSpec's ServiceAccount name +func WithServiceAccount(serviceAccountName string) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.ServiceAccountName = serviceAccountName + } +} + +// WithVolumes ensures given volume is present in the PodTemplateSpec. It merges the volumes with existing ones. +func WithVolumes(volumes []corev1.Volume) Modification { + return func(template *corev1.PodTemplateSpec) { + for _, volume := range volumes { + WithVolume(volume)(template) + } + } +} + +// WithVolume ensures given volume is present in the PodTemplateSpec. It merges the volume if it already exists. 
+func WithVolume(volume corev1.Volume) Modification { + return func(template *corev1.PodTemplateSpec) { + for i := range template.Spec.Volumes { + if template.Spec.Volumes[i].Name == volume.Name { + template.Spec.Volumes[i] = merge.Volume(template.Spec.Volumes[i], volume) + return + } + } + + template.Spec.Volumes = append(template.Spec.Volumes, volume) + } +} + +func RemoveVolume(volume string) Modification { + return func(template *corev1.PodTemplateSpec) { + index := 0 + found := false + for i := range template.Spec.Volumes { + if template.Spec.Volumes[i].Name == volume { + index = i + found = true + } + } + + if found { + template.Spec.Volumes = append(template.Spec.Volumes[:index], template.Spec.Volumes[index+1:]...) + } + } +} + +func findIndexByName(name string, containers []corev1.Container) int { + for idx, c := range containers { + if c.Name == name { + return idx + } + } + return notFound +} + +// WithTerminationGracePeriodSeconds sets the PodTemplateSpec's termination grace period seconds +func WithTerminationGracePeriodSeconds(seconds int) Modification { + s := int64(seconds) + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.TerminationGracePeriodSeconds = &s + } +} + +// WithSecurityContext sets the PodTemplateSpec's SecurityContext +func WithSecurityContext(securityContext corev1.PodSecurityContext) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + spec := &podTemplateSpec.Spec + spec.SecurityContext = &securityContext + } +} + +// DefaultPodSecurityContext returns the default pod security context with: +// - uid 2000 +// - fsGroup 2000 +// - runAsNonRoot set to true +func DefaultPodSecurityContext() corev1.PodSecurityContext { + runAsNonRoot := true + runAsUser := int64(2000) + fsGroup := int64(2000) + return corev1.PodSecurityContext{RunAsUser: &runAsUser, RunAsNonRoot: &runAsNonRoot, FSGroup: &fsGroup} +} + +// WithImagePullSecrets adds an ImagePullSecrets local reference with the given name 
+func WithImagePullSecrets(name string) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + for _, v := range podTemplateSpec.Spec.ImagePullSecrets { + if v.Name == name { + return + } + } + podTemplateSpec.Spec.ImagePullSecrets = append(podTemplateSpec.Spec.ImagePullSecrets, corev1.LocalObjectReference{ + Name: name, + }) + } +} + +// WithTopologyKey sets the PodTemplateSpec's topology at a given index +func WithTopologyKey(topologyKey string, idx int) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[idx].PodAffinityTerm.TopologyKey = topologyKey + } +} + +// WithAffinity updates the name, antiAffinityLabelKey and weight of the PodTemplateSpec's Affinity +func WithAffinity(stsName, antiAffinityLabelKey string, weight int) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.Affinity = + &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ + Weight: int32(weight), + PodAffinityTerm: corev1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{antiAffinityLabelKey: stsName}}, + }, + }}, + }, + } + } +} + +// WithNodeAffinity sets the PodTemplateSpec's node affinity +func WithNodeAffinity(nodeAffinity *corev1.NodeAffinity) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.Affinity.NodeAffinity = nodeAffinity + } +} + +// WithPodAffinity sets the PodTemplateSpec's pod affinity +func WithPodAffinity(podAffinity *corev1.PodAffinity) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.Affinity.PodAffinity = podAffinity + } +} + +// WithTolerations sets the PodTemplateSpec's tolerations +func WithTolerations(tolerations []corev1.Toleration) Modification { + return 
func(podTemplateSpec *corev1.PodTemplateSpec) { + podTemplateSpec.Spec.Tolerations = tolerations + } +} + +// WithAnnotations sets the PodTemplateSpec's annotations +func WithAnnotations(annotations map[string]string) Modification { + if annotations == nil { + annotations = map[string]string{} + } + return func(podTemplateSpec *corev1.PodTemplateSpec) { + if podTemplateSpec.Annotations == nil { + podTemplateSpec.Annotations = map[string]string{} + } + for k, v := range annotations { + podTemplateSpec.Annotations[k] = v + } + } +} + +// WithVolumeMounts will add volume mounts to a container or init container by name +func WithVolumeMounts(containerName string, volumeMounts ...corev1.VolumeMount) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + c := FindContainerByName(containerName, podTemplateSpec) + if c == nil { + return + } + container.WithVolumeMounts(volumeMounts)(c) + } +} + +func RemoveVolumeMount(containerName string, volumeMount string) Modification { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + c := FindContainerByName(containerName, podTemplateSpec) + if c == nil { + return + } + container.RemoveVolumeMount(volumeMount)(c) + } +} + +// FindContainerByName will find either a container or init container by name in a pod template spec +func FindContainerByName(name string, podTemplateSpec *corev1.PodTemplateSpec) *corev1.Container { + containerIdx := findIndexByName(name, podTemplateSpec.Spec.Containers) + if containerIdx != notFound { + return &podTemplateSpec.Spec.Containers[containerIdx] + } + + initIdx := findIndexByName(name, podTemplateSpec.Spec.InitContainers) + if initIdx != notFound { + return &podTemplateSpec.Spec.InitContainers[initIdx] + } + + return nil +} + +func WithDefaultSecurityContextsModifications() (Modification, container.Modification) { + managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) // nolint:forbidigo + configureContainerSecurityContext := container.NOOP() + 
configurePodSpecSecurityContext := NOOP() + if !managedSecurityContext { + configurePodSpecSecurityContext = WithSecurityContext(DefaultPodSecurityContext()) + configureContainerSecurityContext = container.WithSecurityContext(container.DefaultSecurityContext()) + } + + return configurePodSpecSecurityContext, configureContainerSecurityContext +} diff --git a/pkg/kube/podtemplatespec/podspec_template_test.go b/pkg/kube/podtemplatespec/podspec_template_test.go new file mode 100644 index 000000000..832c2821f --- /dev/null +++ b/pkg/kube/podtemplatespec/podspec_template_test.go @@ -0,0 +1,617 @@ +package podtemplatespec + +import ( + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" +) + +func TestPodTemplateSpec(t *testing.T) { + volumeMount1 := corev1.VolumeMount{ + Name: "vol-1", + } + volumeMount2 := corev1.VolumeMount{ + Name: "vol-2", + } + + runAsUser := int64(1111) + runAsGroup := int64(2222) + fsGroup := int64(3333) + + p := New( + WithVolume(corev1.Volume{ + Name: "vol-1", + }), + WithVolume(corev1.Volume{ + Name: "vol-2", + }), + WithSecurityContext(corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &fsGroup, + }), + WithImagePullSecrets("pull-secrets"), + WithInitContainerByIndex(0, container.Apply( + container.WithName("init-container-0"), + container.WithImage("init-image"), + container.WithVolumeMounts([]corev1.VolumeMount{volumeMount1}), + container.WithSecurityContext(container.DefaultSecurityContext()), + )), + WithContainerByIndex(0, container.Apply( + container.WithName("container-0"), + container.WithImage("image"), + container.WithVolumeMounts([]corev1.VolumeMount{volumeMount1}), + container.WithSecurityContext(container.DefaultSecurityContext()), + )), + WithContainerByIndex(1, container.Apply( 
+ container.WithName("container-1"), + container.WithImage("image"), + container.WithSecurityContext(container.DefaultSecurityContext()), + )), + WithVolumeMounts("init-container-0", volumeMount2), + WithVolumeMounts("container-0", volumeMount2), + WithVolumeMounts("container-1", volumeMount1, volumeMount2), + ) + + assert.Len(t, p.Spec.Volumes, 2) + assert.Equal(t, p.Spec.Volumes[0].Name, "vol-1") + assert.Equal(t, p.Spec.Volumes[1].Name, "vol-2") + + expectedRunAsUser := int64(1111) + expectedRunAsGroup := int64(2222) + expectedFsGroup := int64(3333) + assert.Equal(t, &expectedRunAsUser, p.Spec.SecurityContext.RunAsUser) + assert.Equal(t, &expectedRunAsGroup, p.Spec.SecurityContext.RunAsGroup) + assert.Equal(t, &expectedFsGroup, p.Spec.SecurityContext.FSGroup) + + assert.Len(t, p.Spec.ImagePullSecrets, 1) + assert.Equal(t, "pull-secrets", p.Spec.ImagePullSecrets[0].Name) + + assert.Len(t, p.Spec.InitContainers, 1) + assert.Equal(t, "init-container-0", p.Spec.InitContainers[0].Name) + assert.Equal(t, "init-image", p.Spec.InitContainers[0].Image) + assert.Equal(t, []corev1.VolumeMount{volumeMount1, volumeMount2}, p.Spec.InitContainers[0].VolumeMounts) + assert.Equal(t, container.DefaultSecurityContext(), *p.Spec.InitContainers[0].SecurityContext) + + assert.Len(t, p.Spec.Containers, 2) + + assert.Equal(t, "container-0", p.Spec.Containers[0].Name) + assert.Equal(t, "image", p.Spec.Containers[0].Image) + assert.Equal(t, []corev1.VolumeMount{volumeMount1, volumeMount2}, p.Spec.Containers[0].VolumeMounts) + assert.Equal(t, container.DefaultSecurityContext(), *p.Spec.Containers[0].SecurityContext) + + assert.Equal(t, "container-1", p.Spec.Containers[1].Name) + assert.Equal(t, "image", p.Spec.Containers[1].Image) + assert.Equal(t, []corev1.VolumeMount{volumeMount1, volumeMount2}, p.Spec.Containers[1].VolumeMounts) + assert.Equal(t, container.DefaultSecurityContext(), *p.Spec.Containers[1].SecurityContext) +} + +func TestPodTemplateSpec_MultipleEditsToContainer(t 
*testing.T) { + p := New( + WithContainerByIndex(0, + container.Apply( + container.WithName("container-0"), + )), + WithContainerByIndex(0, + container.Apply( + container.WithImage("image"), + )), + WithContainerByIndex(0, + container.Apply( + container.WithImagePullPolicy(corev1.PullAlways), + )), + WithContainer("container-0", container.Apply( + container.WithCommand([]string{"cmd"}), + )), + ) + + assert.Len(t, p.Spec.Containers, 1) + c := p.Spec.Containers[0] + assert.Equal(t, "container-0", c.Name) + assert.Equal(t, "image", c.Image) + assert.Equal(t, corev1.PullAlways, c.ImagePullPolicy) + assert.Equal(t, "cmd", c.Command[0]) +} + +func TestMerge(t *testing.T) { + defaultSpec := getDefaultPodSpec() + customSpec := getCustomPodSpec() + + mergedSpec := merge.PodTemplateSpecs(defaultSpec, customSpec) + + initContainerDefault := getDefaultContainer() + initContainerDefault.Name = "init-container-default" + + initContainerCustom := getCustomContainer() + initContainerCustom.Name = "init-container-custom" + + expected := corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-default-name", + Namespace: "my-default-namespace", + Labels: map[string]string{ + "app": "operator", + "custom": "some", + }, + }, + Spec: corev1.PodSpec{ + NodeSelector: map[string]string{ + "node-0": "node-0", + "node-1": "node-1", + }, + ServiceAccountName: "my-service-account-override", + TerminationGracePeriodSeconds: int64Ref(11), + ActiveDeadlineSeconds: int64Ref(10), + NodeName: "my-node-name", + RestartPolicy: corev1.RestartPolicyAlways, + Containers: []corev1.Container{ + getDefaultContainer(), + getCustomContainer(), + }, + InitContainers: []corev1.Container{ + initContainerCustom, + initContainerDefault, + }, + Volumes: []corev1.Volume{}, + Affinity: affinity("zone", "custom"), + }, + } + assert.Equal(t, expected.Name, mergedSpec.Name) + assert.Equal(t, expected.Namespace, mergedSpec.Namespace) + assert.Equal(t, expected.Labels["app"], mergedSpec.Labels["app"]) + 
assert.Equal(t, expected.Labels["custom"], mergedSpec.Labels["custom"]) + assert.Equal(t, expected.Spec.NodeSelector["node-0"], mergedSpec.Spec.NodeSelector["node-0"]) + assert.Equal(t, expected.Spec.NodeSelector["node-1"], mergedSpec.Spec.NodeSelector["node-1"]) + assert.Equal(t, expected.Spec.ServiceAccountName, mergedSpec.Spec.ServiceAccountName) + assert.Equal(t, expected.Spec.TerminationGracePeriodSeconds, mergedSpec.Spec.TerminationGracePeriodSeconds) + assert.Equal(t, expected.Spec.ActiveDeadlineSeconds, mergedSpec.Spec.ActiveDeadlineSeconds) + assert.Equal(t, expected.Spec.NodeName, mergedSpec.Spec.NodeName) + assert.Equal(t, expected.Spec.RestartPolicy, mergedSpec.Spec.RestartPolicy) + assert.Equal(t, expected.Spec.Volumes, mergedSpec.Spec.Volumes) + assert.Equal(t, expected.Spec.Affinity.PodAntiAffinity, mergedSpec.Spec.Affinity.PodAntiAffinity) + assert.Equal(t, expected.Spec.Affinity.PodAffinity, mergedSpec.Spec.Affinity.PodAffinity) + assert.Equal(t, expected.Spec.Affinity.NodeAffinity, mergedSpec.Spec.Affinity.NodeAffinity) + assert.Equal(t, expected.Spec.Containers, mergedSpec.Spec.Containers) + assert.Equal(t, expected.Spec.InitContainers, mergedSpec.Spec.InitContainers) +} + +func TestMergeFromEmpty(t *testing.T) { + defaultPodSpec := corev1.PodTemplateSpec{} + customPodSpecTemplate := getCustomPodSpec() + + mergedPodTemplateSpec := merge.PodTemplateSpecs(defaultPodSpec, customPodSpecTemplate) + assert.Equal(t, customPodSpecTemplate, mergedPodTemplateSpec) +} + +func TestMergeWithEmpty(t *testing.T) { + defaultPodSpec := getDefaultPodSpec() + customPodSpecTemplate := corev1.PodTemplateSpec{} + + mergedPodTemplateSpec := merge.PodTemplateSpecs(defaultPodSpec, customPodSpecTemplate) + + assert.Equal(t, defaultPodSpec, mergedPodTemplateSpec) +} + +func TestMultipleMerges(t *testing.T) { + defaultPodSpec := getDefaultPodSpec() + customPodSpecTemplate := getCustomPodSpec() + + referenceSpec := merge.PodTemplateSpecs(defaultPodSpec, 
customPodSpecTemplate) + + mergedSpec := defaultPodSpec + + // multiple merges must give the same result + for i := 0; i < 3; i++ { + mergedSpec := merge.PodTemplateSpecs(mergedSpec, customPodSpecTemplate) + assert.Equal(t, referenceSpec, mergedSpec) + } +} + +func TestMergeEnvironmentVariables(t *testing.T) { + otherDefaultContainer := getDefaultContainer() + otherDefaultContainer.Env = append(otherDefaultContainer.Env, corev1.EnvVar{ + Name: "name1", + Value: "val1", + }) + + overrideOtherDefaultContainer := getDefaultContainer() + overrideOtherDefaultContainer.Env = append(overrideOtherDefaultContainer.Env, corev1.EnvVar{ + Name: "name2", + Value: "val2", + }) + overrideOtherDefaultContainer.Env = append(overrideOtherDefaultContainer.Env, corev1.EnvVar{ + Name: "name1", + Value: "changedValue", + }) + + defaultSpec := getDefaultPodSpec() + defaultSpec.Spec.Containers = []corev1.Container{otherDefaultContainer} + + customSpec := getCustomPodSpec() + customSpec.Spec.Containers = []corev1.Container{overrideOtherDefaultContainer} + + mergedSpec := merge.PodTemplateSpecs(defaultSpec, customSpec) + + mergedContainer := mergedSpec.Spec.Containers[0] + + assert.Len(t, mergedContainer.Env, 2) + assert.Equal(t, mergedContainer.Env[0].Name, "name1") + assert.Equal(t, mergedContainer.Env[0].Value, "changedValue") + assert.Equal(t, mergedContainer.Env[1].Name, "name2") + assert.Equal(t, mergedContainer.Env[1].Value, "val2") +} + +func TestMergeTolerations(t *testing.T) { + tests := []struct { + name string + defaultTolerations []corev1.Toleration + overrideTolerations []corev1.Toleration + expectedTolerations []corev1.Toleration + }{ + { + // In case the calling code specifies default tolerations, + // they should be kept when there are no overrides. 
+ name: "Overriding with nil tolerations", + defaultTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + overrideTolerations: nil, + expectedTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + }, + { + // If the override is specifying an empty list of tolerations, + // they should replace default tolerations. + name: "Overriding with empty tolerations", + defaultTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + }, + overrideTolerations: []corev1.Toleration{}, + expectedTolerations: []corev1.Toleration{}, + }, + { + // Overriding toleration should replace a nil original toleration. + name: "Overriding when default toleration is nil", + defaultTolerations: nil, + overrideTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + expectedTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + }, + { + // Overriding toleration should replace any original toleration. 
+ name: "Overriding when original toleration is not nil", + defaultTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value3", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value4", + Operator: corev1.TolerationOpExists, + }, + }, + overrideTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + expectedTolerations: []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + }, + { + Key: "key1", + Value: "value2", + Operator: corev1.TolerationOpExists, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defaultSpec := getDefaultPodSpec() + defaultSpec.Spec.Tolerations = tt.defaultTolerations + overrideSpec := getDefaultPodSpec() + overrideSpec.Spec.Tolerations = tt.overrideTolerations + + mergedSpec := merge.PodTemplateSpecs(defaultSpec, overrideSpec) + assert.Equal(t, tt.expectedTolerations, mergedSpec.Spec.Tolerations) + }) + } +} + +func TestMergeContainer(t *testing.T) { + vol0 := corev1.VolumeMount{Name: "container-0.volume-mount-0"} + sideCarVol := corev1.VolumeMount{Name: "container-1.volume-mount-0"} + + anotherVol := corev1.VolumeMount{Name: "another-mount"} + + overrideDefaultContainer := corev1.Container{Name: "container-0"} + overrideDefaultContainer.Image = "overridden" + overrideDefaultContainer.ReadinessProbe = &corev1.Probe{PeriodSeconds: 20} + + otherDefaultContainer := getDefaultContainer() + otherDefaultContainer.Name = "default-side-car" + otherDefaultContainer.VolumeMounts = []corev1.VolumeMount{sideCarVol} + + overrideOtherDefaultContainer := otherDefaultContainer + overrideOtherDefaultContainer.Env = []corev1.EnvVar{{Name: "env_var", Value: "xxx"}} + overrideOtherDefaultContainer.VolumeMounts = []corev1.VolumeMount{anotherVol} + + defaultSpec := getDefaultPodSpec() + 
defaultSpec.Spec.Containers = []corev1.Container{getDefaultContainer(), otherDefaultContainer} + + customSpec := getCustomPodSpec() + customSpec.Spec.Containers = []corev1.Container{getCustomContainer(), overrideDefaultContainer, overrideOtherDefaultContainer} + + mergedSpec := merge.PodTemplateSpecs(defaultSpec, customSpec) + + assert.Len(t, mergedSpec.Spec.Containers, 3) + assert.Equal(t, getCustomContainer(), mergedSpec.Spec.Containers[1]) + + firstExpected := corev1.Container{ + Name: "container-0", + VolumeMounts: []corev1.VolumeMount{vol0}, + Image: "overridden", + Command: []string{}, + Args: []string{}, + Ports: []corev1.ContainerPort{}, + ReadinessProbe: &corev1.Probe{ + // only "periodSeconds" was overwritten - other fields stayed untouched + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{Path: "/foo"}, + }, + PeriodSeconds: 20, + }, + } + assert.Equal(t, firstExpected, mergedSpec.Spec.Containers[0]) + + secondExpected := corev1.Container{ + Name: "default-side-car", + Image: "image-0", + VolumeMounts: []corev1.VolumeMount{anotherVol, sideCarVol}, + Command: []string{}, + Args: []string{}, + Ports: []corev1.ContainerPort{}, + Env: []corev1.EnvVar{ + { + Name: "env_var", + Value: "xxx", + }, + }, + ReadinessProbe: otherDefaultContainer.ReadinessProbe, + } + assert.Equal(t, secondExpected, mergedSpec.Spec.Containers[2]) +} + +func TestMergeVolumes_DoesNotAddDuplicatesWithSameName(t *testing.T) { + defaultPodSpec := getDefaultPodSpec() + defaultPodSpec.Spec.Volumes = append(defaultPodSpec.Spec.Volumes, corev1.Volume{ + Name: "new-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "old-host-path", + }, + }, + }) + defaultPodSpec.Spec.Volumes = append(defaultPodSpec.Spec.Volumes, corev1.Volume{ + Name: "new-volume-2", + }) + + overridePodSpec := getDefaultPodSpec() + overridePodSpec.Spec.Volumes = append(overridePodSpec.Spec.Volumes, corev1.Volume{ + Name: "new-volume", + VolumeSource: 
corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "updated-host-path", + }, + }, + }) + overridePodSpec.Spec.Volumes = append(overridePodSpec.Spec.Volumes, corev1.Volume{ + Name: "new-volume-3", + }) + + mergedPodSpecTemplate := merge.PodTemplateSpecs(defaultPodSpec, overridePodSpec) + + assert.Len(t, mergedPodSpecTemplate.Spec.Volumes, 3) + assert.Equal(t, "new-volume", mergedPodSpecTemplate.Spec.Volumes[0].Name) + assert.Equal(t, "updated-host-path", mergedPodSpecTemplate.Spec.Volumes[0].VolumeSource.HostPath.Path) + assert.Equal(t, "new-volume-2", mergedPodSpecTemplate.Spec.Volumes[1].Name) + assert.Equal(t, "new-volume-3", mergedPodSpecTemplate.Spec.Volumes[2].Name) +} + +func TestAddVolumes(t *testing.T) { + volumeModification := WithVolume(corev1.Volume{ + Name: "new-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "old-host-path", + }, + }}, + ) + + toAddVolumes := []corev1.Volume{ + { + Name: "new-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "new-host-path", + }, + }, + }, + { + Name: "new-volume-2", + }, + } + + volumesModification := WithVolumes(toAddVolumes) + + p := New(volumeModification, volumesModification) + assert.Len(t, p.Spec.Volumes, 2) + assert.Equal(t, "new-volume", p.Spec.Volumes[0].Name) + assert.Equal(t, "new-volume-2", p.Spec.Volumes[1].Name) + assert.Equal(t, "new-host-path", p.Spec.Volumes[0].VolumeSource.HostPath.Path) +} + +func int64Ref(i int64) *int64 { + return &i +} + +func getDefaultPodSpec() corev1.PodTemplateSpec { + initContainer := getDefaultContainer() + initContainer.Name = "init-container-default" + + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-default-name", + Namespace: "my-default-namespace", + Labels: map[string]string{"app": "operator"}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + NodeSelector: map[string]string{ + "node-0": "node-0", + }, + 
ServiceAccountName: "my-default-service-account", + TerminationGracePeriodSeconds: int64Ref(12), + ActiveDeadlineSeconds: int64Ref(10), + Containers: []corev1.Container{getDefaultContainer()}, + InitContainers: []corev1.Container{initContainer}, + Affinity: affinity("hostname", "default"), + Volumes: []corev1.Volume{}, + }, + } +} + +func getCustomPodSpec() corev1.PodTemplateSpec { + initContainer := getCustomContainer() + initContainer.Name = "init-container-custom" + + return corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"custom": "some"}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + NodeSelector: map[string]string{ + "node-1": "node-1", + }, + ServiceAccountName: "my-service-account-override", + TerminationGracePeriodSeconds: int64Ref(11), + NodeName: "my-node-name", + RestartPolicy: corev1.RestartPolicyAlways, + Containers: []corev1.Container{getCustomContainer()}, + InitContainers: []corev1.Container{initContainer}, + Affinity: affinity("zone", "custom"), + Volumes: []corev1.Volume{}, + }, + } +} + +func affinity(antiAffinityKey, nodeAffinityKey string) *corev1.Affinity { + return &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ + PodAffinityTerm: corev1.PodAffinityTerm{ + TopologyKey: antiAffinityKey, + }, + }}, + }, + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{NodeSelectorTerms: []corev1.NodeSelectorTerm{{ + MatchFields: []corev1.NodeSelectorRequirement{{ + Key: nodeAffinityKey, + }}, + }}}, + }, + } +} + +func getDefaultContainer() corev1.Container { + return corev1.Container{ + Args: []string{}, + Command: []string{}, + Ports: []corev1.ContainerPort{}, + Name: "container-0", + Image: "image-0", + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{ + Path: "/foo", + }}, + PeriodSeconds: 10, 
+ }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "container-0.volume-mount-0", + }, + }, + } +} + +func getCustomContainer() corev1.Container { + return corev1.Container{ + Name: "container-1", + Image: "image-1", + } +} diff --git a/pkg/kube/probes/probes.go b/pkg/kube/probes/probes.go new file mode 100644 index 000000000..6b11feb8f --- /dev/null +++ b/pkg/kube/probes/probes.go @@ -0,0 +1,63 @@ +package probes + +import corev1 "k8s.io/api/core/v1" + +type Modification func(*corev1.Probe) + +func Apply(funcs ...Modification) Modification { + return func(probe *corev1.Probe) { + for _, f := range funcs { + f(probe) + } + } +} + +func New(funcs ...Modification) corev1.Probe { + probe := corev1.Probe{} + for _, f := range funcs { + f(&probe) + } + return probe +} + +func WithExecCommand(cmd []string) Modification { + return func(probe *corev1.Probe) { + if probe.ProbeHandler.Exec == nil { + probe.ProbeHandler.Exec = &corev1.ExecAction{} + } + probe.ProbeHandler.Exec.Command = cmd + } +} + +func WithFailureThreshold(failureThreshold int) Modification { + return func(probe *corev1.Probe) { + probe.FailureThreshold = int32(failureThreshold) + } +} + +func WithInitialDelaySeconds(initialDelaySeconds int) Modification { + return func(probe *corev1.Probe) { + probe.InitialDelaySeconds = int32(initialDelaySeconds) + } +} +func WithSuccessThreshold(successThreshold int) Modification { + return func(probe *corev1.Probe) { + probe.SuccessThreshold = int32(successThreshold) + } +} +func WithPeriodSeconds(periodSeconds int) Modification { + return func(probe *corev1.Probe) { + probe.PeriodSeconds = int32(periodSeconds) + } +} +func WithTimeoutSeconds(timeoutSeconds int) Modification { + return func(probe *corev1.Probe) { + probe.TimeoutSeconds = int32(timeoutSeconds) + } +} + +func WithHandler(handler corev1.ProbeHandler) Modification { + return func(probe *corev1.Probe) { + probe.ProbeHandler = handler + } +} diff --git 
a/pkg/kube/resourcerequirements/resource_requirements.go b/pkg/kube/resourcerequirements/resource_requirements.go index 010010482..33bc28b04 100644 --- a/pkg/kube/resourcerequirements/resource_requirements.go +++ b/pkg/kube/resourcerequirements/resource_requirements.go @@ -63,3 +63,10 @@ func BuildDefaultStorageRequirements() corev1.ResourceList { res[corev1.ResourceStorage] = g10 return res } + +func BuildStorageRequirements(amount string) corev1.ResourceList { + g10, _ := resource.ParseQuantity(amount) + res := corev1.ResourceList{} + res[corev1.ResourceStorage] = g10 + return res +} diff --git a/pkg/kube/secret/secret.go b/pkg/kube/secret/secret.go new file mode 100644 index 000000000..93f9b64ea --- /dev/null +++ b/pkg/kube/secret/secret.go @@ -0,0 +1,212 @@ +package secret + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + + corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Getter interface { + GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) +} + +type Updater interface { + UpdateSecret(ctx context.Context, secret corev1.Secret) error +} + +type Creator interface { + CreateSecret(ctx context.Context, secret corev1.Secret) error +} + +type Deleter interface { + DeleteSecret(ctx context.Context, key client.ObjectKey) error +} + +type GetUpdater interface { + Getter + Updater +} + +type GetUpdateCreator interface { + Getter + Updater + Creator +} + +type GetUpdateCreateDeleter interface { + Getter + Updater + Creator + Deleter +} + +func ReadKey(ctx context.Context, getter Getter, key string, objectKey client.ObjectKey) (string, error) { + data, err := ReadStringData(ctx, getter, objectKey) + if err != nil { + return "", err + } + if val, ok := data[key]; ok { + return val, nil + } + return 
"", fmt.Errorf(`key "%s" not present in the Secret %s/%s`, key, objectKey.Namespace, objectKey.Name) +} + +// ReadByteData reads the Data field of the secret with the given objectKey +func ReadByteData(ctx context.Context, getter Getter, objectKey client.ObjectKey) (map[string][]byte, error) { + secret, err := getter.GetSecret(ctx, objectKey) + if err != nil { + return nil, err + } + return secret.Data, nil +} + +// ReadStringData reads the StringData field of the secret with the given objectKey +func ReadStringData(ctx context.Context, getter Getter, key client.ObjectKey) (map[string]string, error) { + secret, err := getter.GetSecret(ctx, key) + if err != nil { + return nil, err + } + + return dataToStringData(secret.Data), nil +} + +func dataToStringData(data map[string][]byte) map[string]string { + stringData := make(map[string]string) + for k, v := range data { + stringData[k] = string(v) + } + return stringData +} + +// UpdateField updates a single field in the secret with the provided objectKey +func UpdateField(ctx context.Context, getUpdater GetUpdater, objectKey client.ObjectKey, key, value string) error { + secret, err := getUpdater.GetSecret(ctx, objectKey) + if err != nil { + return err + } + secret.Data[key] = []byte(value) + return getUpdater.UpdateSecret(ctx, secret) +} + +// CreateOrUpdate creates the Secret if it doesn't exist, other wise it updates it +func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { + if err := getUpdateCreator.UpdateSecret(ctx, secret); err != nil { + if SecretNotExist(err) { + return getUpdateCreator.CreateSecret(ctx, secret) + } else { + return err + } + } + return nil +} + +// HasAllKeys returns true if the provided secret contains an element for every +// key provided. 
False if a single element is absent +func HasAllKeys(secret corev1.Secret, keys ...string) bool { + for _, key := range keys { + if _, ok := secret.Data[key]; !ok { + return false + } + } + return true +} + +// EnsureSecretWithKey makes sure the Secret with the given name has a key with the given value if the key is not already present. +// if the key is present, it will return the existing value associated with this key. +func EnsureSecretWithKey(ctx context.Context, secretGetUpdateCreateDeleter GetUpdateCreateDeleter, nsName types.NamespacedName, ownerReferences []metav1.OwnerReference, key, value string) (string, error) { + existingSecret, err0 := secretGetUpdateCreateDeleter.GetSecret(ctx, nsName) + if err0 != nil { + if SecretNotExist(err0) { + s := Builder(). + SetNamespace(nsName.Namespace). + SetName(nsName.Name). + SetField(key, value). + SetOwnerReferences(ownerReferences). + Build() + + if err1 := secretGetUpdateCreateDeleter.CreateSecret(ctx, s); err1 != nil { + return "", err1 + } + return value, nil + } + return "", err0 + } + return string(existingSecret.Data[key]), nil +} + +// CopySecret copies secret object(data) from one cluster client to another, the from and to cluster-client can belong to the same or different clusters +func CopySecret(ctx context.Context, fromClient Getter, toClient GetUpdateCreator, sourceSecretNsName, destNsName types.NamespacedName) error { + s, err := fromClient.GetSecret(ctx, sourceSecretNsName) + if err != nil { + return err + } + + secretCopy := Builder(). + SetName(destNsName.Name). + SetNamespace(destNsName.Namespace). + SetByteData(s.Data). + SetDataType(s.Type). 
+ Build() + + return CreateOrUpdate(ctx, toClient, secretCopy) +} + +// Exists return whether a secret with the given namespaced name exists +func Exists(ctx context.Context, secretGetter Getter, nsName types.NamespacedName) (bool, error) { + _, err := secretGetter.GetSecret(ctx, nsName) + + if err != nil { + if apiErrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} + +// HasOwnerReferences checks whether an existing secret has a given set of owner references. +func HasOwnerReferences(secret corev1.Secret, ownerRefs []metav1.OwnerReference) bool { + secretRefs := secret.GetOwnerReferences() + for _, ref := range ownerRefs { + if !contains.OwnerReferences(secretRefs, ref) { + return false + } + } + return true +} + +// CreateOrUpdateIfNeeded creates a secret if it doesn't exist, or updates it if needed. +func CreateOrUpdateIfNeeded(ctx context.Context, getUpdateCreator GetUpdateCreator, secret corev1.Secret) error { + // Check if the secret exists + oldSecret, err := getUpdateCreator.GetSecret(ctx, types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}) + if err != nil { + if apiErrors.IsNotFound(err) { + return getUpdateCreator.CreateSecret(ctx, secret) + } + return err + } + + // Our secret builder never sets or uses secret.stringData, so we should only rely on secret.Data + if reflect.DeepEqual(secret.Data, oldSecret.Data) { + return nil + } + + // They are different so we need to update it + return getUpdateCreator.UpdateSecret(ctx, secret) +} + +func SecretNotExist(err error) bool { + if err == nil { + return false + } + return apiErrors.IsNotFound(err) || strings.Contains(err.Error(), "secret not found") +} diff --git a/pkg/kube/secret/secret_builder.go b/pkg/kube/secret/secret_builder.go new file mode 100644 index 000000000..f5c3b4c2b --- /dev/null +++ b/pkg/kube/secret/secret_builder.go @@ -0,0 +1,87 @@ +package secret + +import ( + corev1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type builder struct { + data map[string][]byte + dataType corev1.SecretType + labels map[string]string + name string + namespace string + ownerReferences []metav1.OwnerReference +} + +func (b *builder) SetName(name string) *builder { + b.name = name + return b +} + +func (b *builder) SetNamespace(namespace string) *builder { + b.namespace = namespace + return b +} + +func (b *builder) SetField(key, value string) *builder { + b.data[key] = []byte(value) + return b +} + +func (b *builder) SetOwnerReferences(ownerReferences []metav1.OwnerReference) *builder { + b.ownerReferences = ownerReferences + return b +} + +func (b *builder) SetLabels(labels map[string]string) *builder { + newLabels := make(map[string]string, len(labels)) + for k, v := range labels { + newLabels[k] = v + } + b.labels = newLabels + return b +} + +func (b *builder) SetByteData(stringData map[string][]byte) *builder { + newStringDataBytes := make(map[string][]byte, len(stringData)) + for k, v := range stringData { + newStringDataBytes[k] = v + } + b.data = newStringDataBytes + return b +} +func (b *builder) SetStringMapToData(stringData map[string]string) *builder { + newStringDataBytes := make(map[string][]byte, len(stringData)) + for k, v := range stringData { + newStringDataBytes[k] = []byte(v) + } + b.data = newStringDataBytes + return b +} + +func (b *builder) SetDataType(dataType corev1.SecretType) *builder { + b.dataType = dataType + return b +} + +func (b builder) Build() corev1.Secret { + return corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Namespace: b.namespace, + OwnerReferences: b.ownerReferences, + Labels: b.labels, + }, + Data: b.data, + Type: b.dataType, + } +} + +func Builder() *builder { + return &builder{ + labels: map[string]string{}, + data: map[string][]byte{}, + ownerReferences: []metav1.OwnerReference{}, + } +} diff --git a/pkg/kube/secret/secret_test.go b/pkg/kube/secret/secret_test.go new file mode 100644 
index 000000000..71810e32d --- /dev/null +++ b/pkg/kube/secret/secret_test.go @@ -0,0 +1,236 @@ +package secret + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type secretGetter struct { + secret corev1.Secret +} + +func (c secretGetter) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { + if c.secret.Name == objectKey.Name && c.secret.Namespace == objectKey.Namespace { + return c.secret, nil + } + return corev1.Secret{}, notFoundError() +} + +func newGetter(s corev1.Secret) Getter { + return secretGetter{ + secret: s, + } +} + +func TestReadKey(t *testing.T) { + ctx := context.Background() + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetField("key1", "value1"). + SetField("key2", "value2"). + Build(), + ) + + value, err := ReadKey(ctx, getter, "key1", nsName("namespace", "name")) + assert.Equal(t, "value1", value) + assert.NoError(t, err) + + value, err = ReadKey(ctx, getter, "key2", nsName("namespace", "name")) + assert.Equal(t, "value2", value) + assert.NoError(t, err) + + _, err = ReadKey(ctx, getter, "key3", nsName("namespace", "name")) + assert.Error(t, err) +} + +func TestReadData(t *testing.T) { + getter := newGetter( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetField("key1", "value1"). + SetField("key2", "value2"). 
+ Build(), + ) + t.Run("ReadStringData", func(t *testing.T) { + ctx := context.Background() + stringData, err := ReadStringData(ctx, getter, nsName("namespace", "name")) + assert.NoError(t, err) + + assert.Contains(t, stringData, "key1") + assert.Contains(t, stringData, "key2") + + assert.Equal(t, "value1", stringData["key1"]) + assert.Equal(t, "value2", stringData["key2"]) + }) + + t.Run("ReadByteData", func(t *testing.T) { + ctx := context.Background() + data, err := ReadByteData(ctx, getter, nsName("namespace", "name")) + assert.NoError(t, err) + + assert.Contains(t, data, "key1") + assert.Contains(t, data, "key2") + + assert.Equal(t, []byte("value1"), data["key1"]) + assert.Equal(t, []byte("value2"), data["key2"]) + }) + +} + +func nsName(namespace, name string) types.NamespacedName { + return types.NamespacedName{Name: name, Namespace: namespace} +} + +func notFoundError() error { + return &errors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}} +} + +type secretGetUpdater struct { + secret corev1.Secret +} + +func (c secretGetUpdater) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { + if c.secret.Name == objectKey.Name && c.secret.Namespace == objectKey.Namespace { + return c.secret, nil + } + return corev1.Secret{}, notFoundError() +} + +func (c *secretGetUpdater) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + c.secret = secret + return nil +} + +func newGetUpdater(s corev1.Secret) GetUpdater { + return &secretGetUpdater{ + secret: s, + } +} + +func TestUpdateField(t *testing.T) { + ctx := context.Background() + getUpdater := newGetUpdater( + Builder(). + SetName("name"). + SetNamespace("namespace"). + SetField("field1", "value1"). + SetField("field2", "value2"). 
+ Build(), + ) + err := UpdateField(ctx, getUpdater, nsName("namespace", "name"), "field1", "newValue") + assert.NoError(t, err) + val, _ := ReadKey(ctx, getUpdater, "field1", nsName("namespace", "name")) + assert.Equal(t, "newValue", val) + val2, _ := ReadKey(ctx, getUpdater, "field2", nsName("namespace", "name")) + assert.Equal(t, "value2", val2) +} + +type mockSecretGetUpdateCreateDeleter struct { + secrets map[client.ObjectKey]corev1.Secret + apiCalls int +} + +func (c *mockSecretGetUpdateCreateDeleter) DeleteSecret(ctx context.Context, key client.ObjectKey) error { + delete(c.secrets, key) + c.apiCalls += 1 + return nil +} + +func (c *mockSecretGetUpdateCreateDeleter) UpdateSecret(ctx context.Context, secret corev1.Secret) error { + c.secrets[types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}] = secret + c.apiCalls += 1 + return nil +} + +func (c *mockSecretGetUpdateCreateDeleter) CreateSecret(ctx context.Context, secret corev1.Secret) error { + return c.UpdateSecret(ctx, secret) +} + +func (c *mockSecretGetUpdateCreateDeleter) GetSecret(ctx context.Context, objectKey client.ObjectKey) (corev1.Secret, error) { + c.apiCalls += 1 + if s, ok := c.secrets[objectKey]; !ok { + return corev1.Secret{}, notFoundError() + } else { + return s, nil + } +} + +func TestCreateOrUpdateIfNeededCreate(t *testing.T) { + ctx := context.Background() + mock := &mockSecretGetUpdateCreateDeleter{ + secrets: map[client.ObjectKey]corev1.Secret{}, + apiCalls: 0, + } + + secret := getDefaultSecret() + + // first time it does not exist, we create it + err := CreateOrUpdateIfNeeded(ctx, mock, secret) + assert.NoError(t, err) + assert.Equal(t, 2, mock.apiCalls) // 2 calls -> get + creation +} + +func TestCreateOrUpdateIfNeededUpdate(t *testing.T) { + ctx := context.Background() + mock := &mockSecretGetUpdateCreateDeleter{ + secrets: map[client.ObjectKey]corev1.Secret{}, + apiCalls: 0, + } + secret := getDefaultSecret() + + { + err := mock.CreateSecret(ctx, secret) + 
assert.NoError(t, err) + mock.apiCalls = 0 + } + + { + secret.Data = map[string][]byte{"test": {1, 2, 3}} + // secret differs -> we update + err := CreateOrUpdateIfNeeded(ctx, mock, secret) + assert.NoError(t, err) + assert.Equal(t, 2, mock.apiCalls) // 2 calls -> get + update + } +} + +func TestCreateOrUpdateIfNeededEqual(t *testing.T) { + ctx := context.Background() + mock := &mockSecretGetUpdateCreateDeleter{ + secrets: map[client.ObjectKey]corev1.Secret{}, + apiCalls: 0, + } + secret := getDefaultSecret() + + { + err := mock.CreateSecret(ctx, secret) + assert.NoError(t, err) + mock.apiCalls = 0 + } + + { + // the secret already exists, so we only call get + err := CreateOrUpdateIfNeeded(ctx, mock, secret) + assert.NoError(t, err) + assert.Equal(t, 1, mock.apiCalls) // 1 call -> get + } +} + +func getDefaultSecret() corev1.Secret { + secret := + Builder(). + SetName("secret"). + SetNamespace("mdb.Namespace"). + SetStringMapToData(map[string]string{"password": "my-password"}). + Build() + return secret +} diff --git a/pkg/kube/service/service.go b/pkg/kube/service/service.go index 761ff806c..abb749acf 100644 --- a/pkg/kube/service/service.go +++ b/pkg/kube/service/service.go @@ -1,36 +1,46 @@ package service -import corev1 "k8s.io/api/core/v1" - -// Merge merges `source` into `dest`. Both arguments will remain unchanged -// a new service will be created and returned. -// The "merging" process is arbitrary and it only handle specific attributes -func Merge(dest corev1.Service, source corev1.Service) corev1.Service { - for k, v := range source.ObjectMeta.Annotations { - dest.ObjectMeta.Annotations[k] = v - } - - for k, v := range source.ObjectMeta.Labels { - dest.ObjectMeta.Labels[k] = v - } - - var nodePort int32 = 0 - if len(dest.Spec.Ports) > 0 { - // Save the NodePort for later, in case this ServicePort is changed. 
- nodePort = dest.Spec.Ports[0].NodePort - } - - if len(source.Spec.Ports) > 0 { - dest.Spec.Ports = source.Spec.Ports - - if nodePort > 0 && source.Spec.Ports[0].NodePort == 0 { - // There *is* a nodePort defined already, and a new one is not being passed - dest.Spec.Ports[0].NodePort = nodePort - } - } - - dest.Spec.Type = source.Spec.Type - dest.Spec.LoadBalancerIP = source.Spec.LoadBalancerIP - dest.Spec.ExternalTrafficPolicy = source.Spec.ExternalTrafficPolicy - return dest +import ( + "context" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Getter interface { + GetService(ctx context.Context, objectKey client.ObjectKey) (corev1.Service, error) +} + +type Updater interface { + UpdateService(ctx context.Context, service corev1.Service) error +} + +type Creator interface { + CreateService(ctx context.Context, service corev1.Service) error +} + +type Deleter interface { + DeleteService(ctx context.Context, objectKey client.ObjectKey) error +} + +type GetDeleter interface { + Getter + Deleter +} + +type GetUpdater interface { + Getter + Updater +} + +type GetUpdateCreator interface { + Getter + Updater + Creator +} + +type GetUpdateCreateDeleter interface { + Getter + Updater + Creator + Deleter } diff --git a/pkg/kube/service/service_builder.go b/pkg/kube/service/service_builder.go index df1731615..bbaabe98f 100644 --- a/pkg/kube/service/service_builder.go +++ b/pkg/kube/service/service_builder.go @@ -10,9 +10,8 @@ type builder struct { namespace string clusterIp string serviceType corev1.ServiceType - servicePort corev1.ServicePort + servicePort []corev1.ServicePort labels map[string]string - port int32 loadBalancerIP string publishNotReady bool ownerReferences []metav1.OwnerReference @@ -56,18 +55,11 @@ func (b *builder) SetClusterIP(clusterIP string) *builder { return b } -func (b *builder) SetPort(port int32) *builder { - b.servicePort.Port = port - return b -} - -func (b *builder) SetPortName(portName string) *builder 
{ - b.servicePort.Name = portName - return b -} +func (b *builder) AddPort(port *corev1.ServicePort) *builder { + if port != nil { + b.servicePort = append(b.servicePort, *port) + } -func (b *builder) SetNodePort(port int32) *builder { - b.servicePort.NodePort = port return b } @@ -106,7 +98,7 @@ func (b *builder) Build() corev1.Service { LoadBalancerIP: b.loadBalancerIP, Type: b.serviceType, ClusterIP: b.clusterIp, - Ports: []corev1.ServicePort{b.servicePort}, + Ports: b.servicePort, Selector: b.selector, }, } @@ -118,5 +110,6 @@ func Builder() *builder { ownerReferences: []metav1.OwnerReference{}, selector: map[string]string{}, annotations: map[string]string{}, + servicePort: []corev1.ServicePort{}, } } diff --git a/pkg/kube/statefulset/merge_statefulset_test.go b/pkg/kube/statefulset/merge_statefulset_test.go new file mode 100644 index 000000000..7ae2acd53 --- /dev/null +++ b/pkg/kube/statefulset/merge_statefulset_test.go @@ -0,0 +1,284 @@ +package statefulset + +import ( + "reflect" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetLabelSelectorRequirementByKey(t *testing.T) { + type args struct { + labelSelectorRequirements []metav1.LabelSelectorRequirement + key string + } + tests := []struct { + name string + args args + want *metav1.LabelSelectorRequirement + }{ + { + name: "Returns nil if the element is not present", + args: args{ + labelSelectorRequirements: []metav1.LabelSelectorRequirement{ + { + Key: "test-key", + }, + }, + key: "not-found", + }, + want: nil, + }, + { + name: "Finds the element if the key matches an element present.", + args: args{ + labelSelectorRequirements: []metav1.LabelSelectorRequirement{ + { + Key: "test-key", + }, + }, + key: "test-key", + }, + want: &metav1.LabelSelectorRequirement{ + + Key: "test-key", + }, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + if got := merge.LabelSelectorRequirementByKey(tt.args.labelSelectorRequirements, tt.args.key); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getLabelSelectorRequirementByKey() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMergeSpec(t *testing.T) { + + original := New( + WithName("original"), + WithServiceName("original-svc-name"), + WithReplicas(3), + WithRevisionHistoryLimit(10), + WithPodManagementPolicyType(appsv1.OrderedReadyPodManagement), + WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "1", + "b": "2", + "c": "3", + "e": "4", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key-0", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"A", "B", "C"}, + }, + { + Key: "key-2", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"F", "D", "E"}, + }, + }, + }), + ) + + override := New( + WithName("override"), + WithServiceName("override-svc-name"), + WithReplicas(5), + WithRevisionHistoryLimit(15), + WithPodManagementPolicyType(appsv1.ParallelPodManagement), + WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + "c": "30", + "d": "40", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key-0", + Operator: metav1.LabelSelectorOpDoesNotExist, + Values: []string{"Z"}, + }, + { + Key: "key-1", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"A", "B", "C", "D"}, + }, + }, + }), + ) + + mergedSpec := merge.StatefulSetSpecs(original.Spec, override.Spec) + + t.Run("Primitive fields of spec have been merged correctly", func(t *testing.T) { + assert.Equal(t, "override-svc-name", mergedSpec.ServiceName) + assert.Equal(t, int32(5), *mergedSpec.Replicas) + assert.Equal(t, int32(15), *mergedSpec.RevisionHistoryLimit) + assert.Equal(t, appsv1.ParallelPodManagement, mergedSpec.PodManagementPolicy) + }) + + matchLabels := mergedSpec.Selector.MatchLabels + assert.Len(t, matchLabels, 5) + + 
t.Run("Match labels have been merged correctly", func(t *testing.T) { + assert.Equal(t, "10", matchLabels["a"]) + assert.Equal(t, "2", matchLabels["b"]) + assert.Equal(t, "30", matchLabels["c"]) + assert.Equal(t, "40", matchLabels["d"]) + assert.Equal(t, "4", matchLabels["e"]) + }) + + t.Run("Test Match Expressions have been merged correctly", func(t *testing.T) { + matchExpressions := mergedSpec.Selector.MatchExpressions + assert.Len(t, matchExpressions, 3) + t.Run("Elements are sorted in alphabetical order", func(t *testing.T) { + assert.Equal(t, "key-0", matchExpressions[0].Key) + assert.Equal(t, "key-1", matchExpressions[1].Key) + assert.Equal(t, "key-2", matchExpressions[2].Key) + }) + + t.Run("Test operator merging", func(t *testing.T) { + assert.Equal(t, metav1.LabelSelectorOpDoesNotExist, matchExpressions[0].Operator) + assert.Equal(t, metav1.LabelSelectorOpExists, matchExpressions[1].Operator) + assert.Equal(t, metav1.LabelSelectorOpExists, matchExpressions[2].Operator) + }) + + t.Run("Test values are merged and sorted", func(t *testing.T) { + assert.Equal(t, []string{"A", "B", "C", "Z"}, matchExpressions[0].Values) + assert.Equal(t, []string{"A", "B", "C", "D"}, matchExpressions[1].Values) + assert.Equal(t, []string{"D", "E", "F"}, matchExpressions[2].Values) + }) + }) +} + +func TestMergeSpecLabelSelector(t *testing.T) { + tests := []struct { + name string + original appsv1.StatefulSet + override appsv1.StatefulSet + expected *metav1.LabelSelector + }{ + { + name: "Empty label selectors in both sources", + original: New(WithName("original")), + override: New(WithName("override")), + expected: nil, + }, + { + name: "Empty original label selector", + original: New(WithName("original")), + override: New(WithName("override"), WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + }, + })), + expected: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + }, + }, + }, + { + name: "Empty 
override label selector", + original: New(WithName("original"), WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + }, + })), + override: New(WithName("override")), + expected: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + }, + }, + }, + { + name: "Merge values label selectors from both resources", + original: New(WithName("original"), WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "1", + "b": "2", + "c": "3", + "e": "4", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key-0", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"A", "B", "C"}, + }, + { + Key: "key-2", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"F", "D", "E"}, + }, + }, + })), + override: New(WithName("override"), WithSelector(&metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + "c": "30", + "d": "40", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key-0", + Operator: metav1.LabelSelectorOpDoesNotExist, + Values: []string{"Z"}, + }, + { + Key: "key-1", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"A", "B", "C", "D"}, + }, + }, + })), + expected: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "a": "10", + "b": "2", + "c": "30", + "d": "40", + "e": "4", + }, + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "key-0", + Operator: metav1.LabelSelectorOpDoesNotExist, + Values: []string{"A", "B", "C", "Z"}, + }, + { + Key: "key-1", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"A", "B", "C", "D"}, + }, + { + Key: "key-2", + Operator: metav1.LabelSelectorOpExists, + Values: []string{"D", "E", "F"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mergedSpec := merge.StatefulSets(tt.original, tt.override) + assert.Equal(t, tt.expected, mergedSpec.Spec.Selector) + }) + } +} diff --git 
a/pkg/kube/statefulset/statefulset.go b/pkg/kube/statefulset/statefulset.go index 3455f4eb6..d6e7660cb 100644 --- a/pkg/kube/statefulset/statefulset.go +++ b/pkg/kube/statefulset/statefulset.go @@ -1,22 +1,91 @@ package statefulset import ( - "reflect" + "context" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/imdario/mergo" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" ) +const ( + notFound = -1 +) + +type Getter interface { + GetStatefulSet(ctx context.Context, objectKey client.ObjectKey) (appsv1.StatefulSet, error) +} + +type Updater interface { + UpdateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) (appsv1.StatefulSet, error) +} + +type Creator interface { + CreateStatefulSet(ctx context.Context, sts appsv1.StatefulSet) error +} + +type Deleter interface { + DeleteStatefulSet(ctx context.Context, objectKey client.ObjectKey) error +} + +type GetUpdater interface { + Getter + Updater +} + +type GetUpdateCreator interface { + Getter + Updater + Creator +} + +type GetUpdateCreateDeleter interface { + Getter + Updater + Creator + Deleter +} + +// CreateOrUpdate creates the given StatefulSet if it doesn't exist, +// or updates it if it does. 
+func CreateOrUpdate(ctx context.Context, getUpdateCreator GetUpdateCreator, statefulSet appsv1.StatefulSet) (appsv1.StatefulSet, error) { + if sts, err := getUpdateCreator.UpdateStatefulSet(ctx, statefulSet); err != nil { + if apiErrors.IsNotFound(err) { + return statefulSet, getUpdateCreator.CreateStatefulSet(ctx, statefulSet) + } else { + return appsv1.StatefulSet{}, err + } + } else { + return sts, nil + } +} + +// GetAndUpdate applies the provided function to the most recent version of the object +func GetAndUpdate(ctx context.Context, getUpdater GetUpdater, nsName types.NamespacedName, updateFunc func(*appsv1.StatefulSet)) (appsv1.StatefulSet, error) { + sts, err := getUpdater.GetStatefulSet(ctx, nsName) + if err != nil { + return appsv1.StatefulSet{}, err + } + // apply the function on the most recent version of the resource + updateFunc(&sts) + return getUpdater.UpdateStatefulSet(ctx, sts) +} + // VolumeMountData contains values required for the MountVolume function type VolumeMountData struct { Name string MountPath string Volume corev1.Volume + ReadOnly bool } -func CreateVolumeFromConfigMap(name, sourceName string) corev1.Volume { - return corev1.Volume{ +func CreateVolumeFromConfigMap(name, sourceName string, options ...func(v *corev1.Volume)) corev1.Volume { + volume := &corev1.Volume{ Name: name, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ @@ -26,17 +95,29 @@ func CreateVolumeFromConfigMap(name, sourceName string) corev1.Volume { }, }, } + + for _, option := range options { + option(volume) + } + return *volume } -func CreateVolumeFromSecret(name, sourceName string) corev1.Volume { - return corev1.Volume{ +func CreateVolumeFromSecret(name, sourceName string, options ...func(v *corev1.Volume)) corev1.Volume { + permission := int32(416) + volumeMount := &corev1.Volume{ Name: name, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: sourceName, + SecretName: sourceName, + DefaultMode: 
&permission, }, }, } + for _, option := range options { + option(volumeMount) + } + return *volumeMount + } func CreateVolumeFromEmptyDir(name string) corev1.Volume { @@ -61,6 +142,20 @@ func CreateVolumeMount(name, path string, options ...func(*corev1.VolumeMount)) return *volumeMount } +// NOOP is a valid Modification which applies no changes +func NOOP() Modification { + return func(sts *appsv1.StatefulSet) {} +} + +func WithSecretDefaultMode(mode *int32) func(*corev1.Volume) { + return func(v *corev1.Volume) { + if v.VolumeSource.Secret == nil { + v.VolumeSource.Secret = &corev1.SecretVolumeSource{} + } + v.VolumeSource.Secret.DefaultMode = mode + } +} + // WithSubPath sets the SubPath for this VolumeMount func WithSubPath(subPath string) func(*corev1.VolumeMount) { return func(v *corev1.VolumeMount) { @@ -75,19 +170,179 @@ func WithReadOnly(readonly bool) func(*corev1.VolumeMount) { } } -func IsReady(sts appsv1.StatefulSet) bool { - replicas := *sts.Spec.Replicas - allUpdated := replicas == sts.Status.UpdatedReplicas - allReady := replicas == sts.Status.ReadyReplicas - return allUpdated && allReady +func IsReady(sts appsv1.StatefulSet, expectedReplicas int) bool { + allUpdated := int32(expectedReplicas) == sts.Status.UpdatedReplicas + allReady := int32(expectedReplicas) == sts.Status.ReadyReplicas + atExpectedGeneration := sts.Generation == sts.Status.ObservedGeneration + return allUpdated && allReady && atExpectedGeneration } -// HaveEqualSpec accepts a StatefulSet builtSts, and a second existingSts, and compares -// the Spec of both inputs but only comparing the fields that were specified in builtSts -func HaveEqualSpec(builtSts appsv1.StatefulSet, existingSts appsv1.StatefulSet) (bool, error) { - stsToMerge := *existingSts.DeepCopyObject().(*appsv1.StatefulSet) - if err := mergo.Merge(&stsToMerge, builtSts, mergo.WithOverride); err != nil { - return false, err +type Modification func(*appsv1.StatefulSet) + +func New(mods ...Modification) appsv1.StatefulSet 
{ + sts := appsv1.StatefulSet{} + for _, mod := range mods { + mod(&sts) } - return reflect.DeepEqual(stsToMerge.Spec, existingSts.Spec), nil + return sts +} + +func Apply(funcs ...Modification) func(*appsv1.StatefulSet) { + return func(sts *appsv1.StatefulSet) { + for _, f := range funcs { + f(sts) + } + } +} + +func WithName(name string) Modification { + return func(sts *appsv1.StatefulSet) { + sts.Name = name + } +} + +func WithNamespace(namespace string) Modification { + return func(sts *appsv1.StatefulSet) { + sts.Namespace = namespace + } +} + +func WithServiceName(svcName string) Modification { + return func(sts *appsv1.StatefulSet) { + sts.Spec.ServiceName = svcName + } +} + +func WithLabels(labels map[string]string) Modification { + return func(set *appsv1.StatefulSet) { + set.Labels = copyMap(labels) + } +} + +func WithAnnotations(annotations map[string]string) Modification { + return func(set *appsv1.StatefulSet) { + set.Annotations = merge.StringToStringMap(set.Annotations, annotations) + } +} + +func WithMatchLabels(matchLabels map[string]string) Modification { + return func(set *appsv1.StatefulSet) { + if set.Spec.Selector == nil { + set.Spec.Selector = &metav1.LabelSelector{} + } + set.Spec.Selector.MatchLabels = copyMap(matchLabels) + } +} +func WithOwnerReference(ownerRefs []metav1.OwnerReference) Modification { + ownerReference := make([]metav1.OwnerReference, len(ownerRefs)) + copy(ownerReference, ownerRefs) + return func(set *appsv1.StatefulSet) { + set.OwnerReferences = ownerReference + } +} + +func WithReplicas(replicas int) Modification { + stsReplicas := int32(replicas) + return func(sts *appsv1.StatefulSet) { + sts.Spec.Replicas = &stsReplicas + } +} + +func WithRevisionHistoryLimit(revisionHistoryLimit int) Modification { + rhl := int32(revisionHistoryLimit) + return func(sts *appsv1.StatefulSet) { + sts.Spec.RevisionHistoryLimit = &rhl + } +} + +func WithPodManagementPolicyType(policyType appsv1.PodManagementPolicyType) Modification { + 
return func(set *appsv1.StatefulSet) { + set.Spec.PodManagementPolicy = policyType + } +} + +func WithSelector(selector *metav1.LabelSelector) Modification { + return func(set *appsv1.StatefulSet) { + set.Spec.Selector = selector + } +} + +func WithUpdateStrategyType(strategyType appsv1.StatefulSetUpdateStrategyType) Modification { + return func(set *appsv1.StatefulSet) { + set.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ + Type: strategyType, + } + } +} + +func WithPodSpecTemplate(templateFunc func(*corev1.PodTemplateSpec)) Modification { + return func(set *appsv1.StatefulSet) { + template := &set.Spec.Template + templateFunc(template) + } +} + +func WithVolumeClaim(name string, f func(*corev1.PersistentVolumeClaim)) Modification { + return func(set *appsv1.StatefulSet) { + idx := findVolumeClaimIndexByName(name, set.Spec.VolumeClaimTemplates) + if idx == notFound { + set.Spec.VolumeClaimTemplates = append(set.Spec.VolumeClaimTemplates, corev1.PersistentVolumeClaim{}) + idx = len(set.Spec.VolumeClaimTemplates) - 1 + } + pvc := &set.Spec.VolumeClaimTemplates[idx] + f(pvc) + } +} + +func WithVolumeClaimTemplates(pv []corev1.PersistentVolumeClaim) Modification { + pvCopy := make([]corev1.PersistentVolumeClaim, len(pv)) + copy(pvCopy, pv) + return func(set *appsv1.StatefulSet) { + set.Spec.VolumeClaimTemplates = pvCopy + } +} + +func WithCustomSpecs(spec appsv1.StatefulSetSpec) Modification { + return func(set *appsv1.StatefulSet) { + set.Spec = merge.StatefulSetSpecs(set.Spec, spec) + } +} + +func WithObjectMetadata(labels map[string]string, annotations map[string]string) Modification { + return func(set *appsv1.StatefulSet) { + WithLabels(labels)(set) + WithAnnotations(annotations)(set) + } +} + +func findVolumeClaimIndexByName(name string, pvcs []corev1.PersistentVolumeClaim) int { + for idx, pvc := range pvcs { + if pvc.Name == name { + return idx + } + } + return notFound +} + +func VolumeMountWithNameExists(mounts []corev1.VolumeMount, volumeName 
string) bool { + for _, mount := range mounts { + if mount.Name == volumeName { + return true + } + } + return false +} + +// ResetUpdateStrategy resets the statefulset update strategy to RollingUpdate. +// If a version change is in progress, it doesn't do anything. +func ResetUpdateStrategy(ctx context.Context, mdb annotations.Versioned, kubeClient GetUpdater) error { + if !mdb.IsChangingVersion() { + return nil + } + + // if we changed the version, we need to reset the UpdatePolicy back to OnUpdate + _, err := GetAndUpdate(ctx, kubeClient, mdb.NamespacedName(), func(sts *appsv1.StatefulSet) { + sts.Spec.UpdateStrategy.Type = appsv1.RollingUpdateStatefulSetStrategyType + }) + return err } diff --git a/pkg/kube/statefulset/statefulset_builder.go b/pkg/kube/statefulset/statefulset_builder.go index 6571013b2..09a52b75b 100644 --- a/pkg/kube/statefulset/statefulset_builder.go +++ b/pkg/kube/statefulset/statefulset_builder.go @@ -106,8 +106,8 @@ func (s *Builder) AddVolumes(volumes []corev1.Volume) *Builder { return s } -// getContainerIndexByName returns the index of the container with containerName -func (s Builder) getContainerIndexByName(containerName string) (int, error) { +// GetContainerIndexByName returns the index of the container with containerName. 
+func (s Builder) GetContainerIndexByName(containerName string) (int, error) { for i, c := range s.podTemplateSpec.Spec.Containers { if c.Name == containerName { return i, nil @@ -116,15 +116,18 @@ func (s Builder) getContainerIndexByName(containerName string) (int, error) { return -1, fmt.Errorf("no container with name [%s] found", containerName) } -func (s *Builder) AddVolumeAndMount(containerName string, volumeMountData VolumeMountData) *Builder { +func (s *Builder) AddVolumeAndMount(volumeMountData VolumeMountData, containerNames ...string) *Builder { s.AddVolume(volumeMountData.Volume) - s.AddVolumeMount(containerName, - corev1.VolumeMount{ - Name: volumeMountData.Name, - ReadOnly: true, - MountPath: volumeMountData.MountPath, - }, - ) + for _, containerName := range containerNames { + s.AddVolumeMount( + containerName, + corev1.VolumeMount{ + Name: volumeMountData.Name, + ReadOnly: volumeMountData.ReadOnly, + MountPath: volumeMountData.MountPath, + }, + ) + } return s } @@ -132,7 +135,7 @@ func (s Builder) buildPodTemplateSpec() (corev1.PodTemplateSpec, error) { podTemplateSpec := s.podTemplateSpec.DeepCopy() var errs error for containerName, volumeMounts := range s.volumeMountsPerContainer { - idx, err := s.getContainerIndexByName(containerName) + idx, err := s.GetContainerIndexByName(containerName) if err != nil { errs = multierror.Append(errs, err) // other containers may have valid mounts @@ -151,7 +154,7 @@ func (s Builder) buildPodTemplateSpec() (corev1.PodTemplateSpec, error) { } for containerName, overrideReadinessProbe := range s.readinessProbePerContainer { - idx, err := s.getContainerIndexByName(containerName) + idx, err := s.GetContainerIndexByName(containerName) if err != nil { errs = multierror.Append(errs, err) continue diff --git a/pkg/kube/statefulset/statefulset_test.go b/pkg/kube/statefulset/statefulset_test.go index bd94123b6..ad67cb3c3 100644 --- a/pkg/kube/statefulset/statefulset_test.go +++ b/pkg/kube/statefulset/statefulset_test.go @@ 
-29,25 +29,25 @@ func TestGetContainerIndexByName(t *testing.T) { } stsBuilder := defaultStatefulSetBuilder().SetPodTemplateSpec(podTemplateWithContainers(containers)) - idx, err := stsBuilder.getContainerIndexByName("container-0") + idx, err := stsBuilder.GetContainerIndexByName("container-0") assert.NoError(t, err) assert.NotEqual(t, -1, idx) assert.Equal(t, 0, idx) - idx, err = stsBuilder.getContainerIndexByName("container-1") + idx, err = stsBuilder.GetContainerIndexByName("container-1") assert.NoError(t, err) assert.NotEqual(t, -1, idx) assert.Equal(t, 1, idx) - idx, err = stsBuilder.getContainerIndexByName("container-2") + idx, err = stsBuilder.GetContainerIndexByName("container-2") assert.NoError(t, err) assert.NotEqual(t, -1, idx) assert.Equal(t, 2, idx) - idx, err = stsBuilder.getContainerIndexByName("doesnt-exist") + idx, err = stsBuilder.GetContainerIndexByName("doesnt-exist") assert.Error(t, err) assert.Equal(t, -1, idx) @@ -63,7 +63,7 @@ func TestAddVolumeAndMount(t *testing.T) { Volume: CreateVolumeFromConfigMap("mount-name", "config-map"), } - stsBuilder = defaultStatefulSetBuilder().SetPodTemplateSpec(podTemplateWithContainers([]corev1.Container{{Name: "container-name"}})).AddVolumeAndMount("container-name", vmd) + stsBuilder = defaultStatefulSetBuilder().SetPodTemplateSpec(podTemplateWithContainers([]corev1.Container{{Name: "container-name"}})).AddVolumeAndMount(vmd, "container-name") sts, err = stsBuilder.Build() // assert container was correctly updated with the volumes @@ -78,7 +78,7 @@ func TestAddVolumeAndMount(t *testing.T) { assert.NotNil(t, sts.Spec.Template.Spec.Volumes[0].VolumeSource.ConfigMap, "volume should have been configured from a config map source") assert.Nil(t, sts.Spec.Template.Spec.Volumes[0].VolumeSource.Secret, "volume should not have been configured from a secret source") - stsBuilder = defaultStatefulSetBuilder().SetPodTemplateSpec(podTemplateWithContainers([]corev1.Container{{Name: "container-0"}, {Name: 
"container-1"}})).AddVolumeAndMount("container-0", vmd) + stsBuilder = defaultStatefulSetBuilder().SetPodTemplateSpec(podTemplateWithContainers([]corev1.Container{{Name: "container-0"}, {Name: "container-1"}})).AddVolumeAndMount(vmd, "container-0") sts, err = stsBuilder.Build() assert.NoError(t, err, "volume should successfully mount when the container exists") @@ -90,7 +90,7 @@ func TestAddVolumeAndMount(t *testing.T) { } // add a 2nd container to previously defined stsBuilder - sts, err = stsBuilder.AddVolumeAndMount("container-1", secretVmd).Build() + sts, err = stsBuilder.AddVolumeAndMount(secretVmd, "container-1").Build() assert.NoError(t, err, "volume should successfully mount when the container exists") assert.Len(t, sts.Spec.Template.Spec.Containers[1].VolumeMounts, 1, "volume mount should have been added to the container in the stateful set") @@ -98,7 +98,8 @@ func TestAddVolumeAndMount(t *testing.T) { assert.Equal(t, sts.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath, "mount-path-secret") assert.Len(t, sts.Spec.Template.Spec.Volumes, 2) - assert.Equal(t, sts.Spec.Template.Spec.Volumes[1].Name, "mount-name-secret") + assert.Equal(t, "mount-name-secret", sts.Spec.Template.Spec.Volumes[1].Name) + assert.Equal(t, int32(416), *sts.Spec.Template.Spec.Volumes[1].Secret.DefaultMode) assert.Nil(t, sts.Spec.Template.Spec.Volumes[1].VolumeSource.ConfigMap, "volume should not have been configured from a config map source") assert.NotNil(t, sts.Spec.Template.Spec.Volumes[1].VolumeSource.Secret, "volume should have been configured from a secret source") @@ -205,72 +206,64 @@ func TestCreateVolumeMountWithMultipleOptions(t *testing.T) { assert.True(t, mount.ReadOnly) } -func TestHaveEqualSpec(t *testing.T) { - t.Run("Identical StatefulSet", func(t *testing.T) { - builtSts, _ := defaultStatefulSetBuilder().Build() - existingSts, _ := defaultStatefulSetBuilder().Build() - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - 
assert.True(t, areEqual, "When both stateful sets are identical, these should be considered equivalent") - }) - t.Run("Built StatefulSet is different from existing StatefulSet", func(t *testing.T) { - builtSts, _ := defaultStatefulSetBuilder().SetUpdateStrategy(appsv1.OnDeleteStatefulSetStrategyType).Build() - existingSts, _ := defaultStatefulSetBuilder().Build() - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.False(t, areEqual, "We have specified a field that is different from the existing StatefulSet, so these should be considered different") - }) - t.Run("Existing StatefulSet has values we don't specify", func(t *testing.T) { - builtSts, _ := defaultStatefulSetBuilder().Build() - existingSts, _ := defaultStatefulSetBuilder().Build() - revHistoryList := int32(30) - existingSts.Spec.RevisionHistoryLimit = &revHistoryList - - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.True(t, areEqual, "Specs should be considered equal even though the existing StatefulSet has fields we are not interested in") - }) - - t.Run("Metadata differences", func(t *testing.T) { - builtSts, _ := defaultStatefulSetBuilder().SetName("different-name").Build() - existingSts, _ := defaultStatefulSetBuilder().Build() - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.True(t, areEqual, "Metadata differences should not be considered, we are just looking at spec") - }) - - t.Run("Change to PodSpecTemplate", func(t *testing.T) { - builtSts, _ := defaultStatefulSetBuilder().Build() - existingSts, _ := defaultStatefulSetBuilder().Build() - t.Run("Same Container added", func(t *testing.T) { - builtSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1"}} - existingSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1"}} - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.True(t, areEqual, "Having the 
same container should be equal") - }) - - t.Run("Existing StatefulSet has init containers", func(t *testing.T) { - builtSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1"}} - existingSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1"}} - existingSts.Spec.Template.Spec.InitContainers = []corev1.Container{{Name: "container-1-init"}} - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.True(t, areEqual, "The existing StatefulSet has a field we have not touched in the spec (initContainers), this should be ignored in spec comparison") - }) - t.Run("Different Container added", func(t *testing.T) { - builtSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-2"}} - existingSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-3"}} - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.False(t, areEqual, "Metadata differences should not be considered, we are just looking at spec") - }) - t.Run("Image Change", func(t *testing.T) { - builtSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1", Image: "image-1"}} - existingSts.Spec.Template.Spec.Containers = []corev1.Container{{Name: "container-1", Image: "image-2"}} - areEqual, err := HaveEqualSpec(builtSts, existingSts) - assert.NoError(t, err) - assert.False(t, areEqual, "A single different field in an element in a list should result in the specs being different") - }) - }) +func TestWithAnnotations(t *testing.T) { + sts, err := defaultStatefulSetBuilder().Build() + assert.NoError(t, err) + + assert.Len(t, sts.Annotations, 0) + + // Test that it works when there are no annotations + WithAnnotations(map[string]string{ + "foo": "bar", + })(&sts) + assert.Equal(t, "bar", sts.Annotations["foo"]) + + // test that WithAnnotations merges the maps + WithAnnotations(map[string]string{ + "bar": "baz", + })(&sts) + assert.Equal(t, "bar", 
sts.Annotations["foo"]) + assert.Equal(t, "baz", sts.Annotations["bar"]) + + // Test that we can override a key + WithAnnotations(map[string]string{ + "foo": "baz", + })(&sts) + assert.Equal(t, "baz", sts.Annotations["foo"]) + + // handles nil values gracefully + WithAnnotations(nil)(&sts) + assert.Len(t, sts.Annotations, 2) +} + +func TestWithObjectMetadata(t *testing.T) { + sts, err := defaultStatefulSetBuilder().Build() + assert.NoError(t, err) + assert.Len(t, sts.Labels, 0) + assert.Len(t, sts.Annotations, 0) + + // handles nil values gracefully + { + WithObjectMetadata(nil, nil)(&sts) + } + + // Test that it works when there are no annotations + { + WithObjectMetadata(map[string]string{"label": "a"}, map[string]string{"annotation": "b"})(&sts) + assert.Equal(t, "b", sts.Annotations["annotation"]) + assert.Equal(t, "a", sts.Labels["label"]) + } + + // test that WithObjectMetadata merges the maps + { + WithObjectMetadata(map[string]string{"label2": "a"}, map[string]string{"annotation2": "b"})(&sts) + assert.Equal(t, "b", sts.Annotations["annotation"]) + assert.Equal(t, "b", sts.Annotations["annotation2"]) + } + + // Test that we can override a key + { + WithObjectMetadata(map[string]string{"label": "b"}, map[string]string{"annotation": "b"})(&sts) + assert.Equal(t, "b", sts.Annotations["annotation"]) + } } diff --git a/pkg/readiness/config/config.go b/pkg/readiness/config/config.go new file mode 100644 index 000000000..7f3e64714 --- /dev/null +++ b/pkg/readiness/config/config.go @@ -0,0 +1,116 @@ +package config + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + + "gopkg.in/natefinch/lumberjack.v2" + + "k8s.io/client-go/kubernetes" +) + +const ( + DefaultAgentHealthStatusFilePath = "/var/log/mongodb-mms-automation/agent-health-status.json" + AgentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH" + WithAgentFileLogging = "MDB_WITH_AGENT_FILE_LOGGING" + + defaultLogPath = "/var/log/mongodb-mms-automation/readiness.log" + podNamespaceEnv = "POD_NAMESPACE" 
+ automationConfigSecretEnv = "AUTOMATION_CONFIG_MAP" //nolint + logPathEnv = "LOG_FILE_PATH" + hostNameEnv = "HOSTNAME" + ReadinessProbeLoggerBackups = "READINESS_PROBE_LOGGER_BACKUPS" + ReadinessProbeLoggerMaxSize = "READINESS_PROBE_LOGGER_MAX_SIZE" + ReadinessProbeLoggerMaxAge = "READINESS_PROBE_LOGGER_MAX_AGE" + ReadinessProbeLoggerCompress = "READINESS_PROBE_LOGGER_COMPRESS" +) + +type Config struct { + ClientSet kubernetes.Interface + Namespace string + Hostname string + AutomationConfigSecretName string + HealthStatusReader io.Reader + LogFilePath string +} + +func BuildFromEnvVariables(clientSet kubernetes.Interface, isHeadless bool, file *os.File) (Config, error) { + logFilePath := GetEnvOrDefault(logPathEnv, defaultLogPath) + + var namespace, automationConfigName, hostname string + if isHeadless { + var ok bool + namespace, ok = os.LookupEnv(podNamespaceEnv) // nolint:forbidigo + if !ok { + return Config{}, fmt.Errorf("the '%s' environment variable must be set", podNamespaceEnv) + } + automationConfigName, ok = os.LookupEnv(automationConfigSecretEnv) // nolint:forbidigo + if !ok { + return Config{}, fmt.Errorf("the '%s' environment variable must be set", automationConfigSecretEnv) + } + hostname, ok = os.LookupEnv(hostNameEnv) // nolint:forbidigo + if !ok { + return Config{}, fmt.Errorf("the '%s' environment variable must be set", hostNameEnv) + } + } + + // Note, that we shouldn't close the file here - it will be closed very soon by the 'ioutil.ReadAll' + // in main.go + return Config{ + ClientSet: clientSet, + Namespace: namespace, + AutomationConfigSecretName: automationConfigName, + Hostname: hostname, + HealthStatusReader: file, + LogFilePath: logFilePath, + }, nil +} + +func GetLogger() *lumberjack.Logger { + logger := &lumberjack.Logger{ + Filename: readinessProbeLogFilePath(), + MaxBackups: readIntOrDefault(ReadinessProbeLoggerBackups, 5), + MaxSize: readIntOrDefault(ReadinessProbeLoggerMaxSize, 5), + MaxAge: readInt(ReadinessProbeLoggerMaxAge), + 
Compress: ReadBoolWitDefault(ReadinessProbeLoggerCompress, "false"), + } + return logger +} + +func readinessProbeLogFilePath() string { + return GetEnvOrDefault(logPathEnv, defaultLogPath) +} + +func GetEnvOrDefault(envVar, defaultValue string) string { + value := strings.TrimSpace(os.Getenv(envVar)) // nolint:forbidigo + if value == "" { + return defaultValue + } + return value +} + +// readInt returns the int value of an envvar of the given name. +// defaults to 0. +func readInt(envVarName string) int { + return readIntOrDefault(envVarName, 0) +} + +// readIntOrDefault returns the int value of an envvar of the given name. +// defaults to the given value if not specified. +func readIntOrDefault(envVarName string, defaultValue int) int { + envVar := GetEnvOrDefault(envVarName, strconv.Itoa(defaultValue)) + intValue, err := strconv.Atoi(envVar) + if err != nil { + return defaultValue + } + return intValue +} + +// ReadBoolWitDefault returns the boolean value of an envvar of the given name. 
+func ReadBoolWitDefault(envVarName string, defaultValue string) bool { + envVar := GetEnvOrDefault(envVarName, defaultValue) + return strings.TrimSpace(strings.ToLower(envVar)) == "true" +} diff --git a/pkg/readiness/headless/headless.go b/pkg/readiness/headless/headless.go new file mode 100644 index 000000000..18c28e23f --- /dev/null +++ b/pkg/readiness/headless/headless.go @@ -0,0 +1,78 @@ +package headless + +import ( + "context" + "fmt" + "io" + "os" + "strconv" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/pod" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/secret" + "go.uber.org/zap" +) + +const ( + acVersionPath string = "/var/lib/automation/config/acVersion/version" +) + +// PerformCheckHeadlessMode validates if the Agent has reached the correct goal state +// The state is fetched from K8s automation config Secret directly to avoid flakiness of mounting process +// Dev note: there is an alternative way to get current namespace: to read from +// /var/run/secrets/kubernetes.io/serviceaccount/namespace file (see +// https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod) +// though passing the namespace as an environment variable makes the code simpler for testing and saves an IO operation +func PerformCheckHeadlessMode(ctx context.Context, health health.Status, conf config.Config) (bool, error) { + var targetVersion int64 + var err error + + targetVersion, err = secret.ReadAutomationConfigVersionFromSecret(ctx, conf.Namespace, conf.ClientSet, conf.AutomationConfigSecretName) + if err != nil { + // this file is expected to be present in case of AppDB, there is no point trying to access it in + // community, it masks the underlying error + if _, pathErr := os.Stat(acVersionPath); !os.IsNotExist(pathErr) { + file, err := 
os.Open(acVersionPath) + if err != nil { + return false, err + } + defer file.Close() + + data, err := io.ReadAll(file) + if err != nil { + return false, err + } + + targetVersion, err = strconv.ParseInt(string(data), 10, 64) + if err != nil { + return false, err + } + } else { + return false, fmt.Errorf("failed to fetch automation-config secret name: %s, err: %s", conf.AutomationConfigSecretName, err) + } + } + + currentAgentVersion := readCurrentAgentInfo(health, targetVersion) + + if err = pod.PatchPodAnnotation(ctx, conf.Namespace, currentAgentVersion, conf.Hostname, conf.ClientSet); err != nil { + return false, err + } + + return targetVersion == currentAgentVersion, nil +} + +// readCurrentAgentInfo returns the automation config version the Agent has reached +func readCurrentAgentInfo(health health.Status, targetVersion int64) int64 { + for _, v := range health.MmsStatus { + zap.S().Debugf("Automation Config version: %d, Agent last version: %d", targetVersion, v.LastGoalStateClusterConfigVersion) + return v.LastGoalStateClusterConfigVersion + } + + // If there are no plans, we always return target version. + // Previously we relied on IsInGoalState, but the agent started sometimes returning IsInGoalState=false when scaling down members. + // No plans will occur if the agent is just starting or if the current process is not in the process list in automation config. + // Either way this is not a blocker for the operator to perform necessary statefulset changes on it.
+ + return targetVersion +} diff --git a/pkg/readiness/headless/headless_test.go b/pkg/readiness/headless/headless_test.go new file mode 100644 index 000000000..d6f2f293c --- /dev/null +++ b/pkg/readiness/headless/headless_test.go @@ -0,0 +1,43 @@ +package headless + +import ( + "context" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/cmd/readiness/testdata" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestPerformCheckHeadlessMode(t *testing.T) { + ctx := context.Background() + c := testConfig() + + c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 11)) + status := health.Status{ + MmsStatus: map[string]health.MmsDirectorStatus{c.Hostname: { + LastGoalStateClusterConfigVersion: 10, + }}, + } + + achieved, err := PerformCheckHeadlessMode(ctx, status, c) + + require.NoError(t, err) + assert.False(t, achieved) + + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) + assert.Equal(t, map[string]string{"agent.mongodb.com/version": "10"}, thePod.Annotations) +} + +func testConfig() config.Config { + return config.Config{ + Namespace: "test-ns", + AutomationConfigSecretName: "test-mongodb-automation-config", + Hostname: "test-mongodb-0", + } +} diff --git a/pkg/readiness/health/health.go b/pkg/readiness/health/health.go new file mode 100644 index 000000000..c11a2a4ef --- /dev/null +++ b/pkg/readiness/health/health.go @@ -0,0 +1,90 @@ +package health + +import ( + "fmt" + "time" +) + +type replicationStatus int + +const ( + replicationStatusStartup replicationStatus = 0 + replicationStatusPrimary replicationStatus = 1 + replicationStatusSecondary 
replicationStatus = 2 + replicationStatusRecovering replicationStatus = 3 + replicationStatusStartup2 replicationStatus = 5 + replicationStatusUnknown replicationStatus = 6 + replicationStatusArbiter replicationStatus = 7 + replicationStatusDown replicationStatus = 8 + replicationStatusRollback replicationStatus = 9 + replicationStatusRemoved replicationStatus = 10 + replicationStatusUndefined replicationStatus = -1 +) + +type Status struct { + Statuses map[string]processStatus `json:"statuses"` + MmsStatus map[string]MmsDirectorStatus `json:"mmsStatus"` +} + +type processStatus struct { + IsInGoalState bool `json:"IsInGoalState"` + LastMongoUpTime int64 `json:"LastMongoUpTime"` + ExpectedToBeUp bool `json:"ExpectedToBeUp"` + ReplicaStatus *replicationStatus `json:"ReplicationStatus"` +} + +func (h processStatus) String() string { + return fmt.Sprintf("ExpectedToBeUp: %t, IsInGoalState: %t, LastMongoUpTime: %v", h.ExpectedToBeUp, + h.IsInGoalState, time.Unix(h.LastMongoUpTime, 0)) +} + +// These structs are copied from go_planner mmsdirectorstatus.go. Some fields are pruned as not used. +type MmsDirectorStatus struct { + Name string `json:"name"` + LastGoalStateClusterConfigVersion int64 `json:"lastGoalVersionAchieved"` + Plans []*PlanStatus `json:"plans"` +} + +type PlanStatus struct { + Moves []*MoveStatus `json:"moves"` + Started *time.Time `json:"started"` + Completed *time.Time `json:"completed"` +} + +type MoveStatus struct { + Steps []*StepStatus `json:"steps"` +} +type StepStatus struct { + Step string `json:"step"` + StepDoc string `json:"stepDoc"` + IsWaitStep bool `json:"isWaitStep"` + Started *time.Time `json:"started"` + Completed *time.Time `json:"completed"` + Result string `json:"result"` +} + +// IsReadyState will return true, meaning a *ready state* in the sense that this Process can +// accept read operations. 
+// It returns true if the managed process is mongos or standalone (replicationStatusUndefined) +// or if the agent doesn't publish the replica status (older agents) +func (h processStatus) IsReadyState() bool { + if h.ReplicaStatus == nil { + return true + } + status := *h.ReplicaStatus + if status == replicationStatusUndefined { + return true + } + + switch status { + case + // There are no other states in which the MongoDB + // server could be in that would mean a Ready State. + replicationStatusPrimary, + replicationStatusSecondary, + replicationStatusArbiter: + return true + } + + return false +} diff --git a/pkg/readiness/health/health_test.go b/pkg/readiness/health/health_test.go new file mode 100644 index 000000000..b4003e088 --- /dev/null +++ b/pkg/readiness/health/health_test.go @@ -0,0 +1,32 @@ +package health + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestIsReadyState checks that Primary, Secondary, Arbiter, and Undefined always result +// in Ready State. +func TestIsReadyStateNotPrimaryNorSecondary(t *testing.T) { + status := []replicationStatus{replicationStatusUndefined, replicationStatusPrimary, replicationStatusSecondary, replicationStatusArbiter} + + for i := range status { + h := processStatus{ReplicaStatus: &status[i]} + assert.True(t, h.IsReadyState()) + } +} + +// TestIsNotReady any of these states will result in a Database not being ready.
+func TestIsNotReady(t *testing.T) { + status := []replicationStatus{ + replicationStatusStartup, replicationStatusRecovering, replicationStatusStartup2, + replicationStatusUnknown, replicationStatusDown, + replicationStatusRollback, replicationStatusRemoved, + } + + for i := range status { + h := processStatus{ReplicaStatus: &status[i]} + assert.False(t, h.IsReadyState()) + } +} diff --git a/pkg/readiness/pod/podannotation.go b/pkg/readiness/pod/podannotation.go new file mode 100644 index 000000000..d36bda37f --- /dev/null +++ b/pkg/readiness/pod/podannotation.go @@ -0,0 +1,44 @@ +package pod + +import ( + "context" + "strconv" + "strings" + + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/client-go/kubernetes" +) + +const mongodbAgentVersionAnnotation = "agent.mongodb.com/version" + +func PatchPodAnnotation(ctx context.Context, podNamespace string, lastVersionAchieved int64, memberName string, clientSet kubernetes.Interface) error { + pod, err := clientSet.CoreV1().Pods(podNamespace).Get(ctx, memberName, metav1.GetOptions{}) + if err != nil { + return err + } + + var payload []patchValue + + if len(pod.Annotations) == 0 { + payload = append(payload, patchValue{ + Op: "add", + Path: "/metadata/annotations", + Value: make(map[string]string), + }) + } + mdbAgentVersion := strconv.FormatInt(lastVersionAchieved, 10) + payload = append(payload, patchValue{ + Op: "add", + Path: "/metadata/annotations/" + strings.Replace(mongodbAgentVersionAnnotation, "/", "~1", -1), + Value: mdbAgentVersion, + }) + + patcher := NewKubernetesPodPatcher(clientSet) + updatedPod, err := patcher.patchPod(ctx, podNamespace, memberName, payload) + if updatedPod != nil { + zap.S().Debugf("Updated Pod annotation: %v (%s)", pod.Annotations, memberName) + } + return err +} diff --git a/pkg/readiness/pod/podannotation_test.go b/pkg/readiness/pod/podannotation_test.go new file mode 100644 index 000000000..b75382421 --- /dev/null +++ 
b/pkg/readiness/pod/podannotation_test.go @@ -0,0 +1,48 @@ +package pod + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +// Currently seems like the appending functionality on the library used by the fake +// implementation to simulate JSONPatch is broken: https://github.com/evanphx/json-patch/issues/138 +// The short term workaround is to have the annotation empty. + +// TestPatchPodAnnotation verifies that patching of the pod works correctly +func TestPatchPodAnnotation(t *testing.T) { + ctx := context.Background() + clientset := fake.NewSimpleClientset(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-replica-set-0", + Namespace: "test-ns", + Annotations: map[string]string{ + mongodbAgentVersionAnnotation: "", + }, + }, + }) + + pod, _ := clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) + assert.Empty(t, pod.Annotations[mongodbAgentVersionAnnotation]) + + // adding the annotations + assert.NoError(t, PatchPodAnnotation(ctx, "test-ns", 1, "my-replica-set-0", clientset)) + pod, _ = clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) + assert.Equal(t, map[string]string{"agent.mongodb.com/version": "1"}, pod.Annotations) + + // changing the annotations - no new annotations were added + assert.NoError(t, PatchPodAnnotation(ctx, "test-ns", 2, "my-replica-set-0", clientset)) + pod, _ = clientset.CoreV1().Pods("test-ns").Get(ctx, "my-replica-set-0", metav1.GetOptions{}) + assert.Equal(t, map[string]string{"agent.mongodb.com/version": "2"}, pod.Annotations) +} + +func TestUpdatePodAnnotationPodNotFound(t *testing.T) { + ctx := context.Background() + assert.True(t, apiErrors.IsNotFound(PatchPodAnnotation(ctx, "wrong-ns", 1, "my-replica-set-0", fake.NewSimpleClientset()))) +} diff --git 
a/pkg/readiness/pod/podpatcher.go b/pkg/readiness/pod/podpatcher.go new file mode 100644 index 000000000..5bea91f33 --- /dev/null +++ b/pkg/readiness/pod/podpatcher.go @@ -0,0 +1,33 @@ +package pod + +import ( + "context" + "encoding/json" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" +) + +type patchValue struct { + Op string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value"` +} + +type Patcher struct { + clientset kubernetes.Interface +} + +func NewKubernetesPodPatcher(clientSet kubernetes.Interface) Patcher { + return Patcher{clientset: clientSet} +} + +func (p Patcher) patchPod(ctx context.Context, namespace, podName string, payload []patchValue) (*v1.Pod, error) { + data, err := json.Marshal(payload) + if err != nil { + return nil, err + } + return p.clientset.CoreV1().Pods(namespace).Patch(ctx, podName, types.JSONPatchType, data, metav1.PatchOptions{}) +} diff --git a/pkg/readiness/secret/automationconfig.go b/pkg/readiness/secret/automationconfig.go new file mode 100644 index 000000000..b08ebded0 --- /dev/null +++ b/pkg/readiness/secret/automationconfig.go @@ -0,0 +1,31 @@ +package secret + +import ( + "context" + "encoding/json" + + "github.com/spf13/cast" + "k8s.io/client-go/kubernetes" +) + +const ( + automationConfigKey = "cluster-config.json" +) + +func ReadAutomationConfigVersionFromSecret(ctx context.Context, namespace string, clientSet kubernetes.Interface, automationConfigMap string) (int64, error) { + secretReader := newKubernetesSecretReader(clientSet) + theSecret, err := secretReader.ReadSecret(ctx, namespace, automationConfigMap) + if err != nil { + return -1, err + } + var existingDeployment map[string]interface{} + if err := json.Unmarshal(theSecret.Data[automationConfigKey], &existingDeployment); err != nil { + return -1, err + } + + version, ok := existingDeployment["version"] + if !ok { + return -1, err + } + return 
cast.ToInt64(version), nil +} diff --git a/pkg/readiness/secret/secretreader.go b/pkg/readiness/secret/secretreader.go new file mode 100644 index 000000000..aecb845e0 --- /dev/null +++ b/pkg/readiness/secret/secretreader.go @@ -0,0 +1,21 @@ +package secret + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +type reader struct { + clientset kubernetes.Interface +} + +func newKubernetesSecretReader(clientSet kubernetes.Interface) *reader { + return &reader{clientset: clientSet} +} + +func (r *reader) ReadSecret(ctx context.Context, namespace, secretName string) (*corev1.Secret, error) { + return r.clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) +} diff --git a/pkg/util/apierrors/apierrors.go b/pkg/util/apierrors/apierrors.go new file mode 100644 index 000000000..94955c77c --- /dev/null +++ b/pkg/util/apierrors/apierrors.go @@ -0,0 +1,17 @@ +package apierrors + +import "strings" + +// objectModifiedText is an error indicating that we are trying to update a resource that has since been updated. +// in this case we just want to retry but not log it as an error. +var objectModifiedText = "the object has been modified; please apply your changes to the latest version and try again" + +// IsTransientError returns a boolean indicating if a given error is transient. +func IsTransientError(err error) bool { + return IsTransientMessage(err.Error()) +} + +// IsTransientMessage returns a boolean indicating if a given error message is transient. 
+func IsTransientMessage(msg string) bool { + return strings.Contains(strings.ToLower(msg), objectModifiedText) +} diff --git a/pkg/util/apierrors/apierrors_test.go b/pkg/util/apierrors/apierrors_test.go new file mode 100644 index 000000000..88c31e205 --- /dev/null +++ b/pkg/util/apierrors/apierrors_test.go @@ -0,0 +1,37 @@ +package apierrors + +import ( + "fmt" + "testing" +) + +func TestIsTransientError(t *testing.T) { + tests := []struct { + name string + err error + want bool + }{ + { + "Test Transient capitalised error", + fmt.Errorf("Error updating the status of the MongoDB resource: Operation cannot be fulfilled on mongodbcommunity.mongodb.com \"mdb0\": The object has been modified; please apply your changes to the latest version and try again"), + true, + }, + { + "Test Transient lower case error", + fmt.Errorf("error updating the status of the MongoDB resource: Operation cannot be fulfilled on mongodbcommunity.mongodb.com \"mdb0\": the object has been modified; please apply your changes to the latest version and try again"), + true, + }, + { + "Test Not Transient Error", + fmt.Errorf(" error found deployments.extensions \"default\" not found"), + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsTransientError(tt.err); got != tt.want { + t.Errorf("IsTransientError() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/util/constants/constants.go b/pkg/util/constants/constants.go new file mode 100644 index 000000000..18baf0116 --- /dev/null +++ b/pkg/util/constants/constants.go @@ -0,0 +1,14 @@ +package constants + +const ( + ExternalDB = "$external" + Sha256 = "SCRAM-SHA-256" + Sha1 = "MONGODB-CR" + X509 = "MONGODB-X509" + AutomationAgentKeyFilePathInContainer = "/var/lib/mongodb-mms-automation/authentication/keyfile" + AgentName = "mms-automation" + AgentPasswordKey = "password" + AgentKeyfileKey = "keyfile" + AgentPemFile = "agent-certs-pem" + AutomationAgentWindowsKeyFilePath = 
"%SystemDrive%\\MMSAutomation\\versions\\keyfile" +) diff --git a/pkg/util/contains/contains.go b/pkg/util/contains/contains.go new file mode 100644 index 000000000..1b0c8f2ee --- /dev/null +++ b/pkg/util/contains/contains.go @@ -0,0 +1,59 @@ +package contains + +import ( + "reflect" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func String(slice []string, s string) bool { + for _, elem := range slice { + if elem == s { + return true + } + } + return false +} + +func Sha256(slice []string) bool { + return String(slice, constants.Sha256) +} + +func Sha1(slice []string) bool { + return String(slice, constants.Sha1) +} + +func X509(slice []string) bool { + return String(slice, constants.X509) +} + +func NamespacedName(nsNames []types.NamespacedName, nsName types.NamespacedName) bool { + for _, elem := range nsNames { + if elem == nsName { + return true + } + } + return false +} + +func AccessMode(accessModes []corev1.PersistentVolumeAccessMode, mode corev1.PersistentVolumeAccessMode) bool { + for _, elem := range accessModes { + if elem == mode { + return true + } + } + return false +} + +func OwnerReferences(ownerRefs []metav1.OwnerReference, ownerRef metav1.OwnerReference) bool { + for _, elem := range ownerRefs { + if reflect.DeepEqual(elem, ownerRef) { + return true + } + } + return false +} diff --git a/pkg/util/envvar/envvars.go b/pkg/util/envvar/envvars.go new file mode 100644 index 000000000..00f054995 --- /dev/null +++ b/pkg/util/envvar/envvars.go @@ -0,0 +1,43 @@ +package envvar + +import ( + "os" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +func MergeWithOverride(existing, desired []corev1.EnvVar) []corev1.EnvVar { + envMap := make(map[string]corev1.EnvVar) + for _, env := range existing { + envMap[env.Name] = env + } + + for _, env := range desired { + envMap[env.Name] = env + } + + var mergedEnv 
[]corev1.EnvVar + for _, env := range envMap { + mergedEnv = append(mergedEnv, env) + } + + sort.SliceStable(mergedEnv, func(i, j int) bool { + return mergedEnv[i].Name < mergedEnv[j].Name + }) + return mergedEnv +} + +func GetEnvOrDefault(envVar, defaultValue string) string { + if val, ok := os.LookupEnv(envVar); ok { + return val + } + return defaultValue +} + +// ReadBool returns the boolean value of an envvar of the given name. +func ReadBool(envVarName string) bool { + envVar := GetEnvOrDefault(envVarName, "false") + return strings.TrimSpace(strings.ToLower(envVar)) == "true" +} diff --git a/pkg/util/envvar/envvars_test.go b/pkg/util/envvar/envvars_test.go new file mode 100644 index 000000000..175e61f00 --- /dev/null +++ b/pkg/util/envvar/envvars_test.go @@ -0,0 +1,17 @@ +package envvar + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetEnvOrDefault(t *testing.T) { + t.Setenv("env1", "val1") + + val := GetEnvOrDefault("env1", "defaultVal1") + assert.Equal(t, "val1", val) + + val2 := GetEnvOrDefault("env2", "defaultVal2") + assert.Equal(t, "defaultVal2", val2) +} diff --git a/pkg/util/functions/functions.go b/pkg/util/functions/functions.go new file mode 100644 index 000000000..5e8182050 --- /dev/null +++ b/pkg/util/functions/functions.go @@ -0,0 +1,38 @@ +package functions + +// RunSequentially executes a series of functions sequentially. Each function returns a boolean +// indicating if the function was successful, and an error indicating if something went wrong. +// if any function returns an error, an early exit happens. The first parameter indicates if the functions +// should run in the order provided. A value of false indicates they should run in reverse. +func RunSequentially(runSequentially bool, funcs ...func() (bool, error)) (bool, error) { + if runSequentially { + return runInOrder(funcs...) + } + return runReversed(funcs...) 
+} + +func runInOrder(funcs ...func() (bool, error)) (bool, error) { + for _, fn := range funcs { + successful, err := fn() + if err != nil { + return successful, err + } + if !successful { + return false, nil + } + } + return true, nil +} + +func runReversed(funcs ...func() (bool, error)) (bool, error) { + for i := len(funcs) - 1; i >= 0; i-- { + successful, err := funcs[i]() + if err != nil { + return successful, err + } + if !successful { + return false, nil + } + } + return true, nil +} diff --git a/pkg/util/generate/generate.go b/pkg/util/generate/generate.go new file mode 100644 index 000000000..338e0d1b8 --- /dev/null +++ b/pkg/util/generate/generate.go @@ -0,0 +1,88 @@ +package generate + +import ( + "crypto/rand" + "crypto/sha1" // nolint + "crypto/sha256" + "encoding/base64" + "hash" + "unicode" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scramcredentials" +) + +// final key must be between 6 and at most 1024 characters +func KeyFileContents() (string, error) { + return generateRandomString(500) +} + +// RandomValidDNS1123Label generates a random fixed-length string with characters in a certain range. +func RandomValidDNS1123Label(n int) (string, error) { + str, err := RandomFixedLengthStringOfSize(n) + if err != nil { + return "", err + } + + runes := []rune(str) + + // Make sure that any letters are lowercase and that if any non-alphanumeric characters appear they are set to '0'. + for i, r := range runes { + if unicode.IsLetter(r) { + runes[i] = unicode.ToLower(r) + } else if !unicode.IsNumber(r) { + runes[i] = rune('0') + } + } + + return string(runes), nil +} + +func RandomFixedLengthStringOfSize(n int) (string, error) { + b, err := generateRandomBytes(n) + return base64.URLEncoding.EncodeToString(b)[:n], err +} + +// Salts generates 2 different salts. 
The first is for the sha1 algorithm +// the second is for sha256 +func Salts() ([]byte, []byte, error) { + sha1Salt, err := salt(sha1.New) + if err != nil { + return nil, nil, err + } + + sha256Salt, err := salt(sha256.New) + if err != nil { + return nil, nil, err + } + return sha1Salt, sha256Salt, nil +} + +// salt will create a salt which can be used to compute Scram Sha credentials based on the given hashConstructor. +// sha1.New should be used for MONGODB-CR/SCRAM-SHA-1 and sha256.New should be used for SCRAM-SHA-256 +func salt(hashConstructor func() hash.Hash) ([]byte, error) { + saltSize := hashConstructor().Size() - scramcredentials.RFC5802MandatedSaltSize + salt, err := RandomFixedLengthStringOfSize(20) + + if err != nil { + return nil, err + } + shaBytes32 := sha256.Sum256([]byte(salt)) + + // the algorithms expect a salt of a specific size. + return shaBytes32[:saltSize], nil +} + +func generateRandomBytes(size int) ([]byte, error) { + b := make([]byte, size) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + + return b, nil +} + +func generateRandomString(numBytes int) (string, error) { + b, err := generateRandomBytes(numBytes) + return base64.StdEncoding.EncodeToString(b), err +} diff --git a/pkg/util/merge/merge.go b/pkg/util/merge/merge.go new file mode 100644 index 000000000..df59bab84 --- /dev/null +++ b/pkg/util/merge/merge.go @@ -0,0 +1,663 @@ +package merge + +import ( + "sort" + "strings" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + corev1 "k8s.io/api/core/v1" +) + +// StringSlices accepts two slices of strings, and returns a string slice +// containing the distinct elements present in both. +func StringSlices(slice1, slice2 []string) []string { + mergedStrings := make([]string, 0) + mergedStrings = append(mergedStrings, slice1...) 
+ for _, s := range slice2 { + if !contains.String(mergedStrings, s) { + mergedStrings = append(mergedStrings, s) + } + } + return mergedStrings +} + +// StringToStringMap merges two string maps together with the second map +// overriding any values also specified in the first. +func StringToStringMap(map1, map2 map[string]string) map[string]string { + if map1 == nil && map2 == nil { + return nil + } + mergedMap := make(map[string]string) + for k, v := range map1 { + mergedMap[k] = v + } + for k, v := range map2 { + mergedMap[k] = v + } + return mergedMap +} + +// StringToBoolMap merges two string-to-bool maps together with the second map +// overriding any values also specified in the first. +func StringToBoolMap(map1, map2 map[string]bool) map[string]bool { + mergedMap := make(map[string]bool) + for k, v := range map1 { + mergedMap[k] = v + } + for k, v := range map2 { + mergedMap[k] = v + } + return mergedMap +} + +// Containers merges two slices of containers merging each item by container name. 
+func Containers(defaultContainers, overrideContainers []corev1.Container) []corev1.Container { + mergedContainerMap := map[string]corev1.Container{} + + originalMap := createContainerMap(defaultContainers) + overrideMap := createContainerMap(overrideContainers) + + for k, v := range originalMap { + mergedContainerMap[k] = v + } + + for k, v := range overrideMap { + if orig, ok := originalMap[k]; ok { + mergedContainerMap[k] = Container(orig, v) + } else { + mergedContainerMap[k] = v + } + } + + var mergedContainers []corev1.Container + for _, v := range mergedContainerMap { + mergedContainers = append(mergedContainers, v) + } + + sort.SliceStable(mergedContainers, func(i, j int) bool { + return mergedContainers[i].Name < mergedContainers[j].Name + }) + return mergedContainers + +} + +func createContainerMap(containers []corev1.Container) map[string]corev1.Container { + m := make(map[string]corev1.Container) + for _, v := range containers { + m[v.Name] = v + } + return m +} + +func Container(defaultContainer, overrideContainer corev1.Container) corev1.Container { + merged := defaultContainer + + if overrideContainer.Name != "" { + merged.Name = overrideContainer.Name + } + + if overrideContainer.Image != "" { + merged.Image = overrideContainer.Image + } + + merged.Command = defaultContainer.Command + if len(overrideContainer.Command) > 0 { + merged.Command = overrideContainer.Command + } + merged.Args = defaultContainer.Args + if len(overrideContainer.Args) > 0 { + merged.Args = overrideContainer.Args + } + + if overrideContainer.WorkingDir != "" { + merged.WorkingDir = overrideContainer.WorkingDir + } + + merged.Ports = ContainerPortSlicesByName(defaultContainer.Ports, overrideContainer.Ports) + merged.Env = Envs(defaultContainer.Env, overrideContainer.Env) + merged.Resources = ResourceRequirements(defaultContainer.Resources, overrideContainer.Resources) + merged.VolumeMounts = VolumeMounts(defaultContainer.VolumeMounts, overrideContainer.VolumeMounts) + 
merged.VolumeDevices = VolumeDevices(defaultContainer.VolumeDevices, overrideContainer.VolumeDevices) + merged.LivenessProbe = Probe(defaultContainer.LivenessProbe, overrideContainer.LivenessProbe) + merged.ReadinessProbe = Probe(defaultContainer.ReadinessProbe, overrideContainer.ReadinessProbe) + merged.StartupProbe = Probe(defaultContainer.StartupProbe, overrideContainer.StartupProbe) + merged.Lifecycle = LifeCycle(defaultContainer.Lifecycle, overrideContainer.Lifecycle) + + if overrideContainer.TerminationMessagePath != "" { + merged.TerminationMessagePath = overrideContainer.TerminationMessagePath + } + + if overrideContainer.TerminationMessagePolicy != "" { + merged.TerminationMessagePolicy = overrideContainer.TerminationMessagePolicy + } + + if overrideContainer.ImagePullPolicy != "" { + merged.ImagePullPolicy = overrideContainer.ImagePullPolicy + } + + merged.SecurityContext = SecurityContext(defaultContainer.SecurityContext, overrideContainer.SecurityContext) + + if overrideContainer.Stdin { + merged.Stdin = overrideContainer.Stdin + } + + if overrideContainer.StdinOnce { + merged.StdinOnce = overrideContainer.StdinOnce + } + + if overrideContainer.TTY { + merged.TTY = overrideContainer.TTY + } + + return merged +} + +// Probe merges the contents of two probes together. 
+func Probe(original, override *corev1.Probe) *corev1.Probe { + if override == nil { + return original + } + if original == nil { + return override + } + merged := *original + if override.Exec != nil { + merged.Exec = override.Exec + } + if override.HTTPGet != nil { + merged.HTTPGet = override.HTTPGet + } + if override.TCPSocket != nil { + merged.TCPSocket = override.TCPSocket + } + if override.InitialDelaySeconds != 0 { + merged.InitialDelaySeconds = override.InitialDelaySeconds + } + if override.TimeoutSeconds != 0 { + merged.TimeoutSeconds = override.TimeoutSeconds + } + if override.PeriodSeconds != 0 { + merged.PeriodSeconds = override.PeriodSeconds + } + + if override.SuccessThreshold != 0 { + merged.SuccessThreshold = override.SuccessThreshold + } + + if override.FailureThreshold != 0 { + merged.FailureThreshold = override.FailureThreshold + } + return &merged +} + +// LifeCycle merges two LifeCycles. +func LifeCycle(original, override *corev1.Lifecycle) *corev1.Lifecycle { + if override == nil { + return original + } + if original == nil { + return override + } + merged := *original + + if override.PostStart != nil { + merged.PostStart = override.PostStart + } + if override.PreStop != nil { + merged.PreStop = override.PreStop + } + return &merged +} + +// SecurityContext merges two security contexts. 
+func SecurityContext(original, override *corev1.SecurityContext) *corev1.SecurityContext { + if override == nil { + return original + } + if original == nil { + return override + } + merged := *original + + if override.Capabilities != nil { + merged.Capabilities = override.Capabilities + } + + if override.Privileged != nil { + merged.Privileged = override.Privileged + } + + if override.SELinuxOptions != nil { + merged.SELinuxOptions = override.SELinuxOptions + } + + if override.WindowsOptions != nil { + merged.WindowsOptions = override.WindowsOptions + } + if override.RunAsUser != nil { + merged.RunAsUser = override.RunAsUser + } + if override.RunAsGroup != nil { + merged.RunAsGroup = override.RunAsGroup + } + if override.RunAsNonRoot != nil { + merged.RunAsNonRoot = override.RunAsNonRoot + } + if override.ReadOnlyRootFilesystem != nil { + merged.ReadOnlyRootFilesystem = override.ReadOnlyRootFilesystem + } + if override.AllowPrivilegeEscalation != nil { + merged.AllowPrivilegeEscalation = override.AllowPrivilegeEscalation + } + if override.ProcMount != nil { + merged.ProcMount = override.ProcMount + } + return &merged +} + +// VolumeDevices merges two slices of VolumeDevices by name. 
+func VolumeDevices(original, override []corev1.VolumeDevice) []corev1.VolumeDevice { + mergedDevicesMap := map[string]corev1.VolumeDevice{} + originalDevicesMap := createVolumeDevicesMap(original) + overrideDevicesMap := createVolumeDevicesMap(override) + + for k, v := range originalDevicesMap { + mergedDevicesMap[k] = v + } + + for k, v := range overrideDevicesMap { + if orig, ok := originalDevicesMap[k]; ok { + mergedDevicesMap[k] = mergeVolumeDevice(orig, v) + } else { + mergedDevicesMap[k] = v + } + } + + var mergedDevices []corev1.VolumeDevice + for _, v := range mergedDevicesMap { + mergedDevices = append(mergedDevices, v) + } + + sort.SliceStable(mergedDevices, func(i, j int) bool { + return mergedDevices[i].Name < mergedDevices[j].Name + }) + return mergedDevices +} + +func createVolumeDevicesMap(devices []corev1.VolumeDevice) map[string]corev1.VolumeDevice { + m := make(map[string]corev1.VolumeDevice) + for _, v := range devices { + m[v.Name] = v + } + return m +} + +func mergeVolumeDevice(original, override corev1.VolumeDevice) corev1.VolumeDevice { + merged := original + if override.Name != "" { + merged.Name = override.Name + } + if override.DevicePath != "" { + merged.DevicePath = override.DevicePath + } + return merged +} + +// Envs merges two slices of EnvVars using their name as the unique +// identifier. 
+func Envs(original, override []corev1.EnvVar) []corev1.EnvVar { + mergedEnvsMap := map[string]corev1.EnvVar{} + + originalMap := createEnvMap(original) + overrideMap := createEnvMap(override) + + for k, v := range originalMap { + mergedEnvsMap[k] = v + } + + for k, v := range overrideMap { + if orig, ok := originalMap[k]; ok { + mergedEnvsMap[k] = mergeSingleEnv(orig, v) + } else { + mergedEnvsMap[k] = v + } + } + + var mergedEnvs []corev1.EnvVar + for _, v := range mergedEnvsMap { + mergedEnvs = append(mergedEnvs, v) + } + + sort.SliceStable(mergedEnvs, func(i, j int) bool { + return mergedEnvs[i].Name < mergedEnvs[j].Name + }) + return mergedEnvs +} + +func mergeSingleEnv(original, override corev1.EnvVar) corev1.EnvVar { + merged := original + if override.Value != "" { + merged.Value = override.Value + merged.ValueFrom = nil + } + + if override.ValueFrom != nil { + merged.ValueFrom = override.ValueFrom + merged.Value = "" + } + return merged +} + +func createEnvMap(env []corev1.EnvVar) map[string]corev1.EnvVar { + m := make(map[string]corev1.EnvVar) + for _, e := range env { + m[e.Name] = e + } + return m +} + +// ResourceRequirements merges two resource requirements. +func ResourceRequirements(original, override corev1.ResourceRequirements) corev1.ResourceRequirements { + merged := original + if override.Limits != nil { + merged.Limits = override.Limits + } + + if override.Requests != nil { + merged.Requests = override.Requests + } + return merged +} + +// ContainerPorts merges all of the fields of the overridePort on top of the defaultPort +// if the fields don't have a zero value. Thw new ContainerPort is returned. 
+func ContainerPorts(defaultPort, overridePort corev1.ContainerPort) corev1.ContainerPort { + mergedPort := defaultPort + if overridePort.Name != "" { + mergedPort.Name = overridePort.Name + } + if overridePort.ContainerPort != 0 { + mergedPort.ContainerPort = overridePort.ContainerPort + } + if overridePort.HostPort != 0 { + mergedPort.HostPort = overridePort.HostPort + } + if overridePort.Protocol != "" { + mergedPort.Protocol = overridePort.Protocol + } + if overridePort.HostIP != "" { + mergedPort.HostIP = overridePort.HostIP + } + return mergedPort +} + +// ContainerPortSlicesByName takes two slices of corev1.ContainerPorts, these values are merged by name. +// if there are elements present in the overridePorts that are not present in defaultPorts, they are +// appended to the end. +func ContainerPortSlicesByName(defaultPorts, overridePorts []corev1.ContainerPort) []corev1.ContainerPort { + defaultPortMap := createContainerPortMap(defaultPorts) + overridePortsMap := createContainerPortMap(overridePorts) + + mergedPorts := make([]corev1.ContainerPort, 0) + + for portName, defaultPort := range defaultPortMap { + if overridePort, ok := overridePortsMap[portName]; ok { + mergedPorts = append(mergedPorts, ContainerPorts(defaultPort, overridePort)) + } else { + mergedPorts = append(mergedPorts, defaultPort) + } + } + + for portName, overridePort := range overridePortsMap { + if _, ok := defaultPortMap[portName]; !ok { + mergedPorts = append(mergedPorts, overridePort) + } + } + + sort.SliceStable(mergedPorts, func(i, j int) bool { + return mergedPorts[i].Name < mergedPorts[j].Name + }) + + return mergedPorts +} + +func createContainerPortMap(containerPorts []corev1.ContainerPort) map[string]corev1.ContainerPort { + containerPortMap := make(map[string]corev1.ContainerPort) + for _, m := range containerPorts { + containerPortMap[m.Name] = m + } + return containerPortMap +} + +// VolumeMounts merges two slices of volume mounts by name. 
+func VolumeMounts(original, override []corev1.VolumeMount) []corev1.VolumeMount { + mergedMountsMap := map[string]corev1.VolumeMount{} + originalMounts := createVolumeMountMap(original) + overrideMounts := createVolumeMountMap(override) + + for k, v := range originalMounts { + mergedMountsMap[k] = v + } + + for k, v := range overrideMounts { + if orig, ok := originalMounts[k]; ok { + mergedMountsMap[k] = VolumeMount(orig, v) + } else { + mergedMountsMap[k] = v + } + } + + var mergedMounts []corev1.VolumeMount + for _, mount := range mergedMountsMap { + mergedMounts = append(mergedMounts, mount) + } + + sort.SliceStable(mergedMounts, func(i, j int) bool { + return volumeMountToString(mergedMounts[i]) < volumeMountToString(mergedMounts[j]) + }) + + return mergedMounts +} + +// volumeMountToString returns a string consisting of all components of the given VolumeMount. +func volumeMountToString(v corev1.VolumeMount) string { + return strings.Join([]string{v.Name, v.MountPath, v.SubPath}, "_") +} + +func createVolumeMountMap(mounts []corev1.VolumeMount) map[string]corev1.VolumeMount { + m := make(map[string]corev1.VolumeMount) + for _, v := range mounts { + m[volumeMountToString(v)] = v + } + return m +} + +// VolumeMount merges two corev1.VolumeMounts. Any fields in the override take precedence +// over the values in the original. Thw new VolumeMount is returned. 
+func VolumeMount(original, override corev1.VolumeMount) corev1.VolumeMount { + merged := original + + if override.Name != "" { + merged.Name = override.Name + } + + if override.ReadOnly { + merged.ReadOnly = override.ReadOnly + } + + if override.MountPath != "" { + merged.MountPath = override.MountPath + } + + if override.SubPath != "" { + merged.SubPath = override.SubPath + } + + if override.MountPropagation != nil { + merged.MountPropagation = override.MountPropagation + } + + if override.SubPathExpr != "" { + merged.SubPathExpr = override.SubPathExpr + } + return merged +} + +func Volumes(defaultVolumes []corev1.Volume, overrideVolumes []corev1.Volume) []corev1.Volume { + defaultVolumesMap := createVolumesMap(defaultVolumes) + overrideVolumesMap := createVolumesMap(overrideVolumes) + mergedVolumes := []corev1.Volume{} + + for _, defaultVolume := range defaultVolumes { + mergedVolume := defaultVolume + if overrideVolume, ok := overrideVolumesMap[defaultVolume.Name]; ok { + mergedVolume = Volume(defaultVolume, overrideVolume) + } + mergedVolumes = append(mergedVolumes, mergedVolume) + } + + for _, overrideVolume := range overrideVolumes { + if _, ok := defaultVolumesMap[overrideVolume.Name]; ok { + // Already Merged + continue + } + mergedVolumes = append(mergedVolumes, overrideVolume) + } + + sort.SliceStable(mergedVolumes, func(i, j int) bool { + return mergedVolumes[i].Name < mergedVolumes[j].Name + }) + return mergedVolumes +} + +func Volume(defaultVolume corev1.Volume, overrideVolume corev1.Volume) corev1.Volume { + // Volume contains only Name and VolumeSource + + // Merge VolumeSource + overrideSource := overrideVolume.VolumeSource + defaultSource := defaultVolume.VolumeSource + mergedVolume := defaultVolume + + // Only one field must be non-nil. 
+ // We merge if it is one of the ones we fill from the operator side: + // - EmptyDir + if overrideSource.EmptyDir != nil && defaultSource.EmptyDir != nil { + if overrideSource.EmptyDir.Medium != "" { + mergedVolume.EmptyDir.Medium = overrideSource.EmptyDir.Medium + } + if overrideSource.EmptyDir.SizeLimit != nil { + mergedVolume.EmptyDir.SizeLimit = overrideSource.EmptyDir.SizeLimit + } + return mergedVolume + } + // - Secret + if overrideSource.Secret != nil && defaultSource.Secret != nil { + if overrideSource.Secret.SecretName != "" { + mergedVolume.Secret.SecretName = overrideSource.Secret.SecretName + } + mergedVolume.Secret.Items = mergeKeyToPathItems(defaultSource.Secret.Items, overrideSource.Secret.Items) + if overrideSource.Secret.DefaultMode != nil { + mergedVolume.Secret.DefaultMode = overrideSource.Secret.DefaultMode + } + return mergedVolume + } + // - ConfigMap + if overrideSource.ConfigMap != nil && defaultSource.ConfigMap != nil { + if overrideSource.ConfigMap.LocalObjectReference.Name != "" { + mergedVolume.ConfigMap.LocalObjectReference.Name = overrideSource.ConfigMap.LocalObjectReference.Name + } + mergedVolume.ConfigMap.Items = mergeKeyToPathItems(defaultSource.ConfigMap.Items, overrideSource.ConfigMap.Items) + if overrideSource.ConfigMap.DefaultMode != nil { + mergedVolume.ConfigMap.DefaultMode = overrideSource.ConfigMap.DefaultMode + } + if overrideSource.ConfigMap.Optional != nil { + mergedVolume.ConfigMap.Optional = overrideSource.ConfigMap.Optional + } + return mergedVolume + } + + // Otherwise we assume that the user provides every field + // and we just assign it and nil every other field + // We also do that in the case the user provides one of the three listed above + // but our volume has a different non-nil entry. 
+ + // this is effectively the same as just returning the overrideSource + mergedVolume.VolumeSource = overrideSource + return mergedVolume +} + +func createVolumesMap(volumes []corev1.Volume) map[string]corev1.Volume { + volumesMap := make(map[string]corev1.Volume) + for _, v := range volumes { + volumesMap[v.Name] = v + } + return volumesMap +} + +func mergeKeyToPathItems(defaultItems []corev1.KeyToPath, overrideItems []corev1.KeyToPath) []corev1.KeyToPath { + // Merge Items array by KeyToPath.Key entry + defaultItemsMap := createKeyToPathMap(defaultItems) + overrideItemsMap := createKeyToPathMap(overrideItems) + var mergedItems []corev1.KeyToPath + for _, defaultItem := range defaultItemsMap { + mergedKey := defaultItem + if overrideItem, ok := overrideItemsMap[defaultItem.Key]; ok { + // Need to merge + mergedKey = mergeKeyToPath(defaultItem, overrideItem) + } + mergedItems = append(mergedItems, mergedKey) + } + for _, overrideItem := range overrideItemsMap { + if _, ok := defaultItemsMap[overrideItem.Key]; ok { + // Already merged + continue + } + mergedItems = append(mergedItems, overrideItem) + + } + return mergedItems +} + +func mergeKeyToPath(defaultKey corev1.KeyToPath, overrideKey corev1.KeyToPath) corev1.KeyToPath { + if defaultKey.Key != overrideKey.Key { + // This should not happen as we always merge by Key. + // If it does, we return the default as something's wrong + return defaultKey + } + if overrideKey.Path != "" { + defaultKey.Path = overrideKey.Path + } + if overrideKey.Mode != nil { + defaultKey.Mode = overrideKey.Mode + } + return defaultKey +} + +func createKeyToPathMap(items []corev1.KeyToPath) map[string]corev1.KeyToPath { + itemsMap := make(map[string]corev1.KeyToPath) + for _, v := range items { + itemsMap[v.Key] = v + } + return itemsMap +} + +// Affinity merges two corev1.Affinity types. 
+func Affinity(defaultAffinity, overrideAffinity *corev1.Affinity) *corev1.Affinity { + if defaultAffinity == nil { + return overrideAffinity + } + if overrideAffinity == nil { + return defaultAffinity + } + mergedAffinity := defaultAffinity.DeepCopy() + if overrideAffinity.NodeAffinity != nil { + mergedAffinity.NodeAffinity = overrideAffinity.NodeAffinity + } + if overrideAffinity.PodAffinity != nil { + mergedAffinity.PodAffinity = overrideAffinity.PodAffinity + } + if overrideAffinity.PodAntiAffinity != nil { + mergedAffinity.PodAntiAffinity = overrideAffinity.PodAntiAffinity + } + return mergedAffinity +} diff --git a/pkg/util/merge/merge_automationconfigs.go b/pkg/util/merge/merge_automationconfigs.go new file mode 100644 index 000000000..5a6eeb69e --- /dev/null +++ b/pkg/util/merge/merge_automationconfigs.go @@ -0,0 +1,40 @@ +package merge + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" +) + +// AutomationConfigs merges the values in "override" into the "original" Wrapper. +// Merging is done by name for processes. +func AutomationConfigs(original, override automationconfig.AutomationConfig) automationconfig.AutomationConfig { + original.Processes = mergeProcesses(original.Processes, override.Processes) + return original +} + +func mergeProcesses(original, override []automationconfig.Process) []automationconfig.Process { + mergedProcesses := append([]automationconfig.Process{}, original...) 
+ for _, overrideProcess := range override { + correspondingIndex := getProcessIndexByName(overrideProcess.Name, original) + if correspondingIndex == -1 { + continue + } + mergedProcesses[correspondingIndex] = mergeProcess(original[correspondingIndex], overrideProcess) + } + return mergedProcesses +} + +func getProcessIndexByName(desiredProcessName string, originalProcesses []automationconfig.Process) int { + for i := range originalProcesses { + if originalProcesses[i].Name == desiredProcessName { + return i + } + } + return -1 +} + +func mergeProcess(original, override automationconfig.Process) automationconfig.Process { + // TODO: in order to override the disabled field, we just need this one field. We can handle all fields in a future change. + original.Disabled = override.Disabled + original.LogRotate = override.LogRotate + return original +} diff --git a/pkg/util/merge/merge_automationconfigs_test.go b/pkg/util/merge/merge_automationconfigs_test.go new file mode 100644 index 000000000..5e947eed0 --- /dev/null +++ b/pkg/util/merge/merge_automationconfigs_test.go @@ -0,0 +1,66 @@ +package merge + +import ( + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/stretchr/testify/assert" +) + +func TestMergeAutomationConfigs(t *testing.T) { + original, err := automationconfig.NewBuilder(). + SetName("test-ac"). + SetMembers(3). + Build() + + assert.NoError(t, err) + override, err := automationconfig.NewBuilder(). + SetName("test-ac"). + SetMembers(3). + AddProcessModification(func(i int, process *automationconfig.Process) { + // set a single process to be disabled. 
+ process.Disabled = i == 1 + }).Build() + + assert.NoError(t, err) + + for _, p := range original.Processes { + assert.False(t, p.Disabled) + } + + assert.False(t, override.Processes[0].Disabled) + assert.True(t, override.Processes[1].Disabled) + assert.False(t, override.Processes[2].Disabled) + + mergedAc := AutomationConfigs(original, override) + assert.False(t, mergedAc.Processes[0].Disabled) + assert.True(t, mergedAc.Processes[1].Disabled) + assert.False(t, mergedAc.Processes[2].Disabled) +} + +func TestMergeAutomationConfigs_NonExistentMember(t *testing.T) { + original, err := automationconfig.NewBuilder(). + SetName("test-ac"). + SetMembers(3). + Build() + + assert.NoError(t, err) + override, err := automationconfig.NewBuilder(). + SetName("test-ac-0"). + SetMembers(3). + AddProcessModification(func(i int, process *automationconfig.Process) { + process.Disabled = i == 1 + }).Build() + + assert.NoError(t, err) + + assert.False(t, override.Processes[0].Disabled) + assert.True(t, override.Processes[1].Disabled) + assert.False(t, override.Processes[2].Disabled) + + mergedAc := AutomationConfigs(original, override) + + assert.False(t, mergedAc.Processes[0].Disabled) + assert.False(t, mergedAc.Processes[1].Disabled, "should not be updated as the name does not match.") + assert.False(t, mergedAc.Processes[2].Disabled) +} diff --git a/pkg/util/merge/merge_ephemeral_container.go b/pkg/util/merge/merge_ephemeral_container.go new file mode 100644 index 000000000..5853e41cf --- /dev/null +++ b/pkg/util/merge/merge_ephemeral_container.go @@ -0,0 +1,109 @@ +package merge + +import ( + "sort" + + corev1 "k8s.io/api/core/v1" +) + +// EphemeralContainers merges two slices of EphemeralContainers merging each item by container name. 
+func EphemeralContainers(defaultContainers, overrideContainers []corev1.EphemeralContainer) []corev1.EphemeralContainer { + mergedContainerMap := map[string]corev1.EphemeralContainer{} + + originalMap := createEphemeralContainerMap(defaultContainers) + overrideMap := createEphemeralContainerMap(overrideContainers) + + for k, v := range originalMap { + mergedContainerMap[k] = v + } + + for k, v := range overrideMap { + if orig, ok := originalMap[k]; ok { + mergedContainerMap[k] = EphemeralContainer(orig, v) + } else { + mergedContainerMap[k] = v + } + } + + var mergedContainers []corev1.EphemeralContainer + for _, v := range mergedContainerMap { + mergedContainers = append(mergedContainers, v) + } + + sort.SliceStable(mergedContainers, func(i, j int) bool { + return mergedContainers[i].Name < mergedContainers[j].Name + }) + return mergedContainers + +} + +// EphemeralContainer merges two EphemeralContainers together. +func EphemeralContainer(defaultContainer, overrideContainer corev1.EphemeralContainer) corev1.EphemeralContainer { + merged := defaultContainer + + if overrideContainer.Name != "" { + merged.Name = overrideContainer.Name + } + + if overrideContainer.Image != "" { + merged.Image = overrideContainer.Image + } + + merged.Command = StringSlices(defaultContainer.Command, overrideContainer.Command) + merged.Args = StringSlices(defaultContainer.Args, overrideContainer.Args) + + if overrideContainer.WorkingDir != "" { + merged.WorkingDir = overrideContainer.WorkingDir + } + + merged.Ports = ContainerPortSlicesByName(defaultContainer.Ports, overrideContainer.Ports) + merged.Env = Envs(defaultContainer.Env, overrideContainer.Env) + merged.Resources = ResourceRequirements(defaultContainer.Resources, overrideContainer.Resources) + merged.VolumeMounts = VolumeMounts(defaultContainer.VolumeMounts, overrideContainer.VolumeMounts) + merged.VolumeDevices = VolumeDevices(defaultContainer.VolumeDevices, overrideContainer.VolumeDevices) + merged.LivenessProbe = 
Probe(defaultContainer.LivenessProbe, overrideContainer.LivenessProbe) + merged.ReadinessProbe = Probe(defaultContainer.ReadinessProbe, overrideContainer.ReadinessProbe) + merged.StartupProbe = Probe(defaultContainer.StartupProbe, overrideContainer.StartupProbe) + merged.Lifecycle = LifeCycle(defaultContainer.Lifecycle, overrideContainer.Lifecycle) + + if overrideContainer.TerminationMessagePath != "" { + merged.TerminationMessagePath = overrideContainer.TerminationMessagePath + } + + if overrideContainer.TerminationMessagePolicy != "" { + merged.TerminationMessagePolicy = overrideContainer.TerminationMessagePolicy + } + + if overrideContainer.ImagePullPolicy != "" { + merged.ImagePullPolicy = overrideContainer.ImagePullPolicy + } + + merged.SecurityContext = SecurityContext(defaultContainer.SecurityContext, overrideContainer.SecurityContext) + + if overrideContainer.Stdin { + merged.Stdin = overrideContainer.Stdin + } + + if overrideContainer.StdinOnce { + merged.StdinOnce = overrideContainer.StdinOnce + } + + if overrideContainer.TTY { + merged.TTY = overrideContainer.TTY + } + + // EphemeralContainer only fields + if overrideContainer.TargetContainerName != "" { + merged.TargetContainerName = overrideContainer.TargetContainerName + } + + return merged +} + +func createEphemeralContainerMap(containers []corev1.EphemeralContainer) map[string]corev1.EphemeralContainer { + m := make(map[string]corev1.EphemeralContainer) + for _, v := range containers { + m[v.Name] = v + } + return m +} diff --git a/pkg/util/merge/merge_podtemplate_spec.go b/pkg/util/merge/merge_podtemplate_spec.go new file mode 100644 index 000000000..0e59a4f6e --- /dev/null +++ b/pkg/util/merge/merge_podtemplate_spec.go @@ -0,0 +1,246 @@ +package merge + +import ( + "sort" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + corev1 "k8s.io/api/core/v1" +) + +func PodTemplateSpecs(original, override corev1.PodTemplateSpec) corev1.PodTemplateSpec { + merged := original + + 
merged.Annotations = StringToStringMap(original.Annotations, override.Annotations) + merged.Labels = StringToStringMap(original.Labels, override.Labels) + merged.Spec.Volumes = Volumes(original.Spec.Volumes, override.Spec.Volumes) + merged.Spec.Containers = Containers(original.Spec.Containers, override.Spec.Containers) + merged.Spec.InitContainers = Containers(original.Spec.InitContainers, override.Spec.InitContainers) + + if override.Spec.EphemeralContainers != nil { + merged.Spec.EphemeralContainers = EphemeralContainers(original.Spec.EphemeralContainers, override.Spec.EphemeralContainers) + } + + if override.Spec.RestartPolicy != "" { + merged.Spec.RestartPolicy = override.Spec.RestartPolicy + } + + if override.Spec.TerminationGracePeriodSeconds != nil { + merged.Spec.TerminationGracePeriodSeconds = override.Spec.TerminationGracePeriodSeconds + } + if override.Spec.ActiveDeadlineSeconds != nil { + merged.Spec.ActiveDeadlineSeconds = override.Spec.ActiveDeadlineSeconds + } + + if override.Spec.DNSPolicy != "" { + merged.Spec.DNSPolicy = override.Spec.DNSPolicy + } + + if override.Spec.NodeSelector != nil { + merged.Spec.NodeSelector = StringToStringMap(original.Spec.NodeSelector, override.Spec.NodeSelector) + } + + if override.Spec.ServiceAccountName != "" { + merged.Spec.ServiceAccountName = override.Spec.ServiceAccountName + } + + if override.Spec.DeprecatedServiceAccount != "" { + merged.Spec.DeprecatedServiceAccount = override.Spec.DeprecatedServiceAccount + } + + if override.Spec.AutomountServiceAccountToken != nil { + merged.Spec.AutomountServiceAccountToken = override.Spec.AutomountServiceAccountToken + } + + if override.Spec.NodeName != "" { + merged.Spec.NodeName = override.Spec.NodeName + } + + if override.Spec.HostNetwork { + merged.Spec.HostNetwork = override.Spec.HostNetwork + } + + if override.Spec.HostPID { + merged.Spec.HostPID = override.Spec.HostPID + } + + if override.Spec.ShareProcessNamespace != nil { + merged.Spec.ShareProcessNamespace = 
override.Spec.ShareProcessNamespace + } + + if override.Spec.SecurityContext != nil { + merged.Spec.SecurityContext = override.Spec.SecurityContext + } + + if override.Spec.ImagePullSecrets != nil { + merged.Spec.ImagePullSecrets = override.Spec.ImagePullSecrets + } + + if override.Spec.Hostname != "" { + merged.Spec.Hostname = override.Spec.Hostname + } + + if override.Spec.Subdomain != "" { + merged.Spec.Subdomain = override.Spec.Subdomain + } + + if override.Spec.Affinity != nil { + merged.Spec.Affinity = Affinity(original.Spec.Affinity, override.Spec.Affinity) + } + + if override.Spec.SchedulerName != "" { + merged.Spec.SchedulerName = override.Spec.SchedulerName + } + + if override.Spec.Tolerations != nil { + merged.Spec.Tolerations = override.Spec.Tolerations + } + + merged.Spec.HostAliases = HostAliases(original.Spec.HostAliases, override.Spec.HostAliases) + + if override.Spec.PriorityClassName != "" { + merged.Spec.PriorityClassName = override.Spec.PriorityClassName + } + + if override.Spec.Priority != nil { + merged.Spec.Priority = override.Spec.Priority + } + + if override.Spec.DNSConfig != nil { + merged.Spec.DNSConfig = PodDNSConfig(original.Spec.DNSConfig, override.Spec.DNSConfig) + } + + if override.Spec.ReadinessGates != nil { + merged.Spec.ReadinessGates = override.Spec.ReadinessGates + } + + if override.Spec.RuntimeClassName != nil { + merged.Spec.RuntimeClassName = override.Spec.RuntimeClassName + } + + if override.Spec.EnableServiceLinks != nil { + merged.Spec.EnableServiceLinks = override.Spec.EnableServiceLinks + } + + if override.Spec.PreemptionPolicy != nil { + merged.Spec.PreemptionPolicy = override.Spec.PreemptionPolicy + } + + if override.Spec.Overhead != nil { + merged.Spec.Overhead = override.Spec.Overhead + } + + if override.Spec.TopologySpreadConstraints != nil { + merged.Spec.TopologySpreadConstraints = TopologySpreadConstraints(original.Spec.TopologySpreadConstraints, override.Spec.TopologySpreadConstraints) + } + + return merged +} 
+ +func TopologySpreadConstraints(original, override []corev1.TopologySpreadConstraint) []corev1.TopologySpreadConstraint { + originalMap := createTopologySpreadConstraintMap(original) + overrideMap := createTopologySpreadConstraintMap(override) + + mergedMap := map[string]corev1.TopologySpreadConstraint{} + + for k, v := range originalMap { + mergedMap[k] = v + } + for k, v := range overrideMap { + if originalValue, ok := mergedMap[k]; ok { + mergedMap[k] = TopologySpreadConstraint(originalValue, v) + } else { + mergedMap[k] = v + } + } + var mergedElements []corev1.TopologySpreadConstraint + for _, v := range mergedMap { + mergedElements = append(mergedElements, v) + } + return mergedElements +} + +func TopologySpreadConstraint(original, override corev1.TopologySpreadConstraint) corev1.TopologySpreadConstraint { + merged := original + if override.LabelSelector != nil { + merged.LabelSelector = override.LabelSelector + } + if override.MaxSkew != 0 { + merged.MaxSkew = override.MaxSkew + } + if override.WhenUnsatisfiable != "" { + merged.WhenUnsatisfiable = override.WhenUnsatisfiable + } + return merged +} + +func createTopologySpreadConstraintMap(constraints []corev1.TopologySpreadConstraint) map[string]corev1.TopologySpreadConstraint { + m := make(map[string]corev1.TopologySpreadConstraint) + for _, v := range constraints { + m[v.TopologyKey] = v + } + return m +} + +// HostAliases merges two slices of HostAliases together. Any shared hostnames with a given +// ip are merged together into fewer entries. +func HostAliases(originalAliases, overrideAliases []corev1.HostAlias) []corev1.HostAlias { + m := make(map[string]corev1.HostAlias) + for _, original := range originalAliases { + m[original.IP] = original + } + + for _, override := range overrideAliases { + if _, ok := m[override.IP]; ok { + var mergedHostNames []string + mergedHostNames = append(mergedHostNames, m[override.IP].Hostnames...) 
+ for _, hn := range override.Hostnames { + if !contains.String(mergedHostNames, hn) { + mergedHostNames = append(mergedHostNames, hn) + } + } + m[override.IP] = corev1.HostAlias{ + IP: override.IP, + Hostnames: mergedHostNames, + } + } else { + m[override.IP] = override + } + } + + var mergedHostAliases []corev1.HostAlias + for _, v := range m { + mergedHostAliases = append(mergedHostAliases, v) + } + + sort.SliceStable(mergedHostAliases, func(i, j int) bool { + return mergedHostAliases[i].IP < mergedHostAliases[j].IP + }) + + return mergedHostAliases +} + +func PodDNSConfig(originalDNSConfig, overrideDNSConfig *corev1.PodDNSConfig) *corev1.PodDNSConfig { + if overrideDNSConfig == nil { + return originalDNSConfig + } + + if originalDNSConfig == nil { + return overrideDNSConfig + } + + merged := originalDNSConfig.DeepCopy() + + if overrideDNSConfig.Options != nil { + merged.Options = overrideDNSConfig.Options + } + + if overrideDNSConfig.Nameservers != nil { + merged.Nameservers = StringSlices(merged.Nameservers, overrideDNSConfig.Nameservers) + } + + if overrideDNSConfig.Searches != nil { + merged.Searches = StringSlices(merged.Searches, overrideDNSConfig.Searches) + } + + return merged +} diff --git a/pkg/util/merge/merge_service_spec.go b/pkg/util/merge/merge_service_spec.go new file mode 100644 index 000000000..7896c31ed --- /dev/null +++ b/pkg/util/merge/merge_service_spec.go @@ -0,0 +1,74 @@ +package merge + +import ( + corev1 "k8s.io/api/core/v1" +) + +// ServiceSpec merges two ServiceSpecs together. +// The implementation merges Selector instead of overriding it. 
+func ServiceSpec(defaultSpec, overrideSpec corev1.ServiceSpec) corev1.ServiceSpec { + mergedSpec := defaultSpec + mergedSpec.Selector = StringToStringMap(defaultSpec.Selector, overrideSpec.Selector) + + if len(overrideSpec.Ports) != 0 { + mergedSpec.Ports = overrideSpec.Ports + } + + if len(overrideSpec.Type) != 0 { + mergedSpec.Type = overrideSpec.Type + } + + if len(overrideSpec.LoadBalancerIP) != 0 { + mergedSpec.LoadBalancerIP = overrideSpec.LoadBalancerIP + } + + if overrideSpec.LoadBalancerClass != nil { + mergedSpec.LoadBalancerClass = overrideSpec.LoadBalancerClass + } + + if len(overrideSpec.ExternalName) != 0 { + mergedSpec.ExternalName = overrideSpec.ExternalName + } + + if len(overrideSpec.ExternalTrafficPolicy) != 0 { + mergedSpec.ExternalTrafficPolicy = overrideSpec.ExternalTrafficPolicy + } + + if overrideSpec.InternalTrafficPolicy != nil { + mergedSpec.InternalTrafficPolicy = overrideSpec.InternalTrafficPolicy + } + + if overrideSpec.PublishNotReadyAddresses { + mergedSpec.PublishNotReadyAddresses = overrideSpec.PublishNotReadyAddresses + } + + if overrideSpec.HealthCheckNodePort != 0 { + mergedSpec.HealthCheckNodePort = overrideSpec.HealthCheckNodePort + } + + if len(overrideSpec.LoadBalancerSourceRanges) != 0 { + mergedSpec.LoadBalancerSourceRanges = overrideSpec.LoadBalancerSourceRanges + } + + if len(overrideSpec.ExternalIPs) != 0 { + mergedSpec.ExternalIPs = overrideSpec.ExternalIPs + } + + if overrideSpec.SessionAffinityConfig != nil { + mergedSpec.SessionAffinityConfig = overrideSpec.SessionAffinityConfig + } + + if len(overrideSpec.SessionAffinity) != 0 { + mergedSpec.SessionAffinity = overrideSpec.SessionAffinity + } + + if len(overrideSpec.ClusterIP) != 0 { + mergedSpec.ClusterIP = overrideSpec.ClusterIP + } + + if len(overrideSpec.ClusterIPs) != 0 { + mergedSpec.ClusterIPs = overrideSpec.ClusterIPs + } + + return mergedSpec +} diff --git a/pkg/util/merge/merge_statefulset.go b/pkg/util/merge/merge_statefulset.go new file mode 100644 
index 000000000..a6a9f4879 --- /dev/null +++ b/pkg/util/merge/merge_statefulset.go @@ -0,0 +1,207 @@ +package merge + +import ( + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +// StatefulSets merges two StatefulSets together. +func StatefulSets(defaultStatefulSet, overrideStatefulSet appsv1.StatefulSet) appsv1.StatefulSet { + mergedSts := defaultStatefulSet + mergedSts.Labels = StringToStringMap(defaultStatefulSet.Labels, overrideStatefulSet.Labels) + if overrideStatefulSet.Namespace != "" { + mergedSts.Namespace = overrideStatefulSet.Namespace + } + if overrideStatefulSet.Name != "" { + mergedSts.Name = overrideStatefulSet.Name + } + mergedSts.Spec = StatefulSetSpecs(defaultStatefulSet.Spec, overrideStatefulSet.Spec) + return mergedSts +} + +// StatefulSetSpecs merges two StatefulSetSpecs together. +func StatefulSetSpecs(defaultSpec, overrideSpec appsv1.StatefulSetSpec) appsv1.StatefulSetSpec { + mergedSpec := defaultSpec + if overrideSpec.Replicas != nil { + mergedSpec.Replicas = overrideSpec.Replicas + } + + mergedSpec.Selector = LabelSelectors(defaultSpec.Selector, overrideSpec.Selector) + + if overrideSpec.PodManagementPolicy != "" { + mergedSpec.PodManagementPolicy = overrideSpec.PodManagementPolicy + } + + if overrideSpec.RevisionHistoryLimit != nil { + mergedSpec.RevisionHistoryLimit = overrideSpec.RevisionHistoryLimit + } + + if overrideSpec.UpdateStrategy.Type != "" { + mergedSpec.UpdateStrategy.Type = overrideSpec.UpdateStrategy.Type + } + + if overrideSpec.UpdateStrategy.RollingUpdate != nil { + mergedSpec.UpdateStrategy.RollingUpdate = overrideSpec.UpdateStrategy.RollingUpdate + } + + if overrideSpec.ServiceName != "" { + mergedSpec.ServiceName = overrideSpec.ServiceName + } + + mergedSpec.Template = PodTemplateSpecs(defaultSpec.Template, overrideSpec.Template) + mergedSpec.VolumeClaimTemplates = 
VolumeClaimTemplates(defaultSpec.VolumeClaimTemplates, overrideSpec.VolumeClaimTemplates) + return mergedSpec +} + +func LabelSelectors(originalLabelSelector, overrideLabelSelector *metav1.LabelSelector) *metav1.LabelSelector { + // we have only specified a label selector in the override + if overrideLabelSelector == nil { + return originalLabelSelector + } + // we have only specified a label selector in the original + if originalLabelSelector == nil { + return overrideLabelSelector + } + + // we have specified both, so we must merge them + mergedLabelSelector := &metav1.LabelSelector{} + mergedLabelSelector.MatchLabels = StringToStringMap(originalLabelSelector.MatchLabels, overrideLabelSelector.MatchLabels) + mergedLabelSelector.MatchExpressions = LabelSelectorRequirements(originalLabelSelector.MatchExpressions, overrideLabelSelector.MatchExpressions) + return mergedLabelSelector +} + +// LabelSelectorRequirements accepts two slices of LabelSelectorRequirement. Any LabelSelectorRequirement in the override +// slice that has the same key as one from the original is merged. Otherwise they are appended to the list. 
+// LabelSelectorRequirements accepts two slices of LabelSelectorRequirement. Any LabelSelectorRequirement in the override
+// slice that has the same key as one from the original is merged. Otherwise they are appended to the list.
+// The merged requirements are returned sorted by Key, with each entry's
+// Values sorted. The input slices are never modified.
+func LabelSelectorRequirements(original, override []metav1.LabelSelectorRequirement) []metav1.LabelSelectorRequirement {
+	mergedLsrs := make([]metav1.LabelSelectorRequirement, 0)
+	for _, originalLsr := range original {
+		mergedLsr := originalLsr
+		overrideLsr := LabelSelectorRequirementByKey(override, originalLsr.Key)
+		if overrideLsr != nil {
+			if overrideLsr.Operator != "" {
+				mergedLsr.Operator = overrideLsr.Operator
+			}
+			if overrideLsr.Values != nil {
+				mergedLsr.Values = StringSlices(originalLsr.Values, overrideLsr.Values)
+			}
+		}
+		// Sort a copy of the values: sorting in place would reorder the
+		// caller's slice, because mergedLsr.Values can still share its
+		// backing array with the input when there is no override for this key.
+		mergedLsr.Values = sortedStringsCopy(mergedLsr.Values)
+
+		mergedLsrs = append(mergedLsrs, mergedLsr)
+	}
+
+	// we need to add any override lsrs that do not exist in the original
+	for _, overrideLsr := range override {
+		existing := LabelSelectorRequirementByKey(original, overrideLsr.Key)
+		if existing == nil {
+			// Same aliasing concern as above: never sort the caller's slice.
+			overrideLsr.Values = sortedStringsCopy(overrideLsr.Values)
+			mergedLsrs = append(mergedLsrs, overrideLsr)
+		}
+	}
+
+	// sort them by key
+	sort.SliceStable(mergedLsrs, func(i, j int) bool {
+		return mergedLsrs[i].Key < mergedLsrs[j].Key
+	})
+
+	return mergedLsrs
+}
+
+// sortedStringsCopy returns a sorted copy of values, leaving the input slice
+// untouched. Nil, empty and single-element slices are returned as-is so
+// deep-equality with the input is preserved.
+func sortedStringsCopy(values []string) []string {
+	if len(values) < 2 {
+		return values
+	}
+	copied := append([]string(nil), values...)
+	sort.Strings(copied)
+	return copied
+}
+
+// LabelSelectorRequirementByKey returns the LabelSelectorRequirement with the given key if present in the slice.
+// returns nil if not present.
+func LabelSelectorRequirementByKey(labelSelectorRequirements []metav1.LabelSelectorRequirement, key string) *metav1.LabelSelectorRequirement { + for _, lsr := range labelSelectorRequirements { + if lsr.Key == key { + return &lsr + } + } + return nil +} + +func VolumeClaimTemplates(defaultTemplates []corev1.PersistentVolumeClaim, overrideTemplates []corev1.PersistentVolumeClaim) []corev1.PersistentVolumeClaim { + defaultMountsMap := createVolumeClaimMap(defaultTemplates) + overrideMountsMap := createVolumeClaimMap(overrideTemplates) + + mergedMap := map[string]corev1.PersistentVolumeClaim{} + + for _, vct := range defaultMountsMap { + mergedMap[vct.Name] = vct + } + + for _, overrideClaim := range overrideMountsMap { + if defaultClaim, ok := defaultMountsMap[overrideClaim.Name]; ok { + mergedMap[overrideClaim.Name] = PersistentVolumeClaim(defaultClaim, overrideClaim) + } else { + mergedMap[overrideClaim.Name] = overrideClaim + } + } + + var mergedVolumes []corev1.PersistentVolumeClaim + for _, v := range mergedMap { + mergedVolumes = append(mergedVolumes, v) + } + + sort.SliceStable(mergedVolumes, func(i, j int) bool { + return mergedVolumes[i].Name < mergedVolumes[j].Name + }) + + return mergedVolumes +} + +func createVolumeClaimMap(volumeMounts []corev1.PersistentVolumeClaim) map[string]corev1.PersistentVolumeClaim { + mountMap := make(map[string]corev1.PersistentVolumeClaim) + for _, m := range volumeMounts { + mountMap[m.Name] = m + } + return mountMap +} + +func PersistentVolumeClaim(defaultPvc corev1.PersistentVolumeClaim, overridePvc corev1.PersistentVolumeClaim) corev1.PersistentVolumeClaim { + if overridePvc.Namespace != "" { + defaultPvc.Namespace = overridePvc.Namespace + } + + defaultPvc.Labels = StringToStringMap(defaultPvc.Labels, overridePvc.Labels) + defaultPvc.Annotations = StringToStringMap(defaultPvc.Annotations, overridePvc.Annotations) + + if overridePvc.Spec.VolumeMode != nil { + defaultPvc.Spec.VolumeMode = overridePvc.Spec.VolumeMode + } + + 
if overridePvc.Spec.StorageClassName != nil { + defaultPvc.Spec.StorageClassName = overridePvc.Spec.StorageClassName + } + + for _, accessMode := range overridePvc.Spec.AccessModes { + if !contains.AccessMode(defaultPvc.Spec.AccessModes, accessMode) { + defaultPvc.Spec.AccessModes = append(defaultPvc.Spec.AccessModes, accessMode) + } + } + + if overridePvc.Spec.Selector != nil { + defaultPvc.Spec.Selector = overridePvc.Spec.Selector + } + + if overridePvc.Spec.Resources.Limits != nil { + defaultPvc.Spec.Resources.Limits = overridePvc.Spec.Resources.Limits + } + + if overridePvc.Spec.Resources.Requests != nil { + defaultPvc.Spec.Resources.Requests = overridePvc.Spec.Resources.Requests + } + + if overridePvc.Spec.DataSource != nil { + defaultPvc.Spec.DataSource = overridePvc.Spec.DataSource + } + + return defaultPvc +} diff --git a/pkg/util/merge/merge_test.go b/pkg/util/merge/merge_test.go new file mode 100644 index 000000000..2d35355ac --- /dev/null +++ b/pkg/util/merge/merge_test.go @@ -0,0 +1,672 @@ +package merge + +import ( + "reflect" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" + + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" +) + +func TestMergeStringSlices(t *testing.T) { + type args struct { + original []string + override []string + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "Does not include duplicate entries", + args: args{ + original: []string{"a", "b", "c"}, + override: []string{"a", "c"}, + }, + want: []string{"a", "b", "c"}, + }, + { + name: "Adds elements from override", + args: args{ + original: []string{"a", "b", "c"}, + override: []string{"a", "b", "c", "d", "e"}, + }, + want: []string{"a", "b", "c", "d", "e"}, + }, + { + name: "Doesn't panic with nil input", + args: args{ + original: nil, + override: nil, + }, + want: []string{}, + }, + } 
+ for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := StringSlices(tt.args.original, tt.args.override); !reflect.DeepEqual(got, tt.want) { + t.Errorf("MergeStringSlices() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMergeServices(t *testing.T) { + type args struct { + original corev1.ServiceSpec + override corev1.ServiceSpec + } + tests := []struct { + name string + args args + want corev1.ServiceSpec + }{ + { + name: "Overrides a few example spec values", + args: args{ + original: corev1.ServiceSpec{}, + override: corev1.ServiceSpec{ + Type: "LoadBalancer", + ExternalName: "externalName", + ExternalTrafficPolicy: "some-non-existing-policy", + HealthCheckNodePort: 123, + PublishNotReadyAddresses: true, + }, + }, + want: corev1.ServiceSpec{ + Type: "LoadBalancer", + ExternalName: "externalName", + ExternalTrafficPolicy: "some-non-existing-policy", + HealthCheckNodePort: 123, + PublishNotReadyAddresses: true, + }, + }, + { + name: "Merge labels", + args: args{ + original: corev1.ServiceSpec{ + Selector: map[string]string{"test1": "true"}, + }, + override: corev1.ServiceSpec{ + Selector: map[string]string{"test2": "true"}, + }, + }, + want: corev1.ServiceSpec{ + Selector: map[string]string{"test1": "true", "test2": "true"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ServiceSpec(tt.args.original, tt.args.override); !reflect.DeepEqual(got, tt.want) { + t.Errorf("%v, want %v", got, tt.want) + } + }) + } +} + +func TestMergeContainer(t *testing.T) { + defaultQuantity := resource.NewQuantity(int64(10), resource.DecimalExponent) + + defaultContainer := container.New( + container.WithName("default-container"), + container.WithCommand([]string{"a", "b", "c"}), + container.WithImage("default-image"), + container.WithImagePullPolicy(corev1.PullAlways), + container.WithWorkDir("default-work-dir"), + container.WithArgs([]string{"arg0", "arg1"}), + container.WithLivenessProbe(probes.Apply( + 
probes.WithInitialDelaySeconds(10), + probes.WithFailureThreshold(20), + probes.WithExecCommand([]string{"exec", "command", "liveness"}), + )), + container.WithReadinessProbe(probes.Apply( + probes.WithInitialDelaySeconds(20), + probes.WithFailureThreshold(30), + probes.WithExecCommand([]string{"exec", "command", "readiness"}), + )), + container.WithVolumeDevices([]corev1.VolumeDevice{ + { + Name: "name-0", + DevicePath: "original-path-0", + }, + { + Name: "name-1", + DevicePath: "original-path-1", + }, + }), + container.WithVolumeMounts([]corev1.VolumeMount{ + { + Name: "volume-mount-0", + ReadOnly: false, + MountPath: "original-mount-path", + SubPath: "original-sub-path", + MountPropagation: nil, + SubPathExpr: "original-sub-path-expr", + }, + { + Name: "volume-mount-1", + ReadOnly: false, + MountPath: "original-mount-path-1", + SubPath: "original-sub-path-1", + MountPropagation: nil, + SubPathExpr: "original-sub-path-expr-1", + }, + }), + container.WithResourceRequirements( + corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "limit": *defaultQuantity, + }, + }), + container.WithEnvs( + corev1.EnvVar{ + Name: "env0", + Value: "val1", + }, + corev1.EnvVar{ + Name: "env3", + Value: "val3", + }), + ) + + t.Run("Override Fields", func(t *testing.T) { + overrideQuantity := resource.NewQuantity(int64(15), resource.BinarySI) + + overrideContainer := container.New( + container.WithName("override-container"), + container.WithCommand([]string{"d", "f", "e"}), + container.WithImage("override-image"), + container.WithWorkDir("override-work-dir"), + container.WithArgs([]string{"arg3", "arg2"}), + container.WithLivenessProbe(probes.Apply( + probes.WithInitialDelaySeconds(15), + probes.WithExecCommand([]string{"exec", "command", "override"}), + )), + container.WithReadinessProbe(probes.Apply( + probes.WithInitialDelaySeconds(5), + probes.WithFailureThreshold(6), + probes.WithExecCommand([]string{"exec", "command", "readiness", "override"}), + )), + 
container.WithVolumeDevices([]corev1.VolumeDevice{ + { + Name: "name-0", + DevicePath: "override-path-0", + }, + { + Name: "name-2", + DevicePath: "override-path-2", + }, + }), + container.WithVolumeMounts([]corev1.VolumeMount{ + { + Name: "volume-mount-1", + ReadOnly: true, + MountPath: "override-mount-path-1", + SubPath: "override-sub-path-1", + MountPropagation: nil, + SubPathExpr: "override-sub-path-expr-1", + }, + }), + container.WithResourceRequirements( + corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "limits": *overrideQuantity, + }, + Requests: corev1.ResourceList{ + "requests": *overrideQuantity, + }, + }), + container.WithEnvs( + corev1.EnvVar{ + Name: "env0", + Value: "val2", + }, + corev1.EnvVar{ + Name: "env3", + ValueFrom: &corev1.EnvVarSource{}, + }, + ), + ) + mergedContainer := Container(defaultContainer, overrideContainer) + assert.Equal(t, overrideContainer.Name, mergedContainer.Name, "Name was overridden, and should be used.") + assert.Equal(t, []string{"d", "f", "e"}, mergedContainer.Command, "Command specified in the override container overrides the default container.") + assert.Equal(t, overrideContainer.Image, mergedContainer.Image, "Image was overridden, and should be used.") + assert.Equal(t, defaultContainer.ImagePullPolicy, mergedContainer.ImagePullPolicy, "No ImagePullPolicy was specified in the override, so the default should be used.") + assert.Equal(t, overrideContainer.WorkingDir, mergedContainer.WorkingDir) + assert.Equal(t, []string{"arg3", "arg2"}, mergedContainer.Args, "Args specified in the override container overrides the default container.") + + assert.Equal(t, overrideContainer.Resources, mergedContainer.Resources) + + t.Run("Env are overridden", func(t *testing.T) { + assert.Len(t, mergedContainer.Env, 2) + assert.Equal(t, "env0", mergedContainer.Env[0].Name) + assert.Equal(t, "val2", mergedContainer.Env[0].Value) + assert.Equal(t, "env3", mergedContainer.Env[1].Name) + assert.Equal(t, "", 
mergedContainer.Env[1].Value) + assert.NotNil(t, mergedContainer.Env[1].ValueFrom) + }) + + t.Run("Probes are overridden", func(t *testing.T) { + t.Run("Liveness probe", func(t *testing.T) { + livenessProbe := mergedContainer.LivenessProbe + + assert.NotNil(t, livenessProbe) + assert.Equal(t, int32(15), livenessProbe.InitialDelaySeconds, "value is specified in override and so should be used.") + assert.Equal(t, int32(20), livenessProbe.FailureThreshold, "value is not specified in override so the original should be used.") + assert.Equal(t, []string{"exec", "command", "override"}, livenessProbe.Exec.Command, "value is not specified in override so the original should be used.") + }) + t.Run("Readiness probe", func(t *testing.T) { + readinessProbe := mergedContainer.ReadinessProbe + assert.NotNil(t, readinessProbe) + assert.Equal(t, int32(5), readinessProbe.InitialDelaySeconds, "value is specified in override and so should be used.") + assert.Equal(t, int32(6), readinessProbe.FailureThreshold, "value is not specified in override so the original should be used.") + assert.Equal(t, []string{"exec", "command", "readiness", "override"}, readinessProbe.Exec.Command, "value is not specified in override so the original should be used.") + }) + }) + + t.Run("Volume Devices are overridden", func(t *testing.T) { + volumeDevices := mergedContainer.VolumeDevices + assert.Len(t, volumeDevices, 3) + t.Run("VolumeDevice0 was updated", func(t *testing.T) { + vd0 := volumeDevices[0] + assert.Equal(t, "name-0", vd0.Name) + assert.Equal(t, "override-path-0", vd0.DevicePath) + }) + t.Run("VolumeDevice1 remained unchanged", func(t *testing.T) { + vd1 := volumeDevices[1] + assert.Equal(t, "name-1", vd1.Name) + assert.Equal(t, "original-path-1", vd1.DevicePath) + }) + t.Run("VolumeDevice2 was updated", func(t *testing.T) { + vd2 := volumeDevices[2] + assert.Equal(t, "name-2", vd2.Name) + assert.Equal(t, "override-path-2", vd2.DevicePath) + }) + }) + + t.Run("Volume Mounts are overridden", 
func(t *testing.T) { + volumeMounts := mergedContainer.VolumeMounts + assert.Len(t, volumeMounts, 3, "volume mounts can have the same name, the uniqueness is the combination of name, path and subpath") + t.Run("First VolumeMount is still present", func(t *testing.T) { + vm0 := volumeMounts[0] + assert.Equal(t, "volume-mount-0", vm0.Name) + assert.False(t, vm0.ReadOnly) + assert.Equal(t, "original-mount-path", vm0.MountPath) + assert.Equal(t, "original-sub-path", vm0.SubPath) + assert.Equal(t, "original-sub-path-expr", vm0.SubPathExpr) + }) + t.Run("Second VolumeMount has merged values", func(t *testing.T) { + assert.Equal(t, volumeMounts[0], defaultContainer.VolumeMounts[0]) + assert.Equal(t, volumeMounts[1], defaultContainer.VolumeMounts[1]) + assert.Equal(t, volumeMounts[2], overrideContainer.VolumeMounts[0]) + }) + }) + }) + + t.Run("No Override Fields", func(t *testing.T) { + mergedContainer := Container(defaultContainer, corev1.Container{}) + assert.Equal(t, defaultContainer.Name, mergedContainer.Name, "Name was not overridden, and should not be used.") + assert.Equal(t, defaultContainer.Image, mergedContainer.Image, "Image was not overridden, and should not be used.") + assert.Equal(t, defaultContainer.ImagePullPolicy, mergedContainer.ImagePullPolicy, "No ImagePullPolicy was specified in the override, so the default should be used.") + assert.Equal(t, defaultContainer.WorkingDir, mergedContainer.WorkingDir) + + assert.Equal(t, defaultContainer.Resources, mergedContainer.Resources) + + t.Run("No Overriden Env", func(t *testing.T) { + assert.Len(t, mergedContainer.Env, 2) + assert.Equal(t, "env0", mergedContainer.Env[0].Name) + assert.Equal(t, "val1", mergedContainer.Env[0].Value) + assert.Equal(t, "env3", mergedContainer.Env[1].Name) + assert.Equal(t, "val3", mergedContainer.Env[1].Value) + assert.Nil(t, mergedContainer.Env[1].ValueFrom) + }) + + t.Run("Probes are not overridden", func(t *testing.T) { + t.Run("Liveness probe", func(t *testing.T) { + 
livenessProbe := mergedContainer.LivenessProbe + + assert.NotNil(t, livenessProbe) + assert.Equal(t, int32(10), livenessProbe.InitialDelaySeconds, "value is not specified in override so the original should be used.") + assert.Equal(t, int32(20), livenessProbe.FailureThreshold, "value is not specified in override so the original should be used.") + assert.Equal(t, []string{"exec", "command", "liveness"}, livenessProbe.Exec.Command, "value is not specified in override so the original should be used.") + }) + t.Run("Readiness probe", func(t *testing.T) { + readinessProbe := mergedContainer.ReadinessProbe + assert.NotNil(t, readinessProbe) + assert.Equal(t, int32(20), readinessProbe.InitialDelaySeconds, "value is not specified in override so the original should be used.") + assert.Equal(t, int32(30), readinessProbe.FailureThreshold, "value is not specified in override so the original should be used.") + assert.Equal(t, []string{"exec", "command", "readiness"}, readinessProbe.Exec.Command, "value is not specified in override so the original should be used.") + }) + }) + + t.Run("Volume Devices are not overridden", func(t *testing.T) { + volumeDevices := mergedContainer.VolumeDevices + assert.Len(t, volumeDevices, 2) + t.Run("VolumeDevice0 was updated", func(t *testing.T) { + vd0 := volumeDevices[0] + assert.Equal(t, "name-0", vd0.Name) + assert.Equal(t, "original-path-0", vd0.DevicePath) + }) + t.Run("VolumeDevice1 remained unchanged", func(t *testing.T) { + vd1 := volumeDevices[1] + assert.Equal(t, "name-1", vd1.Name) + assert.Equal(t, "original-path-1", vd1.DevicePath) + }) + }) + + t.Run("Volume Mounts are not overridden", func(t *testing.T) { + volumeMounts := mergedContainer.VolumeMounts + assert.Len(t, volumeMounts, 2) + t.Run("First VolumeMount is still present and unchanged", func(t *testing.T) { + vm0 := volumeMounts[0] + assert.Equal(t, "volume-mount-0", vm0.Name) + assert.False(t, vm0.ReadOnly) + assert.Equal(t, "original-mount-path", vm0.MountPath) + 
assert.Equal(t, "original-sub-path", vm0.SubPath)
				assert.Equal(t, "original-sub-path-expr", vm0.SubPathExpr)
			})
			t.Run("Second VolumeMount is still present and unchanged", func(t *testing.T) {
				vm1 := volumeMounts[1]
				assert.Equal(t, "volume-mount-1", vm1.Name)
				assert.False(t, vm1.ReadOnly)
				assert.Equal(t, "original-mount-path-1", vm1.MountPath)
				assert.Equal(t, "original-sub-path-1", vm1.SubPath)
				assert.Equal(t, "original-sub-path-expr-1", vm1.SubPathExpr)
			})
		})

	})
}

// TestMergeContainerPort verifies that ContainerPorts merges two ports field by
// field: values set on the override win, unset (zero) override fields fall back
// to the original.
func TestMergeContainerPort(t *testing.T) {
	original := corev1.ContainerPort{
		Name:          "original-port",
		HostPort:      10,
		ContainerPort: 10,
		Protocol:      corev1.ProtocolTCP,
		HostIP:        "4.3.2.1",
	}

	t.Run("Override Fields", func(t *testing.T) {
		override := corev1.ContainerPort{
			Name:          "override-port",
			HostPort:      1,
			ContainerPort: 5,
			Protocol:      corev1.ProtocolUDP,
			HostIP:        "1.2.3.4",
		}
		mergedPort := ContainerPorts(original, override)

		assert.Equal(t, override.Name, mergedPort.Name)
		assert.Equal(t, override.HostPort, mergedPort.HostPort)
		assert.Equal(t, override.ContainerPort, mergedPort.ContainerPort)
		assert.Equal(t, override.HostIP, mergedPort.HostIP)
		// FIX: this assertion previously duplicated the ContainerPort check;
		// Protocol was never verified even though the fixtures differ (UDP vs TCP).
		assert.Equal(t, override.Protocol, mergedPort.Protocol)
	})

	t.Run("No Override Fields", func(t *testing.T) {
		mergedPort := ContainerPorts(original, corev1.ContainerPort{})
		assert.Equal(t, original.Name, mergedPort.Name)
		assert.Equal(t, original.HostPort, mergedPort.HostPort)
		assert.Equal(t, original.ContainerPort, mergedPort.ContainerPort)
		assert.Equal(t, original.HostIP, mergedPort.HostIP)
		// FIX: previously a duplicated ContainerPort assertion; check the
		// Protocol falls back to the original when the override leaves it unset.
		assert.Equal(t, original.Protocol, mergedPort.Protocol)
	})
}

// TestMergeVolumeMount verifies field-by-field merging of two VolumeMounts.
func TestMergeVolumeMount(t *testing.T) {
	hostToContainer := corev1.MountPropagationHostToContainer
	hostToContainerRef := &hostToContainer
	original := corev1.VolumeMount{
		Name:             "override-name",
		ReadOnly:         true,
		MountPath:        "override-mount-path",
		SubPath:          "override-sub-path",
		MountPropagation: hostToContainerRef,
		SubPathExpr:      "override-sub-path-expr",
	}

	t.Run("With Override", func(t *testing.T) {
		bidirectional := corev1.MountPropagationBidirectional
		bidirectionalRef := &bidirectional
		override := corev1.VolumeMount{
			Name:             "override-name",
			ReadOnly:         true,
			MountPath:        "override-mount-path",
			SubPath:          "override-sub-path",
			MountPropagation: bidirectionalRef,
			SubPathExpr:      "override-sub-path-expr",
		}
		mergedVolumeMount := VolumeMount(original, override)

		assert.Equal(t, override.Name, mergedVolumeMount.Name)
		assert.Equal(t, override.ReadOnly, mergedVolumeMount.ReadOnly)
		assert.Equal(t, override.MountPath, mergedVolumeMount.MountPath)
		// FIX: SubPath was the only field the original test never asserted.
		assert.Equal(t, override.SubPath, mergedVolumeMount.SubPath)
		assert.Equal(t, override.MountPropagation, mergedVolumeMount.MountPropagation)
		assert.Equal(t, override.SubPathExpr, mergedVolumeMount.SubPathExpr)
	})

	t.Run("No Override", func(t *testing.T) {
		mergedVolumeMount := VolumeMount(original, corev1.VolumeMount{})

		assert.Equal(t, original.Name, mergedVolumeMount.Name)
		assert.Equal(t, original.ReadOnly, mergedVolumeMount.ReadOnly)
		assert.Equal(t, original.MountPath, mergedVolumeMount.MountPath)
		// FIX: SubPath was the only field the original test never asserted.
		assert.Equal(t, original.SubPath, mergedVolumeMount.SubPath)
		assert.Equal(t, original.MountPropagation, mergedVolumeMount.MountPropagation)
		assert.Equal(t, original.SubPathExpr, mergedVolumeMount.SubPathExpr)
	})
}

// TestContainerPortSlicesByName verifies that two port slices are merged keyed
// by port name: shared names are merged element-wise, unique names from either
// slice are kept.
func TestContainerPortSlicesByName(t *testing.T) {

	original := []corev1.ContainerPort{
		{
			Name:          "original-port-0",
			HostPort:      10,
			ContainerPort: 10,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.4",
		},
		{
			Name:          "original-port-1",
			HostPort:      20,
			ContainerPort: 20,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.5",
		},
		{
			Name:          "original-port-2",
			HostPort:      30,
			ContainerPort: 30,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.6",
		},
	}

	override := []corev1.ContainerPort{
		{
			Name:          "original-port-0",
			HostPort:      50,
			ContainerPort: 50,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.10",
		},
		{
			Name:     "original-port-1",
			HostPort: 60,
ContainerPort: 60,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.50",
		},
		{
			Name:          "original-port-3",
			HostPort:      40,
			ContainerPort: 40,
			Protocol:      corev1.ProtocolTCP,
			HostIP:        "1.2.3.6",
		},
	}

	merged := ContainerPortSlicesByName(original, override)

	assert.Len(t, merged, 4, "There are 4 distinct names between the two slices.")

	t.Run("Test Port 0", func(t *testing.T) {
		assert.Equal(t, "original-port-0", merged[0].Name, "The name should remain unchanged")
		assert.Equal(t, int32(50), merged[0].HostPort, "The HostPort should have been overridden")
		assert.Equal(t, int32(50), merged[0].ContainerPort, "The ContainerPort should have been overridden")
		assert.Equal(t, "1.2.3.10", merged[0].HostIP, "The HostIP should have been overridden")
		assert.Equal(t, corev1.ProtocolTCP, merged[0].Protocol, "The Protocol should remain unchanged")
	})
	t.Run("Test Port 1", func(t *testing.T) {
		assert.Equal(t, "original-port-1", merged[1].Name, "The name should remain unchanged")
		assert.Equal(t, int32(60), merged[1].HostPort, "The HostPort should have been overridden")
		assert.Equal(t, int32(60), merged[1].ContainerPort, "The ContainerPort should have been overridden")
		assert.Equal(t, "1.2.3.50", merged[1].HostIP, "The HostIP should have been overridden")
		assert.Equal(t, corev1.ProtocolTCP, merged[1].Protocol, "The Protocol should remain unchanged")
	})
	t.Run("Test Port 2", func(t *testing.T) {
		assert.Equal(t, "original-port-2", merged[2].Name, "The name should remain unchanged")
		assert.Equal(t, int32(30), merged[2].HostPort, "The HostPort should remain unchanged")
		assert.Equal(t, int32(30), merged[2].ContainerPort, "The ContainerPort should remain unchanged")
		assert.Equal(t, "1.2.3.6", merged[2].HostIP, "The HostIP should remain unchanged")
		assert.Equal(t, corev1.ProtocolTCP, merged[2].Protocol, "The Protocol should remain unchanged")
	})
	// port-3 exists only in the override slice, so all of its values come from
	// the override entry.
	t.Run("Test Port 3", func(t *testing.T) {
		assert.Equal(t, "original-port-3", merged[3].Name, "The name should remain unchanged")
		assert.Equal(t, int32(40), merged[3].HostPort, "The HostPort should have been overridden")
		assert.Equal(t, int32(40), merged[3].ContainerPort, "The ContainerPort should have been overridden")
		assert.Equal(t, "1.2.3.6", merged[3].HostIP, "The HostIP should have been overridden")
		assert.Equal(t, corev1.ProtocolTCP, merged[3].Protocol, "The Protocol should remain unchanged")
	})

}

// TestMergeSecurityContext verifies that non-nil override fields of a
// SecurityContext replace the corresponding original fields.
func TestMergeSecurityContext(t *testing.T) {
	privileged := true
	windowsRunAsUserName := "username"
	runAsGroup := int64(4)
	original := &corev1.SecurityContext{
		Capabilities: nil,
		Privileged:   &privileged,
		WindowsOptions: &corev1.WindowsSecurityContextOptions{
			RunAsUserName: &windowsRunAsUserName,
		},
		RunAsGroup: &runAsGroup,
	}

	// FIX: the original test reassigned the shared runAsGroup variable, so
	// original.RunAsGroup aliased the same memory and the *merged.RunAsGroup
	// assertion passed vacuously. A distinct variable makes the override check
	// meaningful.
	overrideRunAsGroup := int64(6)
	override := &corev1.SecurityContext{
		Capabilities: &corev1.Capabilities{
			Add: []corev1.Capability{
				"123",
				"456",
			},
		},
		Privileged: &privileged,
		WindowsOptions: &corev1.WindowsSecurityContextOptions{
			RunAsUserName: &windowsRunAsUserName,
		},
		RunAsGroup: &overrideRunAsGroup,
	}

	merged := SecurityContext(original, override)

	assert.Equal(t, int64(6), *merged.RunAsGroup)
	assert.Equal(t, "username", *merged.WindowsOptions.RunAsUserName)
	assert.Equal(t, override.Capabilities, merged.Capabilities)
	// FIX: previously asserted *override.Privileged, i.e. the test input rather
	// than the merge result.
	assert.True(t, *merged.Privileged)
}

// TestMergeVolumesSecret verifies that two volumes with the same name and both
// using a Secret source are merged field by field.
func TestMergeVolumesSecret(t *testing.T) {
	permission := int32(416)
	vol0 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "Secret-name"}}}}
	vol1 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{DefaultMode: &permission}}}}
	mergedVolumes := Volumes(vol0, vol1)
	assert.Len(t, mergedVolumes, 1)
	volume := mergedVolumes[0]
	assert.Equal(t, "volume", volume.Name)
	assert.Equal(t, corev1.SecretVolumeSource{SecretName: "Secret-name", DefaultMode: &permission},
*volume.Secret) +} + +func TestMergeNonNilValueNotFilledByOperator(t *testing.T) { + // Tests that providing a custom volume with a volume source + // That the operator does not manage overwrites the original + vol0 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "Secret-name"}}}} + vol1 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{GCEPersistentDisk: &corev1.GCEPersistentDiskVolumeSource{}}}} + mergedVolumes := Volumes(vol0, vol1) + assert.Len(t, mergedVolumes, 1) + volume := mergedVolumes[0] + assert.Equal(t, "volume", volume.Name) + assert.Equal(t, corev1.GCEPersistentDiskVolumeSource{}, *volume.GCEPersistentDisk) + assert.Nil(t, volume.Secret) +} + +func TestMergeNonNilValueFilledByOperatorButDifferent(t *testing.T) { + // Tests that providing a custom volume with a volume source + // That the operator does manage, but different from the one + // That already exists, overwrites the original + vol0 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "Secret-name"}}}} + vol1 := []corev1.Volume{{Name: "volume", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}} + mergedVolumes := Volumes(vol0, vol1) + assert.Len(t, mergedVolumes, 1) + volume := mergedVolumes[0] + assert.Equal(t, "volume", volume.Name) + assert.Equal(t, corev1.EmptyDirVolumeSource{}, *volume.EmptyDir) + assert.Nil(t, volume.Secret) +} + +func TestMergeVolumeAddVolume(t *testing.T) { + vol0 := []corev1.Volume{{Name: "volume0", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{}}}} + vol1 := []corev1.Volume{{Name: "volume1", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}} + mergedVolumes := Volumes(vol0, vol1) + assert.Len(t, mergedVolumes, 2) + volume0 := mergedVolumes[0] + assert.Equal(t, "volume0", volume0.Name) + assert.Equal(t, corev1.SecretVolumeSource{}, 
*volume0.Secret) + volume1 := mergedVolumes[1] + assert.Equal(t, "volume1", volume1.Name) + assert.Equal(t, corev1.EmptyDirVolumeSource{}, *volume1.EmptyDir) +} + +func TestMergeHostAliases(t *testing.T) { + ha0 := []corev1.HostAlias{ + { + IP: "1.2.3.4", + Hostnames: []string{ + "abc", "def", + }, + }, + { + IP: "1.2.3.5", + Hostnames: []string{ + "abc", + }, + }, + } + + ha1 := []corev1.HostAlias{ + { + IP: "1.2.3.4", + Hostnames: []string{ + "abc", "def", "ghi", + }, + }, + } + + merged := HostAliases(ha0, ha1) + + assert.Len(t, merged, 2) + assert.Equal(t, "1.2.3.4", merged[0].IP) + assert.Equal(t, []string{"abc", "def", "ghi"}, merged[0].Hostnames) + assert.Equal(t, "1.2.3.5", merged[1].IP) + assert.Equal(t, []string{"abc"}, merged[1].Hostnames) +} diff --git a/pkg/util/result/reconciliationresults.go b/pkg/util/result/reconciliationresults.go new file mode 100644 index 000000000..51f9a3268 --- /dev/null +++ b/pkg/util/result/reconciliationresults.go @@ -0,0 +1,41 @@ +package result + +import ( + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// StateComplete returns the result required for the State Machine +// to execute the next State in the next reconciliation. +func StateComplete() (reconcile.Result, error, bool) { + return retry(0, true) +} + +// RetryState returns the result required for the State Machine to +// execute this state in the next reconciliation. +func RetryState(after int) (reconcile.Result, error, bool) { + return retry(after, false) +} + +// FailedState returns the result required for the State to retry +// the current State. 
+func FailedState() (reconcile.Result, error, bool) { + return RetryState(1) +} + +func retry(after int, isComplete bool) (reconcile.Result, error, bool) { + return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil, isComplete +} + +func OK() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +func Retry(after int) (reconcile.Result, error) { + return reconcile.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)}, nil +} + +func Failed() (reconcile.Result, error) { + return Retry(0) +} diff --git a/pkg/util/scale/scale.go b/pkg/util/scale/scale.go new file mode 100644 index 000000000..afdae1b81 --- /dev/null +++ b/pkg/util/scale/scale.go @@ -0,0 +1,60 @@ +package scale + +// ReplicaSetScaler is an interface which is able to scale up and down a replicaset +// a single member at a time +type ReplicaSetScaler interface { + DesiredReplicas() int + CurrentReplicas() int + ForcedIndividualScaling() bool +} + +// ReplicasThisReconciliation returns the number of replicas that should be configured +// for that reconciliation. As of MongoDB 4.4 we can only scale members up / down 1 at a time. 
+func ReplicasThisReconciliation(replicaSetScaler ReplicaSetScaler) int { + // the current replica set members will be 0 when we are creating a new deployment + // if this is the case, we want to jump straight to the desired members and not make changes incrementally + + if replicaSetScaler.CurrentReplicas() == replicaSetScaler.DesiredReplicas() { + return replicaSetScaler.DesiredReplicas() + } + + if !replicaSetScaler.ForcedIndividualScaling() { + // Short-circuit to scale up all at once + if replicaSetScaler.CurrentReplicas() == 0 { + return replicaSetScaler.DesiredReplicas() + } + } + + if IsScalingDown(replicaSetScaler) { + return replicaSetScaler.CurrentReplicas() - 1 + } + + return replicaSetScaler.CurrentReplicas() + 1 + +} + +func IsStillScaling(replicaSetScaler ReplicaSetScaler) bool { + return ReplicasThisReconciliation(replicaSetScaler) != replicaSetScaler.DesiredReplicas() +} + +func IsScalingDown(replicaSetScaler ReplicaSetScaler) bool { + return replicaSetScaler.DesiredReplicas() < replicaSetScaler.CurrentReplicas() +} + +func IsScalingUp(replicaSetScaler ReplicaSetScaler) bool { + return replicaSetScaler.DesiredReplicas() > replicaSetScaler.CurrentReplicas() && replicaSetScaler.CurrentReplicas() != 0 +} + +func HasZeroReplicas(replicaSetScaler ReplicaSetScaler) bool { + return replicaSetScaler.CurrentReplicas() == 0 +} + +// AnyAreStillScaling reports true if any of one the provided members is still scaling +func AnyAreStillScaling(scalers ...ReplicaSetScaler) bool { + for _, s := range scalers { + if IsStillScaling(s) { + return true + } + } + return false +} diff --git a/pkg/util/state/statemachine.go b/pkg/util/state/statemachine.go new file mode 100644 index 000000000..d506e280e --- /dev/null +++ b/pkg/util/state/statemachine.go @@ -0,0 +1,186 @@ +package state + +import ( + "fmt" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// State should provide a unique name, and a Reconcile 
function. +// This function gets called by the Machine. The first two returned values +// are returned to the caller, while the 3rd value is used to indicate if the +// State completed successfully. A value of true will move onto the next State, +// a value of false will repeat this State until true is returned. +type State struct { + // Name should be a unique identifier of the State + Name string + + // Reconcile should perform the actual reconciliation of the State. + // The reconcile.Result and error should be returned from the controller. + // the boolean value indicates that the State has been successfully completed. + Reconcile func() (reconcile.Result, error, bool) + + // OnEnter executes before the Reconcile function is called. + OnEnter func() error +} + +// transition represents a transition between two states. +type transition struct { + from, to State + predicate TransitionPredicate +} + +// Saver saves the next state name that should be reconciled. +// If a transition is A -> B, after A finishes reconciling `SaveNextState("B")` will be called. +type Saver interface { + SaveNextState(nsName types.NamespacedName, stateName string) error +} + +// Loader should return the value saved by Saver. +type Loader interface { + LoadNextState(nsName types.NamespacedName) (string, error) +} + +// SaveLoader can both load and save the name of a state. +type SaveLoader interface { + Saver + Loader +} + +// TransitionPredicate is used to indicate if two States should be connected. +type TransitionPredicate func() bool + +var FromBool = func(b bool) TransitionPredicate { + return func() bool { + return b + } +} + +// directTransition can be used to ensure two states are directly linked. +var directTransition = FromBool(true) + +// Machine allows for several States to be registered via "AddTransition" +// When calling Reconcile, the corresponding State will be used based on the values +// stored/loaded from the SaveLoader. 
A Machine corresponds to a single Kubernetes resource. +type Machine struct { + allTransitions map[string][]transition + currentState *State + logger *zap.SugaredLogger + saveLoader SaveLoader + states map[string]State + nsName types.NamespacedName +} + +// NewStateMachine returns a Machine, it must be set up with calls to "AddTransition(s1, s2, predicate)" +// before Reconcile is called. +func NewStateMachine(saver SaveLoader, nsName types.NamespacedName, logger *zap.SugaredLogger) *Machine { + return &Machine{ + allTransitions: map[string][]transition{}, + logger: logger, + saveLoader: saver, + states: map[string]State{}, + nsName: nsName, + } +} + +// Reconcile will reconcile the currently active State. This method should be called +// from the controllers. +func (m *Machine) Reconcile() (reconcile.Result, error) { + + if err := m.determineState(); err != nil { + m.logger.Errorf("error initializing starting state: %s", err) + return reconcile.Result{}, err + } + + m.logger.Infof("Reconciling state: [%s]", m.currentState.Name) + + if m.currentState.OnEnter != nil { + if err := m.currentState.OnEnter(); err != nil { + m.logger.Debugf("Error reconciling state [%s]: %s", m.currentState.Name, err) + return reconcile.Result{}, err + } + } + + res, err, isComplete := m.currentState.Reconcile() + + if err != nil { + m.logger.Debugf("Error reconciling state [%s]: %s", m.currentState.Name, err) + return res, err + } + + if isComplete { + m.logger.Debugf("Completed state: [%s]", m.currentState.Name) + + transition := m.getTransitionForState(*m.currentState) + nextState := "" + if transition != nil { + nextState = transition.to.Name + } + + if nextState != "" { + m.logger.Debugf("preparing transition [%s] -> [%s]", m.currentState.Name, nextState) + } + + if err := m.saveLoader.SaveNextState(m.nsName, nextState); err != nil { + m.logger.Debugf("Error marking state: [%s] as complete: %s", m.currentState.Name, err) + return reconcile.Result{}, err + } + return res, err + } + + 
m.logger.Debugf("State [%s] is not yet complete", m.currentState.Name) + + return res, err +} + +// determineState ensures that "currentState" has a valid value. +// the state that is loaded comes from the Loader. +func (m *Machine) determineState() error { + currentStateName, err := m.saveLoader.LoadNextState(m.nsName) + if err != nil { + return fmt.Errorf("could not load starting state: %s", err) + } + nextState, ok := m.states[currentStateName] + if !ok { + return fmt.Errorf("could not determine state %s as it was not added to the State Machine", currentStateName) + } + m.currentState = &nextState + return nil +} + +// AddDirectTransition creates a transition between the two +// provided states which will always be valid. +func (m *Machine) AddDirectTransition(from, to State) { + m.AddTransition(from, to, directTransition) +} + +// AddTransition creates a transition between the two states if the given +// predicate returns true. +func (m *Machine) AddTransition(from, to State, predicate TransitionPredicate) { + _, ok := m.allTransitions[from.Name] + if !ok { + m.allTransitions[from.Name] = []transition{} + } + m.allTransitions[from.Name] = append(m.allTransitions[from.Name], transition{ + from: from, + to: to, + predicate: predicate, + }) + + m.states[from.Name] = from + m.states[to.Name] = to +} + +// getTransitionForState returns the first transition it finds that is available +// from the current state. 
+func (m *Machine) getTransitionForState(s State) *transition { + transitions := m.allTransitions[s.Name] + for _, t := range transitions { + if t.predicate() { + return &t + } + } + return nil +} diff --git a/pkg/util/state/statemachine_test.go b/pkg/util/state/statemachine_test.go new file mode 100644 index 000000000..650749711 --- /dev/null +++ b/pkg/util/state/statemachine_test.go @@ -0,0 +1,349 @@ +package state + +import ( + "errors" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func init() { + logger, err := zap.NewDevelopment() + if err != nil { + os.Exit(1) + } + zap.ReplaceGlobals(logger) +} + +// inMemorySaveLoader stores and loads states to member fields +// and maintains a history of all the fields saved. +type inMemorySaveLoader struct { + stateHistory []string + nextState string + startingState string +} + +func (s *inMemorySaveLoader) SaveNextState(_ types.NamespacedName, stateName string) error { + if stateName == "" { + return nil + } + s.stateHistory = append(s.stateHistory, stateName) + s.nextState = stateName + return nil +} + +func (s *inMemorySaveLoader) LoadNextState(_ types.NamespacedName) (string, error) { + return s.nextState, nil +} + +func newInMemorySaveLoader(startingState string) *inMemorySaveLoader { + s := &inMemorySaveLoader{} + s.startingState = startingState + _ = s.SaveNextState(types.NamespacedName{}, startingState) + return s +} + +func TestOrderOfStatesIsCorrect(t *testing.T) { + in := newInMemorySaveLoader("State0") + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + state0 := newAlwaysCompletingState("State0") + state1 := newAlwaysCompletingState("State1") + state2 := newAlwaysCompletingState("State2") + + s.AddDirectTransition(state0, state1) + s.AddDirectTransition(state1, state2) + + _, _ = s.Reconcile() + _, _ = 
s.Reconcile()
	_, _ = s.Reconcile()

	assert.Equal(t, []string{"State0", "State1", "State2"}, in.stateHistory)
}

// TestOrderOfStatesIsCorrectIfAddedInDifferentOrder checks that execution
// order is derived from the transition graph, not from registration order.
func TestOrderOfStatesIsCorrectIfAddedInDifferentOrder(t *testing.T) {
	in := newInMemorySaveLoader("State0")
	s := NewStateMachine(in, types.NamespacedName{}, zap.S())

	state0 := newAlwaysCompletingState("State0")
	state1 := newAlwaysCompletingState("State1")
	state2 := newAlwaysCompletingState("State2")

	// registered in reverse of execution order on purpose
	s.AddDirectTransition(state1, state2)
	s.AddDirectTransition(state0, state1)

	_, _ = s.Reconcile()
	assert.Equal(t, "State1", in.nextState)

	_, _ = s.Reconcile()
	assert.Equal(t, "State2", in.nextState)

	_, _ = s.Reconcile()

	assert.Equal(t, []string{"State0", "State1", "State2"}, in.stateHistory)
}

// TestPredicateReturningFalse_PreventsStateTransition checks that a transition
// guarded by a false predicate is skipped in favour of the next registered one.
func TestPredicateReturningFalse_PreventsStateTransition(t *testing.T) {
	in := newInMemorySaveLoader("State0")
	s := NewStateMachine(in, types.NamespacedName{}, zap.S())

	state0 := newAlwaysCompletingState("State0")
	state1 := newAlwaysCompletingState("State1")
	state2 := newAlwaysCompletingState("State2")
	state3 := newAlwaysCompletingState("State3")

	s.AddDirectTransition(state0, state1)

	// there is no transition from state1 to state2
	s.AddTransition(state1, state2, func() bool {
		return false
	})
	s.AddDirectTransition(state1, state3)

	_, _ = s.Reconcile()
	_, _ = s.Reconcile()
	_, _ = s.Reconcile()

	assert.Equal(t, []string{"State0", "State1", "State3"}, in.stateHistory)
}

// TestAddTransition checks the bookkeeping AddDirectTransition performs on the
// machine's internal maps.
func TestAddTransition(t *testing.T) {
	in := newInMemorySaveLoader("State0")
	s := NewStateMachine(in, types.NamespacedName{}, zap.S())

	state0 := newAlwaysCompletingState("State0")
	state1 := newAlwaysCompletingState("State1")

	s.AddDirectTransition(state0, state1)

	t.Run("Adds both states to internal map", func(t *testing.T) {
		assert.Contains(t, s.states, "State0")
		assert.Contains(t, s.states, "State1")
	})

	t.Run("Creates transition for first state", func(t *testing.T) {
		assert.Len(t,
s.allTransitions["State0"], 1)
		assert.Equal(t, s.allTransitions["State0"][0].from.Name, "State0")
		assert.Equal(t, s.allTransitions["State0"][0].to.Name, "State1")
	})

	t.Run("Does not create transition for second state", func(t *testing.T) {
		assert.Len(t, s.allTransitions["State1"], 0)
	})
}

// TestIfStateFails_ItIsRunAgain checks that an incomplete state keeps being
// selected until it reports completion.
func TestIfStateFails_ItIsRunAgain(t *testing.T) {
	fails := newAlwaysFailsState("FailsState")
	succeeds := newAlwaysCompletingState("SucceedsState")

	in := newInMemorySaveLoader(fails.Name)
	s := NewStateMachine(in, types.NamespacedName{}, zap.S())

	s.AddDirectTransition(fails, succeeds)

	t.Run("Any number of runs will not change the next state to be run", func(t *testing.T) {
		_, _ = s.Reconcile()
		assert.Equal(t, fails.Name, in.nextState)

		_, _ = s.Reconcile()
		assert.Equal(t, fails.Name, in.nextState)

		_, _ = s.Reconcile()
		assert.Equal(t, fails.Name, in.nextState)
	})

	t.Run("When the state passes, the next one will run", func(t *testing.T) {

		// the state will now succeed
		s.states["FailsState"] = newAlwaysCompletingState(fails.Name)

		_, _ = s.Reconcile()
		assert.Equal(t, succeeds.Name, in.nextState)
	})
}

// TestStateReconcileValue_IsReturnedFromStateMachine checks that whatever a
// state's Reconcile returns is passed through Machine.Reconcile unchanged.
func TestStateReconcileValue_IsReturnedFromStateMachine(t *testing.T) {
	t.Run("When State is Completed", func(t *testing.T) {
		s0 := State{
			Name: "State0",
			Reconcile: func() (reconcile.Result, error, bool) {
				return reconcile.Result{RequeueAfter: time.Duration(15000)}, errors.New("error"), true
			},
		}

		s1 := newAlwaysCompletingState("State1")

		in := newInMemorySaveLoader(s0.Name)
		s := NewStateMachine(in, types.NamespacedName{}, zap.S())

		s.AddDirectTransition(s0, s1)

		res, err := s.Reconcile()
		assert.False(t, res.Requeue)
		assert.Equal(t, time.Duration(15000), res.RequeueAfter)
		assert.Equal(t, errors.New("error"), err)
	})

	t.Run("When State is not Completed", func(t *testing.T) {
		s0 := State{
			Name: "State0",
			Reconcile: func() (reconcile.Result, error, bool) {
				return
reconcile.Result{Requeue: true, RequeueAfter: time.Duration(5000)}, errors.New("error"), false + }, + } + + s1 := newAlwaysCompletingState("State1") + + in := newInMemorySaveLoader(s0.Name) + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + s.AddDirectTransition(s0, s1) + + res, err := s.Reconcile() + assert.True(t, res.Requeue) + assert.Equal(t, time.Duration(5000), res.RequeueAfter) + assert.Equal(t, errors.New("error"), err) + }) +} + +func TestCycleInStateMachine(t *testing.T) { + s0 := newAlwaysCompletingState("State0") + s1 := newAlwaysCompletingState("State1") + s2 := newAlwaysCompletingState("State2") + s3 := newAlwaysCompletingState("State3") + s4 := newAlwaysCompletingState("State4") + + in := newInMemorySaveLoader("State0") + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + flag := true + s.AddDirectTransition(s0, s1) + s.AddDirectTransition(s1, s2) + s.AddDirectTransition(s2, s3) + + // create a one time cycle back to s1 + s.AddTransition(s3, s1, func() bool { + res := flag + flag = !flag + return res + }) + + s.AddDirectTransition(s3, s4) + + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + + assert.Equal(t, []string{"State0", "State1", "State2", "State3", "State1", "State2", "State3", "State4"}, in.stateHistory) +} + +func TestBranchingPath(t *testing.T) { + root := newAlwaysCompletingState("Root") + left0 := newAlwaysCompletingState("Left0") + left1 := newAlwaysCompletingState("Left1") + left2 := newAlwaysCompletingState("Left2") + + right0 := newAlwaysCompletingState("Right0") + right1 := newAlwaysCompletingState("Right1") + right2 := newAlwaysCompletingState("Right2") + + in := newInMemorySaveLoader(root.Name) + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + goLeft := true + + s.AddTransition(root, left0, func() bool { + return goLeft + }) + s.AddDirectTransition(left0, 
left1) + s.AddDirectTransition(left1, left2) + + s.AddTransition(root, right0, func() bool { + return !goLeft + }) + + s.AddDirectTransition(right0, right1) + s.AddDirectTransition(right1, right2) + + t.Run("Left Path", func(t *testing.T) { + + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + + assert.Equal(t, []string{"Root", "Left0", "Left1", "Left2"}, in.stateHistory) + }) + + t.Run("Right Path", func(t *testing.T) { + goLeft = false + // reset save loader state + in.stateHistory = nil + _ = in.SaveNextState(types.NamespacedName{}, root.Name) + + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + _, _ = s.Reconcile() + + assert.Equal(t, []string{"Root", "Right0", "Right1", "Right2"}, in.stateHistory) + }) +} + +func TestDetermineStartingState_ReadsFromLoader(t *testing.T) { + t.Run("State Can be determined once added", func(t *testing.T) { + s0 := newAlwaysCompletingState("State0") + s1 := newAlwaysCompletingState("State1") + + in := newInMemorySaveLoader(s0.Name) + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + // State must be added before it can be returned in determine state + s.AddDirectTransition(s0, s1) + + assert.Nil(t, s.currentState) + err := s.determineState() + assert.NoError(t, err) + assert.Equal(t, "State0", s.currentState.Name) + }) + + t.Run("State cannot be determined if not added", func(t *testing.T) { + s0 := newAlwaysCompletingState("State0") + + in := newInMemorySaveLoader(s0.Name) + s := NewStateMachine(in, types.NamespacedName{}, zap.S()) + + assert.Nil(t, s.currentState) + err := s.determineState() + assert.Error(t, err) + assert.Nil(t, s.currentState) + }) + +} + +// newAlwaysCompletingState returns a State that will always succeed. +func newAlwaysCompletingState(name string) State { + return State{ + Name: name, + Reconcile: result.StateComplete, + } +} + +// newAlwaysFailsState returns a State that will always fail. 
+func newAlwaysFailsState(name string) State { + return State{ + Name: name, + Reconcile: result.FailedState, + } +} diff --git a/pkg/util/status/status.go b/pkg/util/status/status.go new file mode 100644 index 000000000..21aebfc62 --- /dev/null +++ b/pkg/util/status/status.go @@ -0,0 +1,52 @@ +package status + +import ( + "context" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type Option interface { + ApplyOption(mdb *mdbv1.MongoDBCommunity) + GetResult() (reconcile.Result, error) +} + +type OptionBuilder interface { + GetOptions() []Option +} + +// Update takes the options provided by the given option builder, applies them all and then updates the resource +func Update(ctx context.Context, statusWriter client.StatusWriter, mdb *mdbv1.MongoDBCommunity, optionBuilder OptionBuilder) (reconcile.Result, error) { + options := optionBuilder.GetOptions() + for _, opt := range options { + opt.ApplyOption(mdb) + } + + if err := statusWriter.Update(ctx, mdb); err != nil { + return reconcile.Result{}, err + } + + return determineReconciliationResult(options) +} + +func determineReconciliationResult(options []Option) (reconcile.Result, error) { + // if there are any errors in any of our options, we return those first + for _, opt := range options { + res, err := opt.GetResult() + if err != nil { + return res, err + } + } + // otherwise we might need to re-queue + for _, opt := range options { + res, _ := opt.GetResult() + if res.Requeue || res.RequeueAfter > 0 { + return res, nil + } + } + // it was a successful reconciliation, nothing to do + return reconcile.Result{}, nil +} diff --git a/pkg/util/status/status_test.go b/pkg/util/status/status_test.go new file mode 100644 index 000000000..c6e5f810d --- /dev/null +++ b/pkg/util/status/status_test.go @@ -0,0 +1,88 @@ +package status + +import ( + "fmt" + "testing" + "time" + + mdbv1 
"github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type errorOption struct{} + +func (e errorOption) ApplyOption(_ *mdbv1.MongoDBCommunity) {} + +func (e errorOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{}, fmt.Errorf("error") +} + +type successOption struct{} + +func (s successOption) ApplyOption(_ *mdbv1.MongoDBCommunity) {} + +func (s successOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +type retryOption struct{} + +func (r retryOption) ApplyOption(_ *mdbv1.MongoDBCommunity) {} + +func (r retryOption) GetResult() (reconcile.Result, error) { + return reconcile.Result{Requeue: true}, nil +} + +func TestDetermineReconciliationResult(t *testing.T) { + + t.Run("A single error option should result in an error return", func(t *testing.T) { + opts := []Option{ + errorOption{}, + successOption{}, + successOption{}, + } + + res, err := determineReconciliationResult(opts) + assert.NotNil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("An error option takes precedence over a retry", func(t *testing.T) { + opts := []Option{ + errorOption{}, + retryOption{}, + successOption{}, + } + res, err := determineReconciliationResult(opts) + assert.NotNil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("No errors will result in a successful reconciliation", func(t *testing.T) { + opts := []Option{ + successOption{}, + successOption{}, + successOption{}, + } + res, err := determineReconciliationResult(opts) + assert.Nil(t, err) + assert.Equal(t, false, res.Requeue) + assert.Equal(t, time.Duration(0), res.RequeueAfter) + }) + + t.Run("A retry will take precedence over success", func(t *testing.T) { + opts := []Option{ + successOption{}, + successOption{}, + retryOption{}, + } + res, err 
:= determineReconciliationResult(opts) + assert.Nil(t, err) + assert.Equal(t, true, res.Requeue) + }) + +} diff --git a/pkg/util/versions/versions.go b/pkg/util/versions/versions.go new file mode 100644 index 000000000..40be0ae0c --- /dev/null +++ b/pkg/util/versions/versions.go @@ -0,0 +1,21 @@ +package versions + +import ( + "fmt" + + "github.com/blang/semver" +) + +// CalculateFeatureCompatibilityVersion returns a version in the format of "x.y" +func CalculateFeatureCompatibilityVersion(versionStr string) string { + v1, err := semver.Make(versionStr) + if err != nil { + return "" + } + + if v1.GTE(semver.MustParse("3.4.0")) { + return fmt.Sprintf("%d.%d", v1.Major, v1.Minor) + } + + return "" +} diff --git a/pkg/util/versions/versions_test.go b/pkg/util/versions/versions_test.go new file mode 100644 index 000000000..139b89387 --- /dev/null +++ b/pkg/util/versions/versions_test.go @@ -0,0 +1,34 @@ +package versions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCalculateFCV(t *testing.T) { + t.Run("Version > 3.4 works as expected", func(t *testing.T) { + assert.Equal(t, "4.2", CalculateFeatureCompatibilityVersion("4.2.0")) + assert.Equal(t, "4.2", CalculateFeatureCompatibilityVersion("4.2.5")) + assert.Equal(t, "4.2", CalculateFeatureCompatibilityVersion("4.2.8")) + assert.Equal(t, "4.4", CalculateFeatureCompatibilityVersion("4.4.0")) + assert.Equal(t, "4.4", CalculateFeatureCompatibilityVersion("4.4.1")) + assert.Equal(t, "4.0", CalculateFeatureCompatibilityVersion("4.0.8")) + assert.Equal(t, "4.0", CalculateFeatureCompatibilityVersion("4.0.5")) + assert.Equal(t, "4.0", CalculateFeatureCompatibilityVersion("4.0.12")) + }) + + t.Run("Version == 3.4 works as expected", func(t *testing.T) { + assert.Equal(t, "3.4", CalculateFeatureCompatibilityVersion("3.4.12")) + assert.Equal(t, "3.4", CalculateFeatureCompatibilityVersion("3.4.10")) + assert.Equal(t, "3.4", CalculateFeatureCompatibilityVersion("3.4.5")) + assert.Equal(t, "3.4", 
CalculateFeatureCompatibilityVersion("3.4.0")) + }) + + t.Run("Version < 3.4 returns empty string", func(t *testing.T) { + assert.Equal(t, "", CalculateFeatureCompatibilityVersion("3.2.1")) + assert.Equal(t, "", CalculateFeatureCompatibilityVersion("3.3.20")) + assert.Equal(t, "", CalculateFeatureCompatibilityVersion("2.0.12")) + assert.Equal(t, "", CalculateFeatureCompatibilityVersion("1.4.5")) + }) +} diff --git a/release.json b/release.json new file mode 100644 index 000000000..078b90861 --- /dev/null +++ b/release.json @@ -0,0 +1,8 @@ +{ + "golang-builder-image": "golang:1.24", + "operator": "0.13.0", + "version-upgrade-hook": "1.0.10", + "readiness-probe": "1.0.23", + "agent": "108.0.6.8796-1", + "agent-tools-version": "100.11.0" +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..3247df769 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,20 @@ +git+https://github.com/mongodb/sonar@bc7bf7732851425421f3cfe2a19cf50b0460e633 +github-action-templates==0.0.4 +docker==7.1.0 +kubernetes==26.1.0 +jinja2==3.1.4 +MarkupSafe==2.0.1 +PyYAML==6.0.1 +black==24.3.0 +mypy==0.961 +tqdm==v4.66.3 +boto3==1.16.21 +pymongo==4.6.3 +dnspython==2.6.1 +requests==2.32.3 +ruamel.yaml==0.17.9 +semver==2.13.0 +rsa>=4.7 # not directly required, pinned by Snyk to avoid a vulnerability +setuptools==78.1.1 # not directly required, pinned by Snyk to avoid a vulnerability +certifi>=2022.12.7 # not directly required, pinned by Snyk to avoid a vulnerability +urllib3<2 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/scripts/ci/base_logger.py b/scripts/ci/base_logger.py new file mode 100644 index 000000000..571c10aa0 --- /dev/null +++ b/scripts/ci/base_logger.py @@ -0,0 +1,21 @@ +import logging +import os +import sys + +LOGLEVEL = os.environ.get("LOGLEVEL", "DEBUG").upper() +logger = logging.getLogger("pipeline") +logger.setLevel(LOGLEVEL) +logger.propagate = False + +# Output Debug and Info logs to stdout, and above to stderr 
+stdout_handler = logging.StreamHandler(sys.stdout) +stdout_handler.setLevel(logging.DEBUG) +stdout_handler.addFilter(lambda record: record.levelno <= logging.INFO) +stderr_handler = logging.StreamHandler(sys.stderr) +stderr_handler.setLevel(logging.WARNING) + +formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") +stdout_handler.setFormatter(formatter) +stderr_handler.setFormatter(formatter) +logger.addHandler(stdout_handler) +logger.addHandler(stderr_handler) diff --git a/scripts/ci/build_and_push_image.sh b/scripts/ci/build_and_push_image.sh deleted file mode 100755 index b18a1a9d7..000000000 --- a/scripts/ci/build_and_push_image.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh - -echo ${quay_password} | docker login -u=${quay_user_name} quay.io --password-stdin - -python scripts/dev/dockerfile_generator.py ${image_type} > Dockerfile -docker build . -f Dockerfile -t ${image} -docker push ${image} diff --git a/scripts/ci/config.json b/scripts/ci/config.json new file mode 100644 index 000000000..0260f015f --- /dev/null +++ b/scripts/ci/config.json @@ -0,0 +1,16 @@ +{ + "namespace": "default", + "repo_url": "quay.io/mongodb", + "mongodb_image_repo_url": "quay.io/mongodb", + "mongodb_image_name": "mongodb-community-server", + "operator_image": "mongodb-kubernetes-operator", + "operator_image_dev": "community-operator-dev", + "e2e_image": "community-operator-e2e", + "version_upgrade_hook_image": "mongodb-kubernetes-operator-version-upgrade-post-start-hook", + "version_upgrade_hook_image_dev": "mongodb-kubernetes-operator-version-upgrade-post-start-hook-dev", + "agent_image": "mongodb-agent-ubi", + "agent_image_dev": "mongodb-agent-ubi-dev", + "readiness_probe_image": "mongodb-kubernetes-readinessprobe", + "readiness_probe_image_dev": "mongodb-kubernetes-readinessprobe-dev", + "s3_bucket": "s3://enterprise-operator-dockerfiles/dockerfiles" +} diff --git a/scripts/ci/create_kind_cluster.sh b/scripts/ci/create_kind_cluster.sh index d06e159d2..12c726407 
100755 --- a/scripts/ci/create_kind_cluster.sh +++ b/scripts/ci/create_kind_cluster.sh @@ -1,3 +1,6 @@ #!/bin/sh -kind create cluster --kubeconfig ${KUBECONFIG} +kind create cluster --kubeconfig "${KUBECONFIG}" + +echo "Creating CRDs" +kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml diff --git a/scripts/ci/determine_required_releases.py b/scripts/ci/determine_required_releases.py new file mode 100755 index 000000000..f77b9df11 --- /dev/null +++ b/scripts/ci/determine_required_releases.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 + +# This script accepts a key from the "release.json" file. +# If the corresponding image of the specified version has been released, + +import json +import sys +from typing import List, Dict + +import requests + +# contains a map of the quay urls to fetch data about the corresponding images. +QUAY_URL_MAP: Dict[str, List[str]] = { + "agent": [ + "https://quay.io/api/v1/repository/mongodb/mongodb-agent-ubi", + "https://quay.io/api/v1/repository/mongodb/mongodb-agent", + ], + "readiness-probe": [ + "https://quay.io/api/v1/repository/mongodb/mongodb-kubernetes-readinessprobe", + ], + "version-upgrade-hook": [ + "https://quay.io/api/v1/repository/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook" + ], + "operator": [ + "https://quay.io/api/v1/repository/mongodb/mongodb-kubernetes-operator" + ], +} + + +def _get_all_released_tags(url: str) -> List[str]: + resp = requests.get(url).json() + tags = resp["tags"] + return list(tags.keys()) + + +def _load_image_name_to_version_map() -> Dict[str, str]: + """ + _load_image_name_to_version_map returns a mapping of each image name + to the corresponding version. + + e.g. + { + "mongodb-kubernetes-operator" : "0.7.2", + "mongodb-agent" : "11.0.11.7036-1" + ... 
+ } + """ + with open("release.json") as f: + release = json.loads(f.read()) + + return release + + +def _all_urls_are_released(urls: List[str], version: str) -> bool: + """ + _all_urls_are_released returns True if the given version exists + as a tag in all urls provided. + """ + for url in urls: + tags = _get_all_released_tags(url) + if version not in tags: + return False + return True + + +def main() -> int: + if len(sys.argv) != 2: + raise ValueError("usage: determine_required_releases.py [image-name]") + + image_name = sys.argv[1] + name_to_version_map = _load_image_name_to_version_map() + + if image_name not in name_to_version_map: + raise ValueError( + "Unknown image type [{}], valid values are [{}]".format( + image_name, ",".join(name_to_version_map.keys()) + ) + ) + + if image_name not in QUAY_URL_MAP: + raise ValueError("No associated image urls for key [{}]".format(image_name)) + + if _all_urls_are_released( + QUAY_URL_MAP[image_name], name_to_version_map[image_name] + ): + print("released") + else: + print("unreleased") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/ci/download.go b/scripts/ci/download.go deleted file mode 100644 index 6117ad178..000000000 --- a/scripts/ci/download.go +++ /dev/null @@ -1,72 +0,0 @@ -package main - -import ( - "fmt" - "io" - "net/http" - "os" - "path" -) - -// download.go uses the following environment variables: -// URL: The url of the file to download -// DIR: The directory which the newly downloaded file will be placed -// FILENAME: The name the file should have after being downloaded - -func main() { - if err := downloadFile(mustMakeOptions()); err != nil { - panic(fmt.Errorf("error downloading file: %s", err)) - } -} - -type downloadOptions struct { - url, fileName, dir string - perms os.FileMode -} - -func mustMakeOptions() downloadOptions { - return downloadOptions{ - url: os.Getenv("URL"), - fileName: os.Getenv("FILENAME"), - perms: os.FileMode(755), - dir: os.Getenv("DIR"), - } 
-} - -func downloadFile(opts downloadOptions) error { - fmt.Printf("Using download options: %+v\n", opts) - fullPath := path.Join(opts.dir, opts.fileName) - fmt.Printf("full path to directory: %s\n", fullPath) - if err := os.MkdirAll(opts.dir, opts.perms); err != nil { - return fmt.Errorf("error making directory %s with permissions %d: %s", opts.dir, opts.perms, err) - } - if err := fetchFile(fullPath, opts.url); err != nil { - return fmt.Errorf("error fetching file: %s", err) - } - fmt.Printf("successfully downloaded file from %s to %s\n", opts.url, fullPath) - if err := os.Chmod(fullPath, opts.perms); err != nil { - return fmt.Errorf("error changing file permissions: %s", err) - } - return nil -} - -func fetchFile(filePath, url string) error { - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("error getting url: %s", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("bad status: %s", resp.Status) - } - - out, err := os.Create(filePath) - if err != nil { - return fmt.Errorf("error creating file: %s", err) - } - defer out.Close() - - _, err = io.Copy(out, resp.Body) - return err -} diff --git a/scripts/ci/dump_diagnostics.sh b/scripts/ci/dump_diagnostics.sh new file mode 100755 index 000000000..c25374250 --- /dev/null +++ b/scripts/ci/dump_diagnostics.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +mkdir -p diagnostics/secrets + +namespace="$1" + +echo "Dumping CRD" +kubectl get crd mongodbcommunity.mongodbcommunity.mongodb.com -o yaml > diagnostics/crd.yaml + +echo "Dumping Pod list" +kubectl get pods > diagnostics/pod-list.txt + +echo "Dumping Event list" +kubectl get events --sort-by='.lastTimestamp' -owide > diagnostics/events-list.txt + +echo "Dumping yaml Event list" +kubectl kubectl get events --sort-by='.lastTimestamp' -oyaml > diagnostics/events-list.yaml + +# dump operator deployment information. 
+for deployment_name in $(kubectl get deployment -n "${namespace}" --output=jsonpath={.items..metadata.name}); do + echo "Writing Deployment describe for deployment ${deployment_name}" + kubectl describe deploy "${deployment_name}" > "diagnostics/${deployment_name}.txt" + + echo "Writing Deployment yaml for deployment ${deployment_name}" + kubectl get deploy "${deployment_name}" -o yaml > "diagnostics/${deployment_name}.yaml" +done + +# dump logs for every container in every pod in the given namespace +for pod_name in $(kubectl get pod -n "${namespace}" --output=jsonpath={.items..metadata.name}); do + echo "Writing Pod describe for pod ${pod_name}" + kubectl describe pod "${pod_name}" > "diagnostics/${pod_name}.txt" + + echo "Writing Pod yaml for pod ${pod_name}" + kubectl get pod "${pod_name}" -o yaml > "diagnostics/${pod_name}.yaml" + + # dump agent output + kubectl cp "${pod_name}":/var/log/mongodb-mms-automation -c mongodb-agent diagnostics/"${pod_name}-mongodb-automation"/ + + for container_name in $(kubectl get pods -n "${namespace}" "${pod_name}" -o jsonpath='{.spec.containers[*].name}'); do + echo "Writing log file for pod ${pod_name} - container ${container_name} to diagnostics/${pod_name}-${container_name}.log" + kubectl logs -n "${namespace}" "${pod_name}" -c "${container_name}" > "diagnostics/${pod_name}-${container_name}.log"; + done +done + +# dump information about MongoDBCommunity resources and statefulsets. 
+for mdbc_name in $(kubectl get mongodbcommunity -n "${namespace}" --output=jsonpath={.items..metadata.name}); do + echo "Writing MongoDBCommunity describe" + kubectl describe mongodbcommunity "${mdbc_name}" -n "${namespace}" > "diagnostics/${mdbc_name}-mongodbcommunity.txt" + echo "Writing yaml output for MongoDBCommunity ${mdbc_name}" + kubectl get mongodbcommunity "${mdbc_name}" -n "${namespace}" -o yaml > "diagnostics/${mdbc_name}-mongodbcommunity.yaml" + echo "Writing describe output for StatefulSet ${mdbc_name}" + kubectl describe sts "${mdbc_name}" -n "${namespace}" > "diagnostics/${mdbc_name}-statefulset.txt" + echo "Writing yaml output for StatefulSet ${mdbc_name}" + kubectl get sts "${mdbc_name}" -n "${namespace}" -o yaml > "diagnostics/${mdbc_name}-statefulset.yaml" + + echo "Writing Automation Config Secret" + kubectl get secret "${mdbc_name}-config" -o jsonpath='{ .data.cluster-config\.json}' | base64 -d | jq > "diagnostics/secrets/${mdbc_name}-config.json" +done + +# dump information about relevant secrets. +# Skip service account tokens, and also skip the Automation Config as this is handled as a special case above. +for secret in $(kubectl get secret -n "${namespace}" --output=jsonpath={.items..metadata.name}); do + if ! 
echo "${secret}" | grep -qE "token|-config"; then + echo "Writing secret ${secret}" + kubectl get secret "${secret}" -o json | jq -r '.data | with_entries(.value |= @base64d)' > "diagnostics/secrets/${secret}.json" + else + echo "Skipping secret ${secret}" + fi +done diff --git a/scripts/ci/images_signing.py b/scripts/ci/images_signing.py new file mode 100644 index 000000000..e2fb4a94e --- /dev/null +++ b/scripts/ci/images_signing.py @@ -0,0 +1,208 @@ +import os +import subprocess +import sys +from typing import List, Optional + +import requests + +from scripts.ci.base_logger import logger + +SIGNING_IMAGE_URI = os.environ.get( + "SIGNING_IMAGE_URI", + "artifactory.corp.mongodb.com/release-tools-container-registry-local/garasign-cosign", +) + + +def mongodb_artifactory_login() -> None: + command = [ + "docker", + "login", + "--password-stdin", + "--username", + os.environ["ARTIFACTORY_USERNAME"], + "artifactory.corp.mongodb.com/release-tools-container-registry-local/garasign-cosign", + ] + try: + subprocess.run( + command, + input=os.environ["ARTIFACTORY_PASSWORD"].encode("utf-8"), + check=True, + ) + except subprocess.CalledProcessError as e: + logger.error(f"Authentication to MongoDB Artifactory failed : {e.returncode}") + logger.error(f"Output: {e.stderr}") + + +def get_ecr_login_password(region: str) -> Optional[str]: + """ + Retrieves the login password from aws CLI, the secrets need to be stored in ~/.aws/credentials or equivalent. 
+ :param region: Registry's AWS region + :return: The password as a string + """ + try: + result = subprocess.run( + ["aws", "ecr", "get-login-password", "--region", region], + capture_output=True, + text=True, + check=True, + ) + return result.stdout.strip() + except subprocess.CalledProcessError as e: + logger.error(f"Failed to get ECR login password: {e.stderr}") + return None + + +def is_ecr_registry(image_name: str) -> bool: + return "amazonaws.com" in image_name + + +def get_image_digest(image_name: str) -> Optional[str]: + """ + Retrieves the digest of an image from its tag. Uses the skopeo container to be able to retrieve manifests tags as well. + :param image_name: The full image name with its tag. + :return: the image digest, or None in case of failure. + """ + + transport_protocol = "docker://" + # Get digest + digest_command = [ + "docker", + "run", + "--rm", + f"--volume={os.path.expanduser('~')}/.aws:/root/.aws:ro", + "quay.io/skopeo/stable:latest", + "inspect", + "--format={{.Digest}}", + ] + + # Specify ECR credentials if necessary + if is_ecr_registry(image_name): + aws_region = os.environ.get("AWS_DEFAULT_REGION", "eu-west-1") + ecr_password = get_ecr_login_password(aws_region) + digest_command.append(f"--creds=AWS:{ecr_password}") + + digest_command.append(f"{transport_protocol}{image_name}") + + try: + result = subprocess.run( + digest_command, capture_output=True, text=True, check=True + ) + digest = result.stdout.strip() + return digest + except subprocess.CalledProcessError as e: + logger.error(f"Failed to get digest for {image_name}: {e.stderr}") + sys.exit(1) + + +def build_cosign_docker_command( + additional_args: List[str], cosign_command: List[str] +) -> List[str]: + """ + Common logic to build a cosign command with the garasign cosign image provided by DevProd. 
+ :param additional_args: additional arguments passed to the docker container, e.g mounted volume or env + :param cosign_command: actual command executed with cosign such as `sign` or `verify` + :return: the full command as a List of strings + """ + home_dir = os.path.expanduser("~") + base_command = [ + "docker", + "run", + "--platform", + "linux/amd64", + "--rm", + f"--volume={home_dir}/.docker/config.json:/root/.docker/config.json:ro", + ] + return ( + base_command + additional_args + [SIGNING_IMAGE_URI, "cosign"] + cosign_command + ) + + +def sign_image(repository: str, tag: str) -> None: + image = repository + ":" + tag + logger.debug(f"Signing image {image}") + + working_directory = os.getcwd() + container_working_directory = "/usr/local/kubernetes" + + # Referring to the image via its tag is deprecated in cosign + # We fetch the digest from the registry + digest = get_image_digest(image) + if digest is None: + logger.error("Impossible to get image digest, exiting...") + sys.exit(1) + image_ref = f"{repository}@{digest}" + + # Read secrets from environment and put them in env file for container + grs_username = os.environ["GRS_USERNAME"] + grs_password = os.environ["GRS_PASSWORD"] + pkcs11_uri = os.environ["PKCS11_URI"] + env_file_lines = [ + f"GRS_CONFIG_USER1_USERNAME={grs_username}", + f"GRS_CONFIG_USER1_PASSWORD={grs_password}", + f"COSIGN_REPOSITORY={repository}", + ] + env_file_content = "\n".join(env_file_lines) + temp_file = "./env-file" + with open(temp_file, "w") as f: + f.write(env_file_content) + + additional_args = [ + f"--env-file={temp_file}", + f"--volume={working_directory}:{container_working_directory}", + f"--workdir={container_working_directory}", + ] + cosign_command = [ + "sign", + f"--key={pkcs11_uri}", + f"--sign-container-identity={image}", + f"--tlog-upload=false", + image_ref, + ] + command = build_cosign_docker_command(additional_args, cosign_command) + + try: + subprocess.run(command, check=True) + except 
subprocess.CalledProcessError as e: + # Fail the pipeline if signing fails + logger.error(f"Failed to sign image {image}: {e.stderr}") + raise + logger.debug("Signing successful") + + +def verify_signature(repository: str, tag: str) -> bool: + image = repository + ":" + tag + logger.debug(f"Verifying signature of {image}") + public_key_url = os.environ.get( + "SIGNING_PUBLIC_KEY_URL", + "https://cosign.mongodb.com/mongodb-enterprise-kubernetes-operator.pem", + ) + r = requests.get(public_key_url) + # Ensure the request was successful + if r.status_code == 200: + # Access the content of the file + kubernetes_operator_public_key = r.text + else: + logger.error(f"Failed to retrieve the public key: Status code {r.status_code}") + return False + + public_key_var_name = "OPERATOR_PUBLIC_KEY" + additional_args = [ + "--env", + f"{public_key_var_name}={kubernetes_operator_public_key}", + ] + cosign_command = [ + "verify", + "--insecure-ignore-tlog", + f"--key=env://{public_key_var_name}", + image, + ] + command = build_cosign_docker_command(additional_args, cosign_command) + + try: + subprocess.run(command, capture_output=True, text=True, check=True) + except subprocess.CalledProcessError as e: + # Fail the pipeline if verification fails + logger.error(f"Failed to verify signature for image {image}: {e.stderr}") + raise + logger.debug("Successful verification") + return True diff --git a/scripts/ci/run_test.sh b/scripts/ci/run_test.sh deleted file mode 100755 index f1c9ee26b..000000000 --- a/scripts/ci/run_test.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - - -# This is a temporary fix to correct the KUBECONFIG file that gets mounted. -# When the KUBECONFIG file is created by kind, it points to localhost. 
When inside -# the cluster, we need to set this value to be .spec.clusterIP instead -# See: https://github.com/operator-framework/operator-sdk/issues/2618 -KUBERNETES_SERVICE_HOST="$(kubectl get service kubernetes -o jsonpath='{.spec.clusterIP }')" -temp=$(mktemp) -cat ${KUBECONFIG} | sed "s/server: https.*/server: https:\/\/${KUBERNETES_SERVICE_HOST}/g" > ${temp} -contents="$(cat ${temp})" -kubectl create cm kube-config --from-literal=kubeconfig="${contents}" -rm ${temp} - -# create roles and service account required for the test runner -kubectl apply -f deploy/testrunner - -# start the test runner pod -kubectl run test-runner --generator=run-pod/v1 \ - --restart=Never \ - --image-pull-policy=Always \ - --image=quay.io/mongodb/community-operator-testrunner:${version_id} \ - --serviceaccount=test-runner \ - --command -- ./runner --operatorImage quay.io/mongodb/community-operator-dev:${version_id} --testImage quay.io/mongodb/community-operator-e2e:${version_id} --test=${test} - - -echo "Test pod is ready to begin" -kubectl wait --for=condition=Ready pod -l run=test-runner --timeout=600s - -# The test will have fully finished when tailing logs finishes -kubectl logs -f -l run=test-runner - -result="$(kubectl get pod -l run=test-runner -o jsonpath='{ .items[0].status.phase }')" -if [[ ${result} != "Succeeded" ]]; then - exit 1 -fi diff --git a/scripts/ci/run_unit_tests.sh b/scripts/ci/run_unit_tests.sh deleted file mode 100755 index c2998647f..000000000 --- a/scripts/ci/run_unit_tests.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -python scripts/dev/dockerfile_generator.py "unittest" > Dockerfile -docker build . 
-f Dockerfile -t unit-tests:${version_id} -docker run unit-tests:${version_id} diff --git a/scripts/ci/update_release.py b/scripts/ci/update_release.py new file mode 100755 index 000000000..96c76746f --- /dev/null +++ b/scripts/ci/update_release.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +import json +import sys +from typing import Dict, Callable + +import ruamel.yaml + +yaml = ruamel.yaml.YAML() + + +RELATIVE_PATH_TO_MANAGER_YAML = "config/manager/manager.yaml" +RELATIVE_PATH_TO_OPENSHIFT_MANAGER_YAML = "deploy/openshift/operator_openshift.yaml" + +RELATIVE_PATH_TO_CHART_VALUES = "helm-charts/charts/community-operator/values.yaml" +RELATIVE_PATH_TO_CHART = "helm-charts/charts/community-operator/Chart.yaml" +RELATIVE_PATH_TO_CRD_CHART = "helm-charts/charts/community-operator-crds/Chart.yaml" + + +def _load_yaml_file(path: str) -> Dict: + with open(path, "r") as f: + return yaml.load(f.read()) + + +def _dump_yaml(operator: Dict, path: str) -> None: + with open(path, "w+") as f: + yaml.dump(operator, f) + + +def update_and_write_file(path: str, update_function: Callable) -> None: + release = _load_release() + yaml_file = _load_yaml_file(path) + update_function(yaml_file, release) + _dump_yaml(yaml_file, path) + + +def _load_release() -> Dict: + with open("release.json", "r") as f: + return json.loads(f.read()) + + +def _replace_tag(image: str, new_tag: str) -> str: + split_image = image.split(":") + return split_image[0] + ":" + new_tag + + +def update_operator_deployment(operator_deployment: Dict, release: Dict) -> None: + operator_container = operator_deployment["spec"]["template"]["spec"]["containers"][ + 0 + ] + operator_container["image"] = _replace_tag( + operator_container["image"], release["operator"] + ) + operator_envs = operator_container["env"] + for env in operator_envs: + if env["name"] == "VERSION_UPGRADE_HOOK_IMAGE": + env["value"] = _replace_tag(env["value"], release["version-upgrade-hook"]) + if env["name"] == "READINESS_PROBE_IMAGE": + 
env["value"] = _replace_tag(env["value"], release["readiness-probe"]) + if env["name"] == "AGENT_IMAGE": + env["value"] = _replace_tag(env["value"], release["agent"]) + + +def update_chart_values(values: Dict, release: Dict) -> None: + values["agent"]["version"] = release["agent"] + values["versionUpgradeHook"]["version"] = release["version-upgrade-hook"] + values["readinessProbe"]["version"] = release["readiness-probe"] + values["operator"]["version"] = release["operator"] + + +def update_chart(chart: Dict, release: Dict) -> None: + chart["version"] = release["operator"] + chart["appVersion"] = release["operator"] + + for dependency in chart.get("dependencies", []): + if dependency["name"] == "community-operator-crds": + dependency["version"] = release["operator"] + + +def main() -> int: + # Updating local files + update_and_write_file(RELATIVE_PATH_TO_MANAGER_YAML, update_operator_deployment) + update_and_write_file( + RELATIVE_PATH_TO_OPENSHIFT_MANAGER_YAML, update_operator_deployment + ) + + # Updating Helm Chart files + update_and_write_file(RELATIVE_PATH_TO_CHART_VALUES, update_chart_values) + update_and_write_file(RELATIVE_PATH_TO_CHART, update_chart) + update_and_write_file(RELATIVE_PATH_TO_CRD_CHART, update_chart) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/dev/build_and_deploy_operator.py b/scripts/dev/build_and_deploy_operator.py deleted file mode 100644 index afeed5f7b..000000000 --- a/scripts/dev/build_and_deploy_operator.py +++ /dev/null @@ -1,140 +0,0 @@ -import io -import os -import time -from typing import Dict, Optional - -import yaml -from kubernetes import client, config -from kubernetes.client.rest import ApiException - -from dev_config import DevConfig, load_config -from dockerfile_generator import render -from dockerutil import build_and_push_image - - -def _load_operator_service_account() -> Optional[Dict]: - return load_yaml_from_file("deploy/service_account.yaml") - - -def _load_operator_role() -> 
Optional[Dict]: - return load_yaml_from_file("deploy/role.yaml") - - -def _load_operator_role_binding() -> Optional[Dict]: - return load_yaml_from_file("deploy/role_binding.yaml") - - -def _load_operator_deployment() -> Optional[Dict]: - return load_yaml_from_file("deploy/operator.yaml") - - -def _load_mongodb_crd() -> Optional[Dict]: - return load_yaml_from_file("deploy/crds/mongodb.com_mongodb_crd.yaml") - - -def load_yaml_from_file(path: str) -> Optional[Dict]: - with open(path, "r") as f: - return yaml.full_load(f.read()) - return None - - -def _ensure_crds(): - """ - ensure_crds makes sure that all the required CRDs have been created - """ - crdv1 = client.ApiextensionsV1beta1Api() - crd = _load_mongodb_crd() - - ignore_if_doesnt_exist( - lambda: crdv1.delete_custom_resource_definition("mongodbs.mongodb.com") - ) - - # TODO: fix this, when calling create_custom_resource_definition, we get the error - # ValueError("Invalid value for `conditions`, must not be `None`") - # but the crd is still successfully created - try: - crdv1.create_custom_resource_definition(body=crd) - except ValueError as e: - pass - - print("Ensured CRDs") - - -def build_and_push_operator(repo_url: str, tag: str, path: str): - """ - build_and_push_operator creates the Dockerfile for the operator - and pushes it to the target repo - """ - return build_and_push_image(repo_url, tag, path, "operator") - - -def _ignore_error_codes(fn, codes): - try: - fn() - except ApiException as e: - if e.status not in codes: - raise - - -def ignore_if_already_exists(fn): - """ - ignore_if_already_exists accepts a function and calls it, - ignoring an Kubernetes API conflict errors - """ - - return _ignore_error_codes(fn, [409]) - - -def ignore_if_doesnt_exist(fn): - """ - ignore_if_doesnt_exist accepts a function and calls it, - ignoring an Kubernetes API not found errors - """ - return _ignore_error_codes(fn, [404]) - - -def deploy_operator(): - """ - deploy_operator ensures the CRDs are created, and als 
creates all the required ServiceAccounts, Roles - and RoleBindings for the operator, and then creates the operator deployment. - """ - appsv1 = client.AppsV1Api() - corev1 = client.CoreV1Api() - rbacv1 = client.RbacAuthorizationV1Api() - - dev_config = load_config() - _ensure_crds() - - ignore_if_already_exists( - lambda: rbacv1.create_namespaced_role( - dev_config.namespace, _load_operator_role() - ) - ) - ignore_if_already_exists( - lambda: rbacv1.create_namespaced_role_binding( - dev_config.namespace, _load_operator_role_binding() - ) - ) - ignore_if_already_exists( - lambda: corev1.create_namespaced_service_account( - dev_config.namespace, _load_operator_service_account() - ) - ) - ignore_if_already_exists( - lambda: appsv1.create_namespaced_deployment( - dev_config.namespace, _load_operator_deployment() - ) - ) - - -def main(): - config.load_kube_config() - dev_config = load_config() - build_and_push_operator( - dev_config.repo_url, f"{dev_config.repo_url}/mongodb-kubernetes-operator", "." 
- ) - deploy_operator() - - -if __name__ == "__main__": - main() diff --git a/scripts/dev/dev_config.py b/scripts/dev/dev_config.py index c1fa476a5..93476b203 100644 --- a/scripts/dev/dev_config.py +++ b/scripts/dev/dev_config.py @@ -1,4 +1,6 @@ -from typing import Dict, Optional +from __future__ import annotations +from typing import Dict, Optional, List +from enum import Enum import json import os @@ -6,28 +8,157 @@ FULL_CONFIG_PATH = os.path.expanduser(CONFIG_PATH) +class Distro(Enum): + UBUNTU = 0 + UBI = 1 + + @staticmethod + def from_string(distro_name: str) -> Distro: + distro_name = distro_name.lower() + return { + "ubuntu": Distro.UBUNTU, + "ubi": Distro.UBI, + }[distro_name] + + +def get_config_path() -> str: + return os.getenv("MONGODB_COMMUNITY_CONFIG", FULL_CONFIG_PATH) + + class DevConfig: """ DevConfig is a wrapper around the developer configuration file """ - def __init__(self, config): + def __init__(self, config: Dict, distro: Distro): self._config = config + self._distro = distro + self.include_tags: List[str] = [] + self.skip_tags: List[str] = [] + self.gh_run_id = "" + + def ensure_tag_is_run(self, tag: str) -> None: + if tag not in self.include_tags: + self.include_tags.append(tag) + if tag in self.skip_tags: + self.skip_tags.remove(tag) @property - def namespace(self): + def namespace(self) -> str: return self._config["namespace"] @property - def repo_url(self): + def repo_url(self) -> str: return self._config["repo_url"] + @property + def s3_bucket(self) -> str: + return self._config["s3_bucket"] + + @property + def expire_after(self) -> str: + return self._config.get("expire_after", "never") + + @property + def operator_image(self) -> str: + return self._config["operator_image"] + + @property + def operator_image_dev(self) -> str: + return self._get_dev_image("operator_image_dev", "operator_image") + + @property + def e2e_image(self) -> str: + return self._config["e2e_image"] + + @property + def version_upgrade_hook_image(self) -> str: + 
return self._config["version_upgrade_hook_image"] + + @property + def version_upgrade_hook_image_dev(self) -> str: + return self._get_dev_image( + "version_upgrade_hook_image_dev", "version_upgrade_hook_image" + ) + + @property + def readiness_probe_image(self) -> str: + return self._config["readiness_probe_image"] + + # these directories are used from within the E2E tests when running locally. + @property + def role_dir(self) -> str: + if "role_dir" in self._config: + return self._config["role_dir"] + return os.path.join(os.getcwd(), "config", "rbac") + + @property + def deploy_dir(self) -> str: + if "deploy_dir" in self._config: + return self._config["deploy_dir"] + return os.path.join(os.getcwd(), "config", "manager") + + @property + def test_data_dir(self) -> str: + if "test_data_dir" in self._config: + return self._config["test_data_dir"] + return os.path.join(os.getcwd(), "testdata") + + @property + def readiness_probe_image_dev(self) -> str: + return self._get_dev_image("readiness_probe_image_dev", "readiness_probe_image") + + @property + def mongodb_image_name(self) -> str: + return self._config.get("mongodb_image_name", "mongodb-community-server") + + @property + def mongodb_image_repo_url(self) -> str: + return self._config.get("mongodb_image_repo_url", "quay.io/mongodb") + + @property + def agent_image(self) -> str: + return self._config["agent_image"] + + @property + def local_operator(self) -> str: + return self._config["mdb_local_operator"] + + @property + def kube_config(self) -> str: + return self._config["kubeconfig"] + + @property + def agent_image_dev(self) -> str: + return self._get_dev_image("agent_image_dev", "agent_image") + + @property + def image_type(self) -> str: + if self._distro == Distro.UBI: + return "ubi8" + return "ubuntu-2004" + + def ensure_skip_tag(self, tag: str) -> None: + if tag not in self.skip_tags: + self.skip_tags.append(tag) + + def _get_dev_image(self, dev_image: str, image: str) -> str: + if dev_image in self._config: + 
return self._config[dev_image] + return self._config[image] + -def load_config() -> Optional[DevConfig]: - with open(FULL_CONFIG_PATH, "r") as f: - return DevConfig(json.loads(f.read())) +def load_config( + config_file_path: Optional[str] = None, distro: Distro = Distro.UBI +) -> DevConfig: + if config_file_path is None: + config_file_path = get_config_path() - print( - f"No DevConfig found. Please ensure that the configuration file exists at '{FULL_CONFIG_PATH}'" - ) - return None + try: + with open(config_file_path, "r") as f: + return DevConfig(json.loads(f.read()), distro=distro) + except FileNotFoundError: + print( + f"No DevConfig found. Please ensure that the configuration file exists at '{config_file_path}'" + ) + raise diff --git a/scripts/dev/dockerfile_generator.py b/scripts/dev/dockerfile_generator.py deleted file mode 100755 index f981c17d7..000000000 --- a/scripts/dev/dockerfile_generator.py +++ /dev/null @@ -1,67 +0,0 @@ -import jinja2 -import argparse - - -def operator_params(): - return { - "builder": True, - "builder_image": "golang", - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - } - - -def test_runner_params(): - return { - "builder": True, - "builder_image": "golang", # TODO: make this image smaller. There were errors using alpine - "base_image": "registry.access.redhat.com/ubi8/ubi-minimal:latest", - } - - -def e2e_params(): - return { - "base_image": "golang", # TODO: make this image smaller, error: 'exec: "gcc": executable file not found in $PATH' with golang:alpine - } - - -def unit_test_params(): - return { - "base_image": "golang", - } - - -def render(image_name): - param_dict = { - "unittest": unit_test_params(), - "e2e": e2e_params(), - "testrunner": test_runner_params(), - "operator": operator_params(), - } - - if image_name not in param_dict: - raise ValueError( - "Image name: {} is invalid. 
Valid values are {}".format( - image_name, param_dict.keys() - ) - ) - - env = jinja2.Environment() - env.loader = jinja2.FileSystemLoader(searchpath="scripts/dev/templates") - return env.get_template("Dockerfile.{}".format(image_name)).render( - param_dict[image_name] - ) - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("image", help="Type of image for the Dockerfile") - return parser.parse_args() - - -def main(): - args = parse_args() - print(render(args.image)) - - -if __name__ == "__main__": - main() diff --git a/scripts/dev/dockerutil.py b/scripts/dev/dockerutil.py deleted file mode 100644 index 69bb174ac..000000000 --- a/scripts/dev/dockerutil.py +++ /dev/null @@ -1,38 +0,0 @@ -import docker -from dockerfile_generator import render -import os - - -def build_image(repo_url: str, tag: str, path): - """ - build_image builds the image with the given tag - """ - client = docker.from_env() - print(f"Building image: {tag}") - client.images.build(tag=tag, path=path) - print("Successfully built image!") - - -def push_image(tag: str): - """ - push_image pushes the given tag. 
It uses - the current docker environment - """ - client = docker.from_env() - print(f"Pushing image: {tag}") - for line in client.images.push(tag, stream=True): - print(line.decode("utf-8").rstrip()) - - -def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str): - """ - build_and_push_operator creates the Dockerfile for the operator - and pushes it to the target repo - """ - dockerfile_text = render(image_type) - with open(f"{path}/Dockerfile", "w") as f: - f.write(dockerfile_text) - - build_image(repo_url, tag, path) - os.remove(f"{path}/Dockerfile") - push_image(tag) diff --git a/scripts/dev/e2e.py b/scripts/dev/e2e.py index 3abd00dc0..0a8c03df2 100644 --- a/scripts/dev/e2e.py +++ b/scripts/dev/e2e.py @@ -1,181 +1,295 @@ -from build_and_deploy_operator import ( - ignore_if_doesnt_exist, - ignore_if_already_exists, - load_yaml_from_file, -) # TODO: put these function somewhere else -from dockerutil import build_and_push_image -from typing import Dict, Optional -from dev_config import load_config +#!/usr/bin/env python3 + +from kubernetes.client.rest import ApiException + +import k8s_conditions +from typing import Dict +from dev_config import load_config, DevConfig, Distro from kubernetes import client, config import argparse -import time +import sys +import yaml -TEST_RUNNER_NAME = "test-runner" +TEST_POD_NAME = "e2e-test" +TEST_CLUSTER_ROLE_NAME = "e2e-test" +TEST_CLUSTER_ROLE_BINDING_NAME = "e2e-test" +TEST_SERVICE_ACCOUNT_NAME = "e2e-test" -def _load_testrunner_service_account() -> Optional[Dict]: - return load_yaml_from_file("deploy/testrunner/service_account.yaml") +def load_yaml_from_file(path: str) -> Dict: + with open(path, "r") as f: + return yaml.full_load(f.read()) -def _load_testrunner_role() -> Optional[Dict]: - return load_yaml_from_file("deploy/testrunner/role.yaml") +def _load_test_service_account() -> Dict: + return load_yaml_from_file("deploy/e2e/service_account.yaml") -def _load_testrunner_role_binding() -> Optional[Dict]: - 
return load_yaml_from_file("deploy/testrunner/role_binding.yaml") +def _load_test_role() -> Dict: + return load_yaml_from_file("deploy/e2e/role.yaml") -def _load_testrunner_cluster_role_binding() -> Optional[Dict]: - return load_yaml_from_file("deploy/testrunner/cluster_role_binding.yaml") +def _load_test_role_binding() -> Dict: + return load_yaml_from_file("deploy/e2e/role_binding.yaml") -def _prepare_testrunner_environment(): +def _prepare_test_environment(config_file: str) -> None: """ - _prepare_testrunner_environment ensures the ServiceAccount, - Role and ClusterRole and bindings are created for the test runner. + _prepare_test_environment ensures that the old test pod is deleted + and that namespace, cluster role, cluster role binding and service account + are created for the test pod. """ rbacv1 = client.RbacAuthorizationV1Api() corev1 = client.CoreV1Api() - dev_config = load_config() + dev_config = load_config(config_file) - _delete_testrunner_pod() + _delete_test_pod(config_file) - print("Creating Role") - ignore_if_already_exists( - lambda: rbacv1.create_namespaced_role( - dev_config.namespace, _load_testrunner_role() + print("Creating Namespace") + k8s_conditions.ignore_if_already_exists( + lambda: corev1.create_namespace( + client.V1Namespace(metadata=dict(name=dev_config.namespace)) ) ) - print("Creating Role Binding") - ignore_if_already_exists( - lambda: rbacv1.create_namespaced_role_binding( - dev_config.namespace, _load_testrunner_role_binding() - ) + print("Creating Cluster Role") + k8s_conditions.ignore_if_already_exists( + lambda: rbacv1.create_cluster_role(_load_test_role()) ) print("Creating Cluster Role Binding") - ignore_if_already_exists( - lambda: rbacv1.create_cluster_role_binding( - _load_testrunner_cluster_role_binding() - ) - ) + role_binding = _load_test_role_binding() + # set namespace specified in config.json + role_binding["subjects"][0]["namespace"] = dev_config.namespace - print("Creating ServiceAccount") - 
ignore_if_already_exists( - lambda: corev1.create_namespaced_service_account( - dev_config.namespace, _load_testrunner_service_account() - ) + k8s_conditions.ignore_if_already_exists( + lambda: rbacv1.create_cluster_role_binding(role_binding) ) + print("Creating Service Account") + service_account = _load_test_service_account() + # set namespace specified in config.json + service_account["metadata"]["namespace"] = dev_config.namespace -def build_and_push_testrunner(repo_url: str, tag: str, path: str): - """ - build_and_push_testrunner builds and pushes the test runner - image. - """ - return build_and_push_image(repo_url, tag, path, "testrunner") - - -def build_and_push_e2e(repo_url: str, tag: str, path: str): - """ - build_and_push_e2e builds and pushes the e2e image. - """ - return build_and_push_image(repo_url, tag, path, "e2e") - - -def _delete_testrunner_pod() -> None: - """ - _delete_testrunner_pod deletes the test runner pod - if it already exists. - """ - dev_config = load_config() - corev1 = client.CoreV1Api() - ignore_if_doesnt_exist( - lambda: corev1.delete_namespaced_pod(TEST_RUNNER_NAME, dev_config.namespace) + k8s_conditions.ignore_if_already_exists( + lambda: corev1.create_namespaced_service_account( + dev_config.namespace, service_account + ) ) -def create_test_runner_pod(test: str): - """ - create_test_runner_pod creates the pod which will run all of the tests. 
- """ - dev_config = load_config() +def create_test_pod(args: argparse.Namespace, dev_config: DevConfig) -> None: corev1 = client.CoreV1Api() - pod_body = _get_testrunner_pod_body(test) - return corev1.create_namespaced_pod(dev_config.namespace, body=pod_body) - - -def _get_testrunner_pod_body(test: str) -> Dict: - dev_config = load_config() - return { + test_pod = { "kind": "Pod", - "metadata": {"name": TEST_RUNNER_NAME, "namespace": dev_config.namespace,}, + "metadata": { + "name": TEST_POD_NAME, + "namespace": dev_config.namespace, + "labels": {"e2e-test": "true"}, + }, "spec": { "restartPolicy": "Never", - "serviceAccountName": TEST_RUNNER_NAME, + "serviceAccountName": "e2e-test", "containers": [ { - "name": TEST_RUNNER_NAME, - "image": f"{dev_config.repo_url}/{TEST_RUNNER_NAME}", + "name": TEST_POD_NAME, + "image": f"{dev_config.repo_url}/{dev_config.e2e_image}:{args.tag}", "imagePullPolicy": "Always", + "env": [ + { + "name": "CLUSTER_WIDE", + "value": f"{args.cluster_wide}", + }, + { + "name": "OPERATOR_IMAGE", + "value": f"{dev_config.repo_url}/{dev_config.operator_image_dev}:{args.tag}", + }, + { + "name": "AGENT_IMAGE", + "value": f"{dev_config.repo_url}/{dev_config.agent_image_dev}:{args.tag}", + }, + { + "name": "TEST_NAMESPACE", + "value": dev_config.namespace, + }, + { + "name": "VERSION_UPGRADE_HOOK_IMAGE", + "value": f"{dev_config.repo_url}/{dev_config.version_upgrade_hook_image_dev}:{args.tag}", + }, + { + "name": "READINESS_PROBE_IMAGE", + "value": f"{dev_config.repo_url}/{dev_config.readiness_probe_image_dev}:{args.tag}", + }, + { + "name": "MONGODB_IMAGE", + "value": f"{dev_config.mongodb_image_name}", + }, + { + "name": "MONGODB_REPO_URL", + "value": f"{dev_config.mongodb_image_repo_url}", + }, + { + "name": "PERFORM_CLEANUP", + "value": f"{args.perform_cleanup}", + }, + ], "command": [ - "./runner", - "--operatorImage", - f"{dev_config.repo_url}/mongodb-kubernetes-operator", - "--testImage", - f"{dev_config.repo_url}/e2e", - f"--test={test}", 
- f"--namespace={dev_config.namespace}", + "go", + "test", + "-v", + "-timeout=45m", + "-failfast", + f"./test/e2e/{args.test}", ], } ], }, } + if not k8s_conditions.wait( + lambda: corev1.list_namespaced_pod( + dev_config.namespace, + field_selector=f"metadata.name=={TEST_POD_NAME}", + ), + lambda pod_list: len(pod_list.items) == 0, + timeout=30, + sleep_time=0.5, + ): + raise Exception( + "Execution timed out while waiting for the existing pod to be deleted" + ) + if not k8s_conditions.call_eventually_succeeds( + lambda: corev1.create_namespaced_pod(dev_config.namespace, body=test_pod), + sleep_time=10, + timeout=60, + exceptions_to_ignore=ApiException, + ): + raise Exception("Could not create test pod!") -def wait_for_pod_to_be_running(corev1, name, namespace): + +def wait_for_pod_to_be_running( + corev1: client.CoreV1Api, name: str, namespace: str +) -> None: print("Waiting for pod to be running") - for i in range(10): - try: - pod = corev1.read_namespaced_pod(name, namespace) - if pod.status.phase == "Running": - return True - except ApiException as e: - pass - time.sleep(5) - raise Exception("Pod never got into Running state!") - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("test", help="Name of the test to run") - return parser.parse_args() + if not k8s_conditions.wait( + lambda: corev1.read_namespaced_pod(name, namespace), + lambda pod: pod.status.phase == "Running", + sleep_time=5, + timeout=240, + exceptions_to_ignore=ApiException, + ): + pod = corev1.read_namespaced_pod(name, namespace) + raise Exception("Pod never got into Running state: {}".format(pod)) + print("Pod is running") -def main(): - args = parse_args() - config.load_kube_config() - dev_config = load_config() - build_and_push_testrunner( - dev_config.repo_url, f"{dev_config.repo_url}/{TEST_RUNNER_NAME}", "." 
+ +def _delete_test_environment(config_file: str) -> None: + """ + _delete_test_environment ensures that the cluster role, cluster role binding and service account + for the test pod are deleted. + """ + rbacv1 = client.RbacAuthorizationV1Api() + corev1 = client.CoreV1Api() + dev_config = load_config(config_file) + + k8s_conditions.ignore_if_doesnt_exist( + lambda: rbacv1.delete_cluster_role(TEST_CLUSTER_ROLE_NAME) ) - build_and_push_e2e(dev_config.repo_url, f"{dev_config.repo_url}/e2e", ".") - _prepare_testrunner_environment() + k8s_conditions.ignore_if_doesnt_exist( + lambda: rbacv1.delete_cluster_role_binding(TEST_CLUSTER_ROLE_BINDING_NAME) + ) + + k8s_conditions.ignore_if_doesnt_exist( + lambda: corev1.delete_namespaced_service_account( + TEST_SERVICE_ACCOUNT_NAME, dev_config.namespace + ) + ) - pod = create_test_runner_pod(args.test) + +def _delete_test_pod(config_file: str) -> None: + """ + _delete_test_pod deletes the test pod. + """ + dev_config = load_config(config_file) corev1 = client.CoreV1Api() - wait_for_pod_to_be_running(corev1, TEST_RUNNER_NAME, dev_config.namespace) + k8s_conditions.ignore_if_doesnt_exist( + lambda: corev1.delete_namespaced_pod(TEST_POD_NAME, dev_config.namespace) + ) - print(f"Running test: {args.test}") + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser() + parser.add_argument("--test", help="Name of the test to run") + parser.add_argument( + "--tag", + help="Tag for the images, it will be the same for all images", + type=str, + default="latest", + ) + parser.add_argument( + "--skip-dump-diagnostic", + help="Skip the dump of diagnostic information into files", + action="store_true", + ) + parser.add_argument( + "--perform-cleanup", + help="Cleanup the context after executing the tests", + action="store_true", + ) + parser.add_argument( + "--cluster-wide", + help="Watch all namespaces", + type=lambda x: x.lower() == "true", + ) + parser.add_argument( + "--distro", + help="The distro of images that should 
be used", + type=str, + default="ubi", + ) + parser.add_argument("--config_file", help="Path to the config file") + return parser.parse_args() + + +def prepare_and_run_test(args: argparse.Namespace, dev_config: DevConfig) -> None: + _prepare_test_environment(args.config_file) + create_test_pod(args, dev_config) + corev1 = client.CoreV1Api() + + wait_for_pod_to_be_running( + corev1, + TEST_POD_NAME, + dev_config.namespace, + ) # stream all of the pod output as the pod is running for line in corev1.read_namespaced_pod_log( - TEST_RUNNER_NAME, dev_config.namespace, follow=True, _preload_content=False + TEST_POD_NAME, dev_config.namespace, follow=True, _preload_content=False ).stream(): print(line.decode("utf-8").rstrip()) +def main() -> int: + args = parse_args() + config.load_kube_config() + + dev_config = load_config(args.config_file, Distro.from_string(args.distro)) + prepare_and_run_test(args, dev_config) + + corev1 = client.CoreV1Api() + if not k8s_conditions.wait( + lambda: corev1.read_namespaced_pod(TEST_POD_NAME, dev_config.namespace), + lambda pod: pod.status.phase == "Succeeded", + sleep_time=5, + timeout=60, + exceptions_to_ignore=ApiException, + ): + return 1 + _delete_test_environment(args.config_file) + return 0 + + if __name__ == "__main__": - main() + sys.exit(main()) diff --git a/scripts/dev/edit_cluster_config.sh b/scripts/dev/edit_cluster_config.sh new file mode 100755 index 000000000..2efcb9be7 --- /dev/null +++ b/scripts/dev/edit_cluster_config.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash + +# This script is intended for editing the raw cluster configuration that is used by the automation agents. +# For example if we want to test new features implemented by the automation agent, we can check its behavior by configuring new settings directly in the raw cluster config. +# Steps to do that: +# 1. Deploy replica set, named here "my-replica-set" in "mongodb" namespace. +# 2. Stop the operator, e.g. by scaling operator deployment to 0. 
Without this step the operator will overwrite any changes made to the cluster config in the secret. +# 3. Edit the cluster config by running: ./edit_cluster_config.sh mongodb my-replica-set, or EDITOR=my-editor ./edit_cluster_config.sh mongodb my-replica-set (if you don't want to use vim) +# 4. It will download the cluster config from the secret and open it in the editor. +# 5. Make some changes to the cluster config, e.g. add new settings. Remember to increment version field, otherwise the changes won't be applied. +# 6. Save the changes and exit the editor. The config will be checked if it's a correct json and will be uploaded to the secret. +# 7. Observe the changes made by the mongodb-agent. Be aware, that starting the operator again will overwrite the changes. + +namespace=$1 +replicaset_name=$2 +secret_name=${replicaset_name}-config + +if [[ "${namespace}" == "" || "${replicaset_name}" == "" ]]; then + echo "Edit automation config secret for given replicaset." + echo "It looks for the secret named '-secret' in the given namespace." + echo "Requires jq to be installed and uses current kubectl context." 
+ echo + echo "Usage:" + printf "\t%s \n" "$(basename "$0")" + printf "\tEDITOR= %s to edit cluster config with a different editor.\n" "$(basename "$0")" + + exit 1 +fi + +cluster_config_file=$(mktemp ./edit_cluster_config.sh.cluster_config.XXXXX) +# rename to have .json extension for syntax highlighting in the editor +mv "${cluster_config_file}" "${cluster_config_file}.json" +cluster_config_file="${cluster_config_file}.json" +cluster_config_file_base64="${cluster_config_file}.base64" + +function cleanup() { + rm -f "${cluster_config_file}" "${cluster_config_file_base64}" +} +trap cleanup EXIT + +function get_secret() { + local namespace=$1 + local secret_name=$2 + kubectl get secret "${secret_name}" -n "${namespace}" -o json | jq -r '.data."cluster-config.json"' | base64 -D +} + +echo "Saving config to a temporary file: ${cluster_config_file}" +get_secret "${namespace}" "${secret_name}" | jq . -r >"${cluster_config_file}" +error_code=$? + +if [[ ${error_code} != 0 ]]; then + echo "Cluster config is invalid, edit without parsing with jq:" + get_secret "${namespace}" "${secret_name}" >"${cluster_config_file}" +fi + +if [[ "${EDITOR}" == "" ]]; then + EDITOR=vim +fi + +old_config=$(cat "${cluster_config_file}") +while true; do + ${EDITOR} "${cluster_config_file}" + new_config=$(jq . < "${cluster_config_file}") + error_code=$? + if [[ ${error_code} != 0 ]]; then + read -n 1 -rsp $"Press any key to continue editing or ^C to abort..." + echo + continue + fi + break +done + +if diff -q <(echo -n "${old_config}") <(echo -n "${new_config}"); then + echo "No changes made to cluster config." 
+ exit 0 +else + echo "Cluster config was changed with following diff:" + diff --normal <(echo -n "${old_config}") <(echo -n "${new_config}") +fi + +base64 < "${cluster_config_file}" > "${cluster_config_file_base64}" + +# shellcheck disable=SC2086 +patch=$(cat < str: + s = io.StringIO() + yaml.dump(github_action, s) + s.seek(0) + return """ +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. +# +################################################################################## + +{}""".format( + s.read() + ) + + +def main() -> int: + for template in template_mapping: + github_action = template_github_action(template) + with open(template_mapping[template], "w+") as f: + f.write(_prepend_auto_generated_message(github_action)) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/dev/get_e2e_env_vars.py b/scripts/dev/get_e2e_env_vars.py new file mode 100755 index 000000000..cea1ac4e0 --- /dev/null +++ b/scripts/dev/get_e2e_env_vars.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +import sys +from typing import Dict +import os.path + + +from dev_config import load_config, DevConfig, Distro + + +def _get_e2e_test_envs(dev_config: DevConfig) -> Dict[str, str]: + """ + _get_e2e_test_envs returns a dictionary of all the required environment variables + that need to be set in order to run a local e2e test. 
+ + :param dev_config: The local dev config + :return: A diction of env vars to be set + """ + cleanup = False + if len(sys.argv) > 1: + cleanup = sys.argv[1] == "true" + return { + "ROLE_DIR": dev_config.role_dir, + "DEPLOY_DIR": dev_config.deploy_dir, + "OPERATOR_IMAGE": f"{dev_config.repo_url}/{dev_config.operator_image}", + "VERSION_UPGRADE_HOOK_IMAGE": f"{dev_config.repo_url}/{dev_config.version_upgrade_hook_image}", + "AGENT_IMAGE": f"{dev_config.repo_url}/{dev_config.agent_image}", + "TEST_DATA_DIR": dev_config.test_data_dir, + "TEST_NAMESPACE": dev_config.namespace, + "READINESS_PROBE_IMAGE": f"{dev_config.repo_url}/{dev_config.readiness_probe_image}", + "PERFORM_CLEANUP": "true" if cleanup else "false", + "WATCH_NAMESPACE": dev_config.namespace, + "MONGODB_IMAGE": dev_config.mongodb_image_name, + "MONGODB_REPO_URL": dev_config.mongodb_image_repo_url, + "HELM_CHART_PATH": os.path.abspath("./helm-charts/charts/community-operator"), + "MDB_IMAGE_TYPE": dev_config.image_type, + "MDB_LOCAL_OPERATOR": dev_config.local_operator, + "KUBECONFIG": dev_config.kube_config, + } + + +# convert all values in config.json to env vars. +# this can be used to provide configuration for e2e tests. +def main() -> int: + dev_config = load_config(distro=Distro.UBI) + for k, v in _get_e2e_test_envs(dev_config).items(): + print(f"export {k.upper()}={v}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/dev/install_prerequisites.sh b/scripts/dev/install_prerequisites.sh new file mode 100755 index 000000000..b630f7dfb --- /dev/null +++ b/scripts/dev/install_prerequisites.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +if ! 
command -v telepresence &> /dev/null; then \ + echo "Telepresence not found, installing now" + sudo curl -fL https://app.getambassador.io/download/tel2/"$(go env GOHOSTOS)"/"$(go env GOHOSTARCH)"/latest/telepresence -o /tmp/telepresence + sudo mv /tmp/telepresence /usr/local/bin/telepresence + sudo chmod a+x /usr/local/bin/telepresence +else + echo "Telepresence already installed." +fi diff --git a/scripts/dev/k8s_conditions.py b/scripts/dev/k8s_conditions.py new file mode 100644 index 000000000..207ca3667 --- /dev/null +++ b/scripts/dev/k8s_conditions.py @@ -0,0 +1,98 @@ +import time +from typing import Callable, Tuple, List, Optional, Any + +from kubernetes.client.rest import ApiException + +# time to sleep between retries +SLEEP_TIME = 2 +# no timeout (loop forever) +INFINITY = -1 + + +def _current_milliseconds() -> int: + return int(round(time.time() * 1000)) + + +def wait( + fn: Callable, + condition: Callable, + exceptions_to_ignore: Optional[Tuple[BaseException]] = None, + codes_to_ignore: Optional[List[int]] = None, + sleep_time: float = SLEEP_TIME, + timeout: int = INFINITY, +) -> bool: + """ + wait_for_condition accepts a function fn and a function condition, + it periodically calls the function fn and then applies the condition function on the result + until it returns True or we reach timeout + + exceptions_to_ignore is a tuple of Exceptions to ignore is raised by the call to fn + If ApiException is not ignored, if raised by the call to fn codes in codes_to_ignore are ignored + """ + start_time = _current_milliseconds() + end = start_time + (timeout * 1000) + + while _current_milliseconds() < end or timeout <= 0: + res = None + try: + res = _ignore_error_codes(fn, codes_to_ignore) + except exceptions_to_ignore: # type: ignore + # The above comment is due to an issue in mypy with tuple of Exceptions + pass + if res is not None and condition(res): + return True + + time.sleep(sleep_time) + + return False + + +def call_eventually_succeeds( + fn: Callable, 
+ sleep_time: float = SLEEP_TIME, + timeout: int = INFINITY, + exceptions_to_ignore: Optional[Tuple[BaseException]] = None, +) -> bool: + """ + call_eventually_succeeds is similar to wait but in this case we ignore the value returned by the call to fn() + + It periodically calls fn and if the call raises an exception contained in exceptions_to_ignore, it tries + again after a sleep until it either succeeds or we reach timeout + """ + start_time = _current_milliseconds() + end = start_time + (timeout * 1000) + + while _current_milliseconds() < end or timeout <= 0: + try: + fn() + return True + except exceptions_to_ignore: # type: ignore + pass + + time.sleep(sleep_time) + + return False + + +def _ignore_error_codes(fn: Callable, codes: Optional[List[int]]) -> Any: + try: + return fn() + except ApiException as e: + if codes is not None and e.status not in codes: + raise + + +def ignore_if_already_exists(fn: Callable) -> Any: + """ + ignore_if_already_exists accepts a function and calls it, + ignoring an Kubernetes API conflict errors + """ + return _ignore_error_codes(fn, [409]) + + +def ignore_if_doesnt_exist(fn: Callable) -> Any: + """ + ignore_if_doesnt_exist accepts a function and calls it, + ignoring an Kubernetes API not found errors + """ + return _ignore_error_codes(fn, [404]) diff --git a/scripts/dev/k8s_request_data.py b/scripts/dev/k8s_request_data.py new file mode 100644 index 000000000..90b6b61cc --- /dev/null +++ b/scripts/dev/k8s_request_data.py @@ -0,0 +1,105 @@ +from kubernetes.client.rest import ApiException +from kubernetes import client + +from typing import Optional, List, Dict + + +def get_crds() -> Optional[Dict]: + crdv1 = client.ApiextensionsV1beta1Api() + try: + crd = crdv1.list_custom_resource_definition(pretty="true") + except ApiException as e: + print("Exception when calling list_custom_resource_definition: %s\n" % e) + return None + return crd.to_dict() + + +def get_all_mongodb_namespaced(namespace: str) -> Optional[List]: + customv1 = 
client.CustomObjectsApi() + try: + return list( + customv1.list_namespaced_custom_object( + "mongodbcommunity.mongodb.com", + "v1", + namespace, + "mongodbcommunity", + pretty=True, + )["items"] + ) + except ApiException as e: + print("Exception when calling get_namespaced_custom_object %s\n" % e) + return None + + +def get_persistent_volumes() -> Optional[Dict]: + corev1 = client.CoreV1Api() + try: + pv = corev1.list_persistent_volume(pretty="true") + except ApiException as e: + print("Exception when calling list_persistent_volume %s\n" % e) + return None + return pv.to_dict() + + +def get_stateful_sets_namespaced(namespace: str) -> Optional[Dict]: + av1beta1 = client.AppsV1Api() + try: + sst = av1beta1.list_namespaced_stateful_set(namespace, pretty="true") + except ApiException as e: + print("Exception when calling list_namespaced_stateful_set: %s\n" % e) + return None + return sst.to_dict() + + +def get_configmap_namespaced(namespace: str, name: str) -> Optional[Dict]: + corev1 = client.CoreV1Api() + try: + config_map = corev1.read_namespaced_config_map(name, namespace, pretty="true") + except ApiException as e: + print("Exception when calling read_namespaced_config_map: %s\n" % e) + return None + return config_map.to_dict() + + +def get_secret_namespaced(namespace: str, name: str) -> Optional[Dict]: + corev1 = client.CoreV1Api() + try: + secret = corev1.read_namespaced_secret(name, namespace, pretty="true") + except ApiException as e: + print("Exception when calling read_namespaced_secret: %s\n" % e) + return None + return secret.to_dict() + + +def get_pods_namespaced(namespace: str) -> Optional[List]: + corev1 = client.CoreV1Api() + try: + pods = corev1.list_namespaced_pod(namespace) + except ApiException as e: + print("Exception when calling list_namespaced_pod: %s\n" % e) + return None + return pods.items + + +def get_pod_namespaced(namespace: str, pod_name: str) -> Optional[client.V1Pod]: + corev1 = client.CoreV1Api() + try: + pod = 
corev1.read_namespaced_pod(name=pod_name, namespace=namespace) + except ApiException as e: + print("Exception when calling read_namespaced_pod: %s\n" % e) + return None + return pod + + +def get_pod_log_namespaced( + namespace: str, pod_name: str, container_name: str +) -> Optional[str]: + corev1 = client.CoreV1Api() + try: + log = corev1.read_namespaced_pod_log( + name=pod_name, namespace=namespace, pretty="true", container=container_name + ) + except ApiException as e: + print("Exception when calling read_namespaced_pod_log: %s\n" % e) + return None + return log diff --git a/scripts/dev/requirements.txt b/scripts/dev/requirements.txt deleted file mode 100644 index 386dd234d..000000000 --- a/scripts/dev/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -docker=4.2.0 -kubernetes=10.0.1 -jinja2==2.11.2 diff --git a/scripts/dev/run_e2e_gh.sh b/scripts/dev/run_e2e_gh.sh new file mode 100755 index 000000000..52297c97e --- /dev/null +++ b/scripts/dev/run_e2e_gh.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +set -Eeou pipefail + +test_name="${1}" +current_branch="$(git branch --show-current)" + +gh workflow run e2e-dispatch.yml -f "test-name=${test_name}" --ref "${current_branch}" + +echo "Waiting for task to start..." +sleep 2 + +run_id="$(gh run list --workflow=e2e-dispatch.yml | grep workflow_dispatch | grep -Eo "[0-9]{9,11}" | head -n 1)" + +gh run view "${run_id}" --web diff --git a/scripts/dev/setup_kind_cluster.sh b/scripts/dev/setup_kind_cluster.sh index b974a076a..3178f2878 100755 --- a/scripts/dev/setup_kind_cluster.sh +++ b/scripts/dev/setup_kind_cluster.sh @@ -1,6 +1,54 @@ #!/usr/bin/env bash set -Eeou pipefail +#### +# This file is copy-pasted from https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/scripts/dev/setup_kind_cluster.sh +# Do not edit !!! +#### + +function usage() { + echo "Deploy local registry and create kind cluster configured to use this registry. Local Docker registry is deployed at localhost:5000. 
+ +Usage: + setup_kind_cluster.sh [-n ] [-r] + setup_kind_cluster.sh [-h] + setup_kind_cluster.sh [-n ] [-e] [-r] + +Options: + -n (optional) Set kind cluster name to . Creates kubeconfig in ~/.kube/. The default name is 'kind' if not set. + -e (optional) Export newly created kind cluster's credentials to ~/.kube/ and set current kubectl context. + -h (optional) Shows this screen. + -r (optional) Recreate cluster if needed + -p (optional) Network reserved for Pods, e.g. 10.244.0.0/16 + -s (optional) Network reserved for Services, e.g. 10.96.0.0/16 +" + exit 0 +} + +cluster_name=${CLUSTER_NAME:-"kind"} +export_kubeconfig=0 +recreate=0 +pod_network="10.244.0.0/16" +service_network="10.96.0.0/16" +while getopts ':p:s:n:her' opt; do + case $opt in + (n) cluster_name=$OPTARG;; + (e) export_kubeconfig=1;; + (r) recreate=1;; + (p) pod_network=$OPTARG;; + (s) service_network=$OPTARG;; + (h) usage;; + (*) usage;; + esac +done +shift "$((OPTIND-1))" + +kubeconfig_path="$HOME/.kube/${cluster_name}" + +# create the kind network early unless it already exists. +# it would normally be created automatically by kind but we +# need it earlier to get the IP address of our registry. 
+docker network create kind || true # adapted from https://kind.sigs.k8s.io/docs/user/local-registry/ # create registry container unless it already exists @@ -8,28 +56,61 @@ reg_name='kind-registry' reg_port='5000' running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" if [ "${running}" != 'true' ]; then - docker run \ - -d --restart=always -p "${reg_port}:${reg_port}" --name "${reg_name}" \ - registry:2 + docker run -d --restart=always -p "127.0.0.1:${reg_port}:5000" --network kind --name "${reg_name}" registry:2 fi -ip="$(docker inspect kind-registry -f {{.NetworkSettings.IPAddress}})" +if [ "${recreate}" != 0 ]; then + kind delete cluster --name "${cluster_name}" || true +fi # create a cluster with the local registry enabled in containerd -cat <${temp} -contents="$(cat ${temp})" -kubectl create cm kube-config --from-literal=kubeconfig="${contents}" -rm ${temp} +# Add the registry config to the nodes +# +# This is necessary because localhost resolves to loopback addresses that are +# network-namespace local. +# In other words: localhost in the container is not localhost on the host. +# +# We want a consistent name that works from both ends, so we tell containerd to +# alias localhost:${reg_port} to the registry container when pulling images +REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}" +for node in $(kind get nodes --name "${cluster_name}"); do + docker exec "${node}" mkdir -p "${REGISTRY_DIR}" + cat < /dev/null; then - echo "Installing goimports" - GO111MODULE=off go get golang.org/x/tools/cmd/goimports -fi +function go_imports() { + if ! type goimports &> /dev/null; then + echo "Installing goimports" + go install golang.org/x/tools/cmd/goimports + fi + + # Formats each file that was changed. 
+ for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.go$') + do + goimports -w "${file}" + git add "$file" + done + +} + +function generate_crd(){ + echo "Generating CRD" + make manifests + git add config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml +} + +function mypy_check() +{ + local exit_status=0 + # Mypy doesn't support recursive traversal of directories + # So we manually call it on every staged python file + echo "Running mypy on staged python files" + for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.py$') + do + echo "Analyzing $file ..." + # We ignore missing import otherwise mypy will complain + # about 3rd party libraries not providing type hints + if ! mypy --disallow-untyped-calls --disallow-untyped-defs --disallow-incomplete-defs --ignore-missing-imports "${file}"; then + exit_status=1 + fi + done + return $exit_status +} + +function go_linting() { + dirs_to_analyze=() + for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.go$') + do + dirs_to_analyze+=("$(dirname "${file}")" ) + done + if [ ${#dirs_to_analyze[@]} -ne 0 ]; then + mapfile -t dirs_to_analyze < <(printf '%s\n' "${dirs_to_analyze[@]}" | sort -u) + echo "Running golangci-lint on staged files" + local exit_status=0 + for file in "${dirs_to_analyze[@]}" + do + if ! golangci-lint run "${file}"; then + exit_status=1 + fi + done + return $exit_status + fi -# Formats each file that was changed. 
-for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.go$') -do - goimports -w "${file}" - git add "$file" -done + return 0 +} + +function black_formatting() +{ + # Black formatting of every python file that was changed + for file in $(git diff --cached --name-only --diff-filter=ACM | grep '\.py$') + do + black -q "$file" + git add "$file" + done +} + +function generate_github_actions(){ + scripts/dev/generate_github_actions.py + git add .github/workflows +} + +generate_github_actions +generate_crd +go_imports +black_formatting +if ! mypy_check; then + echo "MyPy returned some errors, please correct them" + echo "Commit aborted" + # In some cases we might encounter mypy errors that we do not + # actually treat as such. So we provide a link to the dev + # for ignoring them through code annotation + echo "If some of the errors reported are false positives "\ + "and should be ignored, mypy provides a way to silence "\ + "errors: https://mypy.readthedocs.io/en/stable/common_issues.html#spurious-errors-and-locally-silencing-the-checker" + echo "Please use this only for errors that you are sure are"\ + "false positives." + exit 1 +fi +if ! go_linting; then + echo "Golancli-lint returned some errors, please correct them" + echo "Commit aborted" + # In some cases we might encounter mypy errors that we do not + # actually treat as such. So we provide a link to the dev + # for ignoring them through code annotation + echo "If some of the errors reported are false positives "\ + "and should be ignored, golanci-lint provides a way to silence "\ + "errors: https://golangci-lint.run/usage/false-positives/" + echo "Please use this only for errors that you are sure are"\ + "false positives." 
+ exit 1 +fi diff --git a/scripts/git-hooks/pre-merge-commit b/scripts/git-hooks/pre-merge-commit new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/git-hooks/pre-push b/scripts/git-hooks/pre-push new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/git-hooks/pre-rebase b/scripts/git-hooks/pre-rebase new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/git-hooks/pre-receive b/scripts/git-hooks/pre-receive new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/git-hooks/prepare-commit-msg b/scripts/git-hooks/prepare-commit-msg new file mode 100755 index 000000000..e69de29bb diff --git a/scripts/git-hooks/update b/scripts/git-hooks/update new file mode 100755 index 000000000..e69de29bb diff --git a/test/e2e/client.go b/test/e2e/client.go new file mode 100644 index 000000000..478e3b81c --- /dev/null +++ b/test/e2e/client.go @@ -0,0 +1,226 @@ +package e2eutil + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + // Needed for running tests on GCP + "k8s.io/client-go/dynamic" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +// TestClient is the global client used by e2e tests. +var TestClient *E2ETestClient + +// OperatorNamespace tracks the namespace in which the operator is deployed. +var OperatorNamespace string + +// CleanupOptions are a way to register cleanup functions on object creation using the test client. 
+type CleanupOptions struct { + TestContext *TestContext +} + +// ApplyToCreate is a required method for CleanupOptions passed to the Create api. +func (*CleanupOptions) ApplyToCreate(*client.CreateOptions) {} + +// TestContext tracks cleanup functions to be called at the end of a test. +type TestContext struct { + Ctx context.Context + + // shouldPerformCleanup indicates whether or not cleanup should happen after this test + shouldPerformCleanup bool + + // ExecutionId is a unique identifier for this test run. + ExecutionId string + + // cleanupFuncs is a list of functions which will clean up resources + // after the test ends. + cleanupFuncs []func() error + + // t is the testing.T which will be used for the duration of the test. + t *testing.T +} + +// NewContext creates a context. +func NewContext(ctx context.Context, t *testing.T, performCleanup bool) (*TestContext, error) { + testId, err := generate.RandomValidDNS1123Label(10) + if err != nil { + return nil, err + } + + return &TestContext{Ctx: ctx, t: t, ExecutionId: testId, shouldPerformCleanup: performCleanup}, nil +} + +// Teardown is called at the end of a test. +func (ctx *TestContext) Teardown() { + if !ctx.shouldPerformCleanup { + return + } + for _, fn := range ctx.cleanupFuncs { + err := fn() + if err != nil { + fmt.Println(err) + } + } +} + +// AddCleanupFunc adds a cleanup function to the context to be called at the end of a test. +func (ctx *TestContext) AddCleanupFunc(fn func() error) { + ctx.cleanupFuncs = append(ctx.cleanupFuncs, fn) +} + +// E2ETestClient is a wrapper on client.Client that provides cleanup functionality. +type E2ETestClient struct { + Client client.Client + // We need the core API client for some operations that the controller-runtime client doesn't support + // (e.g. exec into the container) + CoreV1Client corev1client.CoreV1Client + DynamicClient dynamic.Interface + restConfig *rest.Config +} + +// NewE2ETestClient creates a new E2ETestClient. 
+func newE2ETestClient(config *rest.Config, scheme *runtime.Scheme) (*E2ETestClient, error) { + cli, err := client.New(config, client.Options{Scheme: scheme}) + if err != nil { + return nil, err + } + coreClient, err := corev1client.NewForConfig(config) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &E2ETestClient{Client: cli, CoreV1Client: *coreClient, DynamicClient: dynamicClient, restConfig: config}, err +} + +// Create wraps client.Create to provide post-test cleanup functionality. +func (c *E2ETestClient) Create(ctx context.Context, obj client.Object, cleanupOptions *CleanupOptions) error { + err := c.Client.Create(ctx, obj) + if err != nil { + return err + } + + if cleanupOptions == nil || cleanupOptions.TestContext == nil { + return nil + } + + cleanupOptions.TestContext.AddCleanupFunc(func() error { + err := TestClient.Delete(ctx, obj) + if err != nil && !errors.IsNotFound(err) { + return err + } + return nil + }) + + return nil +} + +// Delete wraps client.Delete. +func (c *E2ETestClient) Delete(ctx context.Context, obj client.Object) error { + return c.Client.Delete(ctx, obj) +} + +// Update wraps client.Update. +func (c *E2ETestClient) Update(ctx context.Context, obj client.Object) error { + return c.Client.Update(ctx, obj) +} + +// Get wraps client.Get. +func (c *E2ETestClient) Get(ctx context.Context, key types.NamespacedName, obj client.Object) error { + return c.Client.Get(ctx, key, obj) +} + +func (c *E2ETestClient) Execute(ctx context.Context, pod corev1.Pod, containerName, command string) (string, error) { + req := c.CoreV1Client.RESTClient(). + Post(). + Namespace(pod.Namespace). + Resource("pods"). + Name(pod.Name). + SubResource("exec"). 
+ VersionedParams(&corev1.PodExecOptions{ + Container: containerName, + Command: []string{"/bin/sh", "-c", command}, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: true, + }, scheme.ParameterCodec) + + buf := &bytes.Buffer{} + errBuf := &bytes.Buffer{} + exec, err := remotecommand.NewSPDYExecutor(c.restConfig, "POST", req.URL()) + if err != nil { + return "", err + } + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdout: buf, + Stderr: errBuf, + }) + if err != nil { + return "", fmt.Errorf(`failed executing command "%s" on %v/%v: %s ("%s")`, command, pod.Namespace, pod.Name, err, errBuf.String()) + } + + if errBuf.String() != "" { + return buf.String(), fmt.Errorf("remote command %s on %v/%v raised an error: %s", command, pod.Namespace, pod.Name, errBuf.String()) + } + return buf.String(), nil +} + +// RunTest is the main entry point function for an e2e test. +func RunTest(m *testing.M) (int, error) { + var cfg *rest.Config + var testEnv *envtest.Environment + var err error + + useExistingCluster := true + testEnv = &envtest.Environment{ + UseExistingCluster: &useExistingCluster, + AttachControlPlaneOutput: true, + } + + fmt.Println("Starting test environment") + cfg, err = testEnv.Start() + if err != nil { + return 1, err + } + + err = mdbv1.AddToScheme(scheme.Scheme) + if err != nil { + return 1, err + } + + TestClient, err = newE2ETestClient(cfg, scheme.Scheme) + if err != nil { + return 1, err + } + + fmt.Println("Starting test") + code := m.Run() + + err = testEnv.Stop() + if err != nil { + return code, err + } + + return code, nil +} diff --git a/test/e2e/e2eutil.go b/test/e2e/e2eutil.go index a2f091f8e..d29fd9abb 100644 --- a/test/e2e/e2eutil.go +++ b/test/e2e/e2eutil.go @@ -3,127 +3,219 @@ package e2eutil import ( "context" "fmt" - "testing" - "time" + "reflect" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - f 
"github.com/operator-framework/operator-sdk/pkg/test" appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" ) -func RegisterTypesWithFramework(newTypes ...runtime.Object) error { - for _, newType := range newTypes { - if err := f.AddToFrameworkScheme(apis.AddToScheme, newType); err != nil { - return fmt.Errorf("failed to add custom resource type %s to framework scheme: %v", newType.GetObjectKind(), err) - } +const testDataDirEnv = "TEST_DATA_DIR" + +// TestLabels should be applied to all resources created by tests. 
+func TestLabels() map[string]string { + return map[string]string{ + "e2e-test": "true", } - return nil +} + +// TestAnnotations create an annotations map +func TestAnnotations() map[string]string { + return map[string]string{ + "e2e-test-annotated": "true", + } +} + +func TestDataDir() string { + return envvar.GetEnvOrDefault(testDataDirEnv, "/workspace/testdata") // nolint:forbidigo +} + +func TlsTestDataDir() string { + return fmt.Sprintf("%s/tls", TestDataDir()) } // UpdateMongoDBResource applies the provided function to the most recent version of the MongoDB resource // and retries when there are conflicts -func UpdateMongoDBResource(original *mdbv1.MongoDB, updateFunc func(*mdbv1.MongoDB)) error { - err := f.Global.Client.Get(context.TODO(), types.NamespacedName{Name: original.Name, Namespace: original.Namespace}, original) +func UpdateMongoDBResource(ctx context.Context, original *mdbv1.MongoDBCommunity, updateFunc func(*mdbv1.MongoDBCommunity)) error { + err := TestClient.Get(ctx, types.NamespacedName{Name: original.Name, Namespace: original.Namespace}, original) if err != nil { return err } updateFunc(original) - return f.Global.Client.Update(context.TODO(), original) + return TestClient.Update(ctx, original) } -// WaitForConfigMapToExist waits until a ConfigMap of the given name exists -// using the provided retryInterval and timeout -func WaitForConfigMapToExist(cmName string, retryInterval, timeout time.Duration) (corev1.ConfigMap, error) { - cm := corev1.ConfigMap{} - return cm, waitForRuntimeObjectToExist(cmName, retryInterval, timeout, &cm) +func NewTestMongoDB(ctx *TestContext, name string, namespace string) (mdbv1.MongoDBCommunity, mdbv1.MongoDBUser) { + mongodbNamespace := namespace + if mongodbNamespace == "" { + mongodbNamespace = OperatorNamespace + } + mdb := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: mongodbNamespace, + Labels: TestLabels(), + }, + Spec: mdbv1.MongoDBCommunitySpec{ + Members: 3, + 
Type: "ReplicaSet", + Version: "8.0.0", + Arbiters: 0, + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + Users: []mdbv1.MongoDBUser{ + { + Name: fmt.Sprintf("%s-user", name), + PasswordSecretRef: mdbv1.SecretKeyReference{ + Key: fmt.Sprintf("%s-password", name), + Name: fmt.Sprintf("%s-%s-password-secret", name, ctx.ExecutionId), + }, + Roles: []mdbv1.Role{ + // roles on testing db for general connectivity + { + DB: "testing", + Name: "readWrite", + }, + { + DB: "testing", + Name: "clusterAdmin", + }, + // admin roles for reading FCV + { + DB: "admin", + Name: "readWrite", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + { + DB: "admin", + Name: "userAdmin", + }, + }, + ScramCredentialsSecretName: fmt.Sprintf("%s-my-scram", name), + }, + }, + StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ + SpecWrapper: mdbv1.StatefulSetSpecWrapper{ + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "mongod", + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + "cpu": resource.MustParse("1.0"), + "memory": resource.MustParse("200M"), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + "cpu": resource.MustParse("0.1"), + "memory": resource.MustParse("200M"), + }, + }, + }, + { + Name: "mongodb-agent", + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + "cpu": resource.MustParse("1.0"), + "memory": resource.MustParse("200M"), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + "cpu": resource.MustParse("0.1"), + "memory": resource.MustParse("200M"), + }, + }, + }, + }, + }, + }, + }}, + }, + }, + } + return mdb, mdb.Spec.Users[0] } -// WaitForMongoDBToReachPhase waits until the given MongoDB resource reaches the expected phase -func WaitForMongoDBToReachPhase(t *testing.T, mdb *mdbv1.MongoDB, phase 
mdbv1.Phase, retryInterval, timeout time.Duration) error { - return waitForMongoDBCondition(mdb, retryInterval, timeout, func(db mdbv1.MongoDB) bool { - t.Logf("current phase: %s, waiting for phase: %s", db.Status.Phase, phase) - return db.Status.Phase == phase - }) +func NewTestTLSConfig(optional bool) mdbv1.TLS { + return mdbv1.TLS{ + Enabled: true, + Optional: optional, + CertificateKeySecret: corev1.LocalObjectReference{ + Name: "tls-certificate", + }, + CaCertificateSecret: &corev1.LocalObjectReference{ + Name: "tls-ca-key-pair", + }, + } } -// waitForMongoDBCondition polls and waits for a given condition to be true -func waitForMongoDBCondition(mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration, condition func(mdbv1.MongoDB) bool) error { - mdbNew := mdbv1.MongoDB{} - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - err = f.Global.Client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: f.Global.OperatorNamespace}, &mdbNew) - if err != nil { - return false, err +func NewPrometheusConfig(ctx context.Context, namespace string) *mdbv1.Prometheus { + sec := secret.Builder(). + SetName("prom-secret"). + SetNamespace(namespace). + SetField("password", "prom-password"). 
+ Build() + err := TestClient.Create(ctx, &sec, &CleanupOptions{}) + if err != nil { + if !apiErrors.IsAlreadyExists(err) { + panic(fmt.Sprintf("Error trying to create secret: %s", err)) } - ready := condition(mdbNew) - return ready, nil - }) -} + } -// WaitForStatefulSetToExist waits until a StatefulSet of the given name exists -// using the provided retryInterval and timeout -func WaitForStatefulSetToExist(stsName string, retryInterval, timeout time.Duration) (appsv1.StatefulSet, error) { - sts := appsv1.StatefulSet{} - return sts, waitForRuntimeObjectToExist(stsName, retryInterval, timeout, &sts) + return &mdbv1.Prometheus{ + Username: "prom-user", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "prom-secret", + }, + } } -// WaitForStatefulSetToBeReady waits until all replicas of the StatefulSet with the given name -// have reached the ready status -func WaitForStatefulSetToBeReady(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration) error { - return waitForStatefulSetCondition(t, mdb, retryInterval, timeout, func(sts appsv1.StatefulSet) bool { - return sts.Status.ReadyReplicas == int32(mdb.Spec.Members) - }) -} +func ensureObject(ctx *TestContext, obj k8sClient.Object) error { + key := k8sClient.ObjectKeyFromObject(obj) + obj.SetLabels(TestLabels()) -func waitForStatefulSetCondition(t *testing.T, mdb *mdbv1.MongoDB, retryInterval, timeout time.Duration, condition func(set appsv1.StatefulSet) bool) error { - _, err := WaitForStatefulSetToExist(mdb.Name, retryInterval, timeout) + err := TestClient.Get(ctx.Ctx, key, obj) if err != nil { - return fmt.Errorf("error waiting for stateful set to be created: %s", err) - } - - sts := appsv1.StatefulSet{} - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - err = f.Global.Client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: f.Global.OperatorNamespace}, &sts) + if !apiErrors.IsNotFound(err) { + return err + } + err = TestClient.Create(ctx.Ctx, obj, 
&CleanupOptions{TestContext: ctx}) if err != nil { - return false, err + return err } - t.Logf("Waiting for %s to have %d replicas. Current ready replicas: %d\n", mdb.Name, mdb.Spec.Members, sts.Status.ReadyReplicas) - ready := condition(sts) - return ready, nil - }) -} - -// waitForRuntimeObjectToExist waits until a runtime.Object of the given name exists -// using the provided retryInterval and timeout provided. -func waitForRuntimeObjectToExist(name string, retryInterval, timeout time.Duration, obj runtime.Object) error { - return wait.Poll(retryInterval, timeout, func() (done bool, err error) { - err = f.Global.Client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: f.Global.OperatorNamespace}, obj) + } else { + fmt.Printf("%s %s/%s already exists!\n", reflect.TypeOf(obj), key.Namespace, key.Name) + err = TestClient.Update(ctx.Ctx, obj) if err != nil { - return false, client.IgnoreNotFound(err) + return err } - return true, nil - }) + } + return nil } -func NewTestMongoDB() mdbv1.MongoDB { - return mdbv1.MongoDB{ +// EnsureNamespace checks that the given namespace exists and creates it if not. +func EnsureNamespace(ctx *TestContext, namespace string) error { + return ensureObject(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: "example-mongodb", - Namespace: f.Global.OperatorNamespace, - }, - Spec: mdbv1.MongoDBSpec{ - Members: 3, - Type: "ReplicaSet", - Version: "4.0.6", - FeatureCompatibilityVersion: "4.0", + Name: namespace, + Labels: TestLabels(), }, - } + }) } diff --git a/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go b/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go new file mode 100644 index 000000000..2cc2db6d9 --- /dev/null +++ b/test/e2e/feature_compatibility_version/feature_compatibility_version_test.go @@ -0,0 +1,90 @@ +package feature_compatibility_version + +import ( + "context" + "fmt" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "github.com/stretchr/testify/assert" + "os" + "testing" + "time" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +// TestFeatureCompatibilityVersion test different scenarios of upgrading both FCV and image version. Note, that +// 4.4 images are the most convenient for this test as they support both FCV 4.2 and 4.4 and the underlying storage +// format remains the same. Versions 5 and 6 are one way upgrade only. +// See: https://www.mongodb.com/docs/manual/reference/command/setFeatureCompatibilityVersion/ +func TestFeatureCompatibilityVersion(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + // This is the lowest version available for the official images + const lowestMDBVersion = "4.4.16" + const highestMDBVersion = "4.4.19" + const featureCompatibility = "4.2" + const upgradedFeatureCompatibility = "4.4" + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.Version = lowestMDBVersion + mdb.Spec.FeatureCompatibilityVersion = featureCompatibility + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion is %s", featureCompatibility), tester.HasFCV(featureCompatibility, 3)) + + // Upgrade while keeping the Feature 
Compatibility intact + t.Run("MongoDB is reachable while version is upgraded", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*20)() + t.Run("Test Version can be upgraded", mongodbtests.ChangeVersion(ctx, &mdb, highestMDBVersion)) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + }) + + t.Run("Test Basic Connectivity after upgrade has completed", tester.ConnectivitySucceeds()) + t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion, after upgrade, is %s", featureCompatibility), tester.HasFCV(featureCompatibility, 3)) + + // Downgrade while keeping the Feature Compatibility intact + t.Run("MongoDB is reachable while version is downgraded", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Test Version can be downgraded", mongodbtests.ChangeVersion(ctx, &mdb, lowestMDBVersion)) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + }) + + t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion, after downgrade, is %s", featureCompatibility), tester.HasFCV(featureCompatibility, 3)) + + // Upgrade the Feature Compatibility keeping the MongoDB version the same + t.Run("Test FeatureCompatibilityVersion can be upgraded", func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.FeatureCompatibilityVersion = upgradedFeatureCompatibility + }) + assert.NoError(t, err) + t.Run("Stateful Set Reaches Ready State, after Upgrading FeatureCompatibilityVersion", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase, after Upgrading FeatureCompatibilityVersion", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + }) + + t.Run(fmt.Sprintf("Test FeatureCompatibilityVersion, after upgrading FeatureCompatibilityVersion, is %s", upgradedFeatureCompatibility), tester.HasFCV(upgradedFeatureCompatibility, 
10)) +} diff --git a/test/e2e/mongodbtests/mongodbtests.go b/test/e2e/mongodbtests/mongodbtests.go index acf72e17d..a7bbf30df 100644 --- a/test/e2e/mongodbtests/mongodbtests.go +++ b/test/e2e/mongodbtests/mongodbtests.go @@ -2,31 +2,218 @@ package mongodbtests import ( "context" + "encoding/json" "fmt" + "sort" + "strings" "testing" "time" - "k8s.io/apimachinery/pkg/types" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + + "sigs.k8s.io/controller-runtime/pkg/client" - "k8s.io/apimachinery/pkg/util/wait" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/wait" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/controller/mongodb" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" - f "github.com/operator-framework/operator-sdk/pkg/test" "github.com/stretchr/testify/assert" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" ) -// StatefulSetIsReady ensures that the underlying stateful set -// reaches the running state -func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { +// SkipTestIfLocal skips tests locally which tests connectivity to mongodb pods +func SkipTestIfLocal(t *testing.T, msg string, f func(t *testing.T)) { + if testing.Short() { + t.Log("Skipping [" + msg + "]") + return + } + t.Run(msg, f) +} + +// StatefulSetBecomesReady ensures that the underlying stateful set +// reaches the running state. 
+func StatefulSetBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + defaultOpts := []wait.Configuration{ + wait.RetryInterval(time.Second * 15), + wait.Timeout(time.Minute * 25), + } + defaultOpts = append(defaultOpts, opts...) + return statefulSetIsReady(ctx, mdb, defaultOpts...) +} + +// ArbitersStatefulSetBecomesReady ensures that the underlying stateful set +// reaches the running state. +func ArbitersStatefulSetBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + defaultOpts := []wait.Configuration{ + wait.RetryInterval(time.Second * 15), + wait.Timeout(time.Minute * 20), + } + defaultOpts = append(defaultOpts, opts...) + return arbitersStatefulSetIsReady(ctx, mdb, defaultOpts...) +} + +// StatefulSetBecomesUnready ensures the underlying stateful set reaches +// the unready state. +func StatefulSetBecomesUnready(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + defaultOpts := []wait.Configuration{ + wait.RetryInterval(time.Second * 15), + wait.Timeout(time.Minute * 15), + } + defaultOpts = append(defaultOpts, opts...) + return statefulSetIsNotReady(ctx, mdb, defaultOpts...) +} + +// StatefulSetIsReadyAfterScaleDown ensures that a replica set is scaled down correctly +// note: scaling down takes considerably longer than scaling up due the readiness probe +// failure threshold being high +func StatefulSetIsReadyAfterScaleDown(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForStatefulSetToBeReadyAfterScaleDown(ctx, t, mdb, wait.RetryInterval(time.Second*60), wait.Timeout(time.Minute*45)) + if err != nil { + t.Fatal(err) + } + t.Logf("StatefulSet %s/%s is ready!", mdb.Namespace, mdb.Name) + } +} + +// statefulSetIsReady ensures that the underlying stateful set +// reaches the running state. 
+func statefulSetIsReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + return func(t *testing.T) { + start := time.Now() + err := wait.ForStatefulSetToBeReady(ctx, t, mdb, opts...) + if err != nil { + t.Fatal(err) + } + elapsed := time.Since(start).Seconds() + t.Logf("StatefulSet %s/%s is ready! It took %f seconds", mdb.Namespace, mdb.Name, elapsed) + } +} + +// arbitersStatefulSetIsReady ensures that the underlying stateful set +// reaches the running state. +func arbitersStatefulSetIsReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForArbitersStatefulSetToBeReady(ctx, t, mdb, opts...) + if err != nil { + t.Fatal(err) + } + t.Logf("Arbiters StatefulSet %s/%s is ready!", mdb.Namespace, mdb.Name) + } +} + +// statefulSetIsNotReady ensures that the underlying stateful set reaches the unready state. +func statefulSetIsNotReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...wait.Configuration) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForStatefulSetToBeUnready(ctx, t, mdb, opts...) 
+ if err != nil { + t.Fatal(err) + } + t.Logf("StatefulSet %s/%s is not ready!", mdb.Namespace, mdb.Name) + } +} + +func StatefulSetHasOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { + return func(t *testing.T) { + stsNamespacedName := types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace} + sts := appsv1.StatefulSet{} + err := e2eutil.TestClient.Get(ctx, stsNamespacedName, &sts) + + if err != nil { + t.Fatal(err) + } + assertEqualOwnerReference(t, "StatefulSet", stsNamespacedName, sts.GetOwnerReferences(), expectedOwnerReference) + } +} + +// StatefulSetIsDeleted ensures that the underlying stateful set is deleted +func StatefulSetIsDeleted(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForStatefulSetToBeDeleted(ctx, mdb.Name, time.Second*10, time.Minute*1, mdb.Namespace) + if err != nil { + t.Fatal(err) + } + } +} + +func ServiceHasOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { + return func(t *testing.T) { + serviceNamespacedName := types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace} + srv := corev1.Service{} + err := e2eutil.TestClient.Get(ctx, serviceNamespacedName, &srv) + if err != nil { + t.Fatal(err) + } + assertEqualOwnerReference(t, "Service", serviceNamespacedName, srv.GetOwnerReferences(), expectedOwnerReference) + } +} + +func ServiceUsesCorrectPort(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedPort int32) func(t *testing.T) { + return func(t *testing.T) { + serviceNamespacedName := types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace} + svc := corev1.Service{} + err := e2eutil.TestClient.Get(ctx, serviceNamespacedName, &svc) + if err != nil { + t.Fatal(err) + } + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, svc.Spec.Ports[0].Port, expectedPort) + } +} + 
+func AgentX509SecretsExists(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + agentCertSecret := corev1.Secret{} + err := e2eutil.TestClient.Get(ctx, mdb.AgentCertificateSecretNamespacedName(), &agentCertSecret) + assert.NoError(t, err) + + agentCertPemSecret := corev1.Secret{} + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), &agentCertPemSecret) + assert.NoError(t, err) + } +} + +func AgentSecretsHaveOwnerReference(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { + checkSecret := func(t *testing.T, resourceNamespacedName types.NamespacedName) { + secret := corev1.Secret{} + err := e2eutil.TestClient.Get(ctx, resourceNamespacedName, &secret) + + assert.NoError(t, err) + assertEqualOwnerReference(t, "Secret", resourceNamespacedName, secret.GetOwnerReferences(), expectedOwnerReference) + } + + return func(t *testing.T) { + checkSecret(t, mdb.GetAgentPasswordSecretNamespacedName()) + checkSecret(t, mdb.GetAgentKeyfileSecretNamespacedName()) + } +} + +// ConnectionStringSecretsAreConfigured verifies that secrets storing the connection string were generated for all scram users +// and that they have the expected owner reference +func ConnectionStringSecretsAreConfigured(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedOwnerReference metav1.OwnerReference) func(t *testing.T) { + return func(t *testing.T) { + for _, user := range mdb.GetAuthUsers() { + secret := corev1.Secret{} + secretNamespacedName := types.NamespacedName{Name: user.ConnectionStringSecretName, Namespace: mdb.Namespace} + err := e2eutil.TestClient.Get(ctx, secretNamespacedName, &secret) + + assert.NoError(t, err) + assertEqualOwnerReference(t, "Secret", secretNamespacedName, secret.GetOwnerReferences(), expectedOwnerReference) + } + } +} + +// StatefulSetHasUpdateStrategy verifies that the StatefulSet holding this MongoDB +// resource has the 
correct Update Strategy +func StatefulSetHasUpdateStrategy(ctx context.Context, mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType) func(t *testing.T) { return func(t *testing.T) { - err := e2eutil.WaitForStatefulSetToBeReady(t, mdb, time.Second*15, time.Minute*5) + err := wait.ForStatefulSetToHaveUpdateStrategy(ctx, t, mdb, strategy, wait.RetryInterval(time.Second*15), wait.Timeout(time.Minute*8)) if err != nil { t.Fatal(err) } @@ -34,10 +221,120 @@ func StatefulSetIsReady(mdb *mdbv1.MongoDB) func(t *testing.T) { } } +// GetPersistentVolumes returns all persistent volumes on the cluster +func getPersistentVolumesList(ctx context.Context) (*corev1.PersistentVolumeList, error) { + return e2eutil.TestClient.CoreV1Client.PersistentVolumes().List(ctx, metav1.ListOptions{}) +} + +func containsVolume(volumes []corev1.PersistentVolume, volumeName string) bool { + for _, v := range volumes { + if v.Name == volumeName { + return true + } + } + return false +} + +func HasExpectedPersistentVolumes(ctx context.Context, volumes []corev1.PersistentVolume) func(t *testing.T) { + return func(t *testing.T) { + volumeList, err := getPersistentVolumesList(ctx) + actualVolumes := volumeList.Items + assert.NoError(t, err) + assert.Len(t, actualVolumes, len(volumes), + "The number of persistent volumes should be equal to the amount of volumes we created. 
Expected: %d, actual: %d", + len(volumes), len(actualVolumes)) + for _, v := range volumes { + assert.True(t, containsVolume(actualVolumes, v.Name)) + } + } +} +func HasExpectedMetadata(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedLabels map[string]string, expectedAnnotations map[string]string) func(t *testing.T) { + return func(t *testing.T) { + namespace := mdb.Namespace + + statefulSetList := appsv1.StatefulSetList{} + err := e2eutil.TestClient.Client.List(ctx, &statefulSetList, client.InNamespace(namespace)) + assert.NoError(t, err) + assert.NotEmpty(t, statefulSetList.Items) + for _, s := range statefulSetList.Items { + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "statefulset "+s.Name) + } + + volumeList := corev1.PersistentVolumeList{} + err = e2eutil.TestClient.Client.List(ctx, &volumeList, client.InNamespace(namespace)) + assert.NoError(t, err) + assert.NotEmpty(t, volumeList.Items) + for _, s := range volumeList.Items { + volName := s.Name + if strings.HasPrefix(volName, "data-volume-") || strings.HasPrefix(volName, "logs-volume-") { + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "volume "+volName) + } + } + + podList := corev1.PodList{} + err = e2eutil.TestClient.Client.List(ctx, &podList, client.InNamespace(namespace)) + assert.NoError(t, err) + assert.NotEmpty(t, podList.Items) + + for _, s := range podList.Items { + // only consider stateful-sets (as opposite to the controller replica set) + for _, owner := range s.OwnerReferences { + if owner.Kind == "ReplicaSet" { + continue + } + } + // Ignore non-owned pods + if len(s.OwnerReferences) == 0 { + continue + } + + // Ensure we are considering pods owned by a stateful set + hasStatefulSetOwner := false + for _, owner := range s.OwnerReferences { + if owner.Kind == "StatefulSet" { + hasStatefulSetOwner = true + } + } + if !hasStatefulSetOwner { + continue + } + + containsMetadata(t, s.ObjectMeta, expectedLabels, expectedAnnotations, "pod 
"+s.Name) + } + } +} + +func containsMetadata(t *testing.T, metadata metav1.ObjectMeta, expectedLabels map[string]string, expectedAnnotations map[string]string, msg string) { + labels := metadata.Labels + for k, v := range expectedLabels { + assert.Contains(t, labels, k, msg+" has label "+k) + value := labels[k] + assert.Equal(t, v, value, msg+" has label "+k+" with value "+v) + } + + annotations := metadata.Annotations + for k, v := range expectedAnnotations { + assert.Contains(t, annotations, k, msg+" has annotation "+k) + value := annotations[k] + assert.Equal(t, v, value, msg+" has annotation "+k+" with value "+v) + } +} + +// MongoDBReachesPendingPhase ensures the MongoDB resources gets to the Pending phase +func MongoDBReachesPendingPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Pending, time.Second*15, time.Minute*2) + if err != nil { + t.Fatal(err) + } + t.Logf("MongoDB %s/%s is Pending!", mdb.Namespace, mdb.Name) + } +} + // MongoDBReachesRunningPhase ensure the MongoDB resource reaches the Running phase -func MongoDBReachesRunningPhase(mdb *mdbv1.MongoDB) func(t *testing.T) { +func MongoDBReachesRunningPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - err := e2eutil.WaitForMongoDBToReachPhase(t, mdb, mdbv1.Running, time.Second*15, time.Minute*5) + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Running, time.Second*15, time.Minute*12) if err != nil { t.Fatal(err) } @@ -45,38 +342,205 @@ func MongoDBReachesRunningPhase(mdb *mdbv1.MongoDB) func(t *testing.T) { } } -func AutomationConfigConfigMapExists(mdb *mdbv1.MongoDB) func(t *testing.T) { +// MongoDBReachesFailedPhase ensure the MongoDB resource reaches the Failed phase. 
+func MongoDBReachesFailedPhase(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForMongoDBToReachPhase(ctx, t, mdb, mdbv1.Failed, time.Second*15, time.Minute*5) + if err != nil { + t.Fatal(err) + } + t.Logf("MongoDB %s/%s is in Failed state!", mdb.Namespace, mdb.Name) + } +} + +func AutomationConfigSecretExists(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { return func(t *testing.T) { - cm, err := e2eutil.WaitForConfigMapToExist(mdb.ConfigMapName(), time.Second*5, time.Minute*1) + s, err := wait.ForSecretToExist(ctx, mdb.AutomationConfigSecretName(), time.Second*5, time.Minute*1, mdb.Namespace) assert.NoError(t, err) - t.Logf("ConfigMap %s/%s was successfully created", mdb.ConfigMapName(), mdb.Namespace) - assert.Contains(t, cm.Data, mongodb.AutomationConfigKey) + t.Logf("Secret %s/%s was successfully created", mdb.Namespace, mdb.AutomationConfigSecretName()) + assert.Contains(t, s.Data, automationconfig.ConfigKey) - t.Log("The ConfigMap contained the automation config") + t.Log("The Secret contained the automation config") + } +} + +func getAutomationConfig(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { + currentSecret := corev1.Secret{} + currentAc := automationconfig.AutomationConfig{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, ¤tSecret) + assert.NoError(t, err) + err = json.Unmarshal(currentSecret.Data[automationconfig.ConfigKey], ¤tAc) + assert.NoError(t, err) + return currentAc +} + +// AutomationConfigVersionHasTheExpectedVersion verifies that the automation config has the expected version. 
+func AutomationConfigVersionHasTheExpectedVersion(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedVersion int) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + assert.Equal(t, expectedVersion, currentAc.Version) + } +} + +// AutomationConfigHasLogRotationConfig verifies that the automation config contains the given logRotate config. +func AutomationConfigHasLogRotationConfig(ctx context.Context, mdb *mdbv1.MongoDBCommunity, lrc *automationconfig.CrdLogRotate) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + for _, p := range currentAc.Processes { + assert.Equal(t, automationconfig.ConvertCrdLogRotateToAC(lrc), p.LogRotate) + } + } +} + +func AutomationConfigHasSettings(ctx context.Context, mdb *mdbv1.MongoDBCommunity, settings map[string]interface{}) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + assert.Equal(t, currentAc.ReplicaSets[0].Settings, settings) + } +} + +// AutomationConfigReplicaSetsHaveExpectedArbiters verifies that the automation config has the expected version. +func AutomationConfigReplicaSetsHaveExpectedArbiters(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedArbiters int) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + lsRs := currentAc.ReplicaSets + for _, rs := range lsRs { + arbiters := 0 + for _, rsMember := range rs.Members { + if rsMember.ArbiterOnly { + arbiters += 1 + } + } + assert.Equal(t, expectedArbiters, arbiters) + } + } +} + +// AutomationConfigHasTheExpectedCustomRoles verifies that the automation config has the expected custom roles. 
+func AutomationConfigHasTheExpectedCustomRoles(ctx context.Context, mdb *mdbv1.MongoDBCommunity, roles []automationconfig.CustomRole) func(t *testing.T) { + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + assert.ElementsMatch(t, roles, currentAc.Roles) + } +} + +func AutomationConfigHasVoteTagPriorityConfigured(ctx context.Context, mdb *mdbv1.MongoDBCommunity, memberOptions []automationconfig.MemberOptions) func(t *testing.T) { + acMemberOptions := make([]automationconfig.MemberOptions, 0) + + return func(t *testing.T) { + currentAc := getAutomationConfig(ctx, t, mdb) + rsMembers := currentAc.ReplicaSets + sort.Slice(rsMembers[0].Members, func(i, j int) bool { + return rsMembers[0].Members[i].Id < rsMembers[0].Members[j].Id + }) + + for _, m := range rsMembers[0].Members { + acMemberOptions = append(acMemberOptions, automationconfig.MemberOptions{Votes: m.Votes, Priority: floatPtrTostringPtr(m.Priority), Tags: m.Tags}) + } + assert.ElementsMatch(t, memberOptions, acMemberOptions) } } // CreateMongoDBResource creates the MongoDB resource -func CreateMongoDBResource(mdb *mdbv1.MongoDB, ctx *f.TestCtx) func(*testing.T) { +func CreateMongoDBResource(mdb *mdbv1.MongoDBCommunity, textCtx *e2eutil.TestContext) func(*testing.T) { return func(t *testing.T) { - if err := f.Global.Client.Create(context.TODO(), mdb, &f.CleanupOptions{TestContext: ctx}); err != nil { + if err := e2eutil.TestClient.Create(textCtx.Ctx, mdb, &e2eutil.CleanupOptions{TestContext: textCtx}); err != nil { t.Fatal(err) } t.Logf("Created MongoDB resource %s/%s", mdb.Name, mdb.Namespace) } } -// DeletePod will delete a pod that belongs to this MongoDB resource's StatefulSet -func DeletePod(mdb *mdbv1.MongoDB, podNum int) func(*testing.T) { +// DeleteMongoDBResource deletes the MongoDB resource +func DeleteMongoDBResource(mdb *mdbv1.MongoDBCommunity, testCtx *e2eutil.TestContext) func(*testing.T) { return func(t *testing.T) { - pod := corev1.Pod{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d", mdb.Name, podNum), - Namespace: mdb.Namespace, - }, + if err := e2eutil.TestClient.Delete(testCtx.Ctx, mdb); err != nil { + t.Fatal(err) + } + t.Logf("Deleted MongoDB resource %s/%s", mdb.Name, mdb.Namespace) + } +} + +// GetConnectionStringSecret returnes the secret generated by the operator that is storing the connection string for a specific user +func GetConnectionStringSecret(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) corev1.Secret { + secret := corev1.Secret{} + secretNamespacedName := types.NamespacedName{Name: user.ConnectionStringSecretName, Namespace: mdb.Namespace} + _ = e2eutil.TestClient.Get(ctx, secretNamespacedName, &secret) + return secret +} + +// GetConnectionStringForUser returns the mongodb standard connection string for a user +func GetConnectionStringForUser(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) string { + return string(GetConnectionStringSecret(ctx, mdb, user).Data["connectionString.standard"]) +} + +// GetSrvConnectionStringForUser returns the mongodb service connection string for a user +func GetSrvConnectionStringForUser(ctx context.Context, mdb mdbv1.MongoDBCommunity, user authtypes.User) string { + return string(GetConnectionStringSecret(ctx, mdb, user).Data["connectionString.standardSrv"]) +} + +func getOwnerReference(mdb *mdbv1.MongoDBCommunity) metav1.OwnerReference { + return *metav1.NewControllerRef(mdb, schema.GroupVersionKind{ + Group: mdbv1.GroupVersion.Group, + Version: mdbv1.GroupVersion.Version, + Kind: mdb.Kind, + }) +} + +func BasicFunctionality(ctx context.Context, mdb *mdbv1.MongoDBCommunity, skipStatusCheck ...bool) func(*testing.T) { + return func(t *testing.T) { + mdbOwnerReference := getOwnerReference(mdb) + t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(ctx, mdb)) + t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(ctx, mdb)) + t.Run("MongoDB Reaches Running Phase", 
MongoDBReachesRunningPhase(ctx, mdb)) + t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Agent Secrets Have OwnerReference", AgentSecretsHaveOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Connection string secrets are configured", ConnectionStringSecretsAreConfigured(ctx, mdb, mdbOwnerReference)) + // TODO: this is temporary, remove the need for skipStatuscheck after 0.7.4 operator release + if len(skipStatusCheck) > 0 && !skipStatusCheck[0] { + t.Run("Test Status Was Updated", Status(ctx, mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: mdb.Spec.Members, + CurrentStatefulSetReplicas: mdb.Spec.Members, + })) + } + } +} + +func BasicFunctionalityX509(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + mdbOwnerReference := getOwnerReference(mdb) + t.Run("Secret Was Correctly Created", AutomationConfigSecretExists(ctx, mdb)) + t.Run("Stateful Set Reaches Ready State", StatefulSetBecomesReady(ctx, mdb)) + t.Run("MongoDB Reaches Running Phase", MongoDBReachesRunningPhase(ctx, mdb)) + t.Run("Stateful Set Has OwnerReference", StatefulSetHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Service Set Has OwnerReference", ServiceHasOwnerReference(ctx, mdb, mdbOwnerReference)) + t.Run("Connection string secrets are configured", ConnectionStringSecretsAreConfigured(ctx, mdb, mdbOwnerReference)) + } +} + +// ServiceWithNameExists checks whether a service with the name serviceName exists +func ServiceWithNameExists(ctx context.Context, serviceName string, namespace string) func(t *testing.T) { + return func(t *testing.T) { + serviceNamespacedName := types.NamespacedName{Name: serviceName, Namespace: namespace} + srv := corev1.Service{} + err := 
e2eutil.TestClient.Get(ctx, serviceNamespacedName, &srv) + if err != nil { + t.Fatal(err) } - if err := f.Global.Client.Delete(context.TODO(), &pod); err != nil { + t.Logf("Service with name %s exists", serviceName) + } +} + +// DeletePod will delete a pod that belongs to this MongoDB resource's StatefulSet +func DeletePod(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int) func(*testing.T) { + return func(t *testing.T) { + pod := podFromMongoDBCommunity(mdb, podNum) + if err := e2eutil.TestClient.Delete(ctx, &pod); err != nil { t.Fatal(err) } @@ -84,31 +548,38 @@ func DeletePod(mdb *mdbv1.MongoDB, podNum int) func(*testing.T) { } } -// BasicConnectivity returns a test function which performs -// a basic MongoDB connectivity test -func BasicConnectivity(mdb *mdbv1.MongoDB) func(t *testing.T) { +// DeleteStatefulSet provides a wrapper to delete appsv1.StatefulSet types +func DeleteStatefulSet(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { return func(t *testing.T) { - if err := Connect(mdb); err != nil { - t.Fatal(fmt.Sprintf("Error connecting to MongoDB deployment: %+v", err)) + sts := appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: mdb.Name, + Namespace: mdb.Namespace, + }, + } + if err := e2eutil.TestClient.Delete(ctx, &sts); err != nil { + t.Fatal(err) } + + t.Logf("StatefulSet %s/%s deleted", sts.ObjectMeta.Namespace, sts.ObjectMeta.Name) } } // Status compares the given status to the actual status of the MongoDB resource -func Status(mdb *mdbv1.MongoDB, expectedStatus mdbv1.MongoDBStatus) func(t *testing.T) { +func Status(ctx context.Context, mdb *mdbv1.MongoDBCommunity, expectedStatus mdbv1.MongoDBCommunityStatus) func(t *testing.T) { return func(t *testing.T) { - if err := f.Global.Client.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, mdb); err != nil { - t.Fatal(fmt.Errorf("error getting MongoDB resource: %+v", err)) + if err := e2eutil.TestClient.Get(ctx, 
types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, mdb); err != nil { + t.Fatalf("error getting MongoDB resource: %s", err) } assert.Equal(t, expectedStatus, mdb.Status) } } -// Scale update the MongoDB with a new number of members and updates the resource -func Scale(mdb *mdbv1.MongoDB, newMembers int) func(*testing.T) { +// Scale update the MongoDB with a new number of members and updates the resource. +func Scale(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newMembers int) func(*testing.T) { return func(t *testing.T) { t.Logf("Scaling Mongodb %s, to %d members", mdb.Name, newMembers) - err := e2eutil.UpdateMongoDBResource(mdb, func(db *mdbv1.MongoDB) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { db.Spec.Members = newMembers }) if err != nil { @@ -117,50 +588,246 @@ func Scale(mdb *mdbv1.MongoDB, newMembers int) func(*testing.T) { } } -// Connect performs a connectivity check by initializing a mongo client -// and inserting a document into the MongoDB resource -func Connect(mdb *mdbv1.MongoDB) error { - ctx, _ := context.WithTimeout(context.Background(), 10*time.Minute) - mongoClient, err := mongo.Connect(ctx, options.Client().ApplyURI(mdb.MongoURI())) - if err != nil { - return err +// ScaleArbiters update the MongoDB with a new number of arbiters and updates the resource. 
+func ScaleArbiters(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newArbiters int) func(*testing.T) { + return func(t *testing.T) { + t.Logf("Scaling Mongodb %s, to %d members", mdb.Name, newArbiters) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Arbiters = newArbiters + }) + if err != nil { + t.Fatal(err) + } } +} - return wait.Poll(time.Second*1, time.Second*30, func() (done bool, err error) { - collection := mongoClient.Database("testing").Collection("numbers") - _, err = collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) +// DisableTLS changes the tls.enabled attribute to false. +func DisableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return tls(ctx, mdb, false) +} + +// EnableTLS changes the tls.enabled attribute to true. +func EnableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return tls(ctx, mdb, true) +} + +// tls function configures the security.tls.enabled attribute. +func tls(ctx context.Context, mdb *mdbv1.MongoDBCommunity, enabled bool) func(*testing.T) { + return func(t *testing.T) { + t.Logf("Setting security.tls.enabled to %t", enabled) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Security.TLS.Enabled = enabled + }) if err != nil { - return false, nil + t.Fatal(err) } - return true, nil - }) + } } -// IsReachableDuring periodically tests connectivity to the provided MongoDB resource -// during execution of the provided functions. This function can be used to ensure -// The MongoDB is up throughout the test. 
-func IsReachableDuring(mdb *mdbv1.MongoDB, interval time.Duration, testFunc func()) func(*testing.T) { - return func(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // start a go routine which will periodically check basic MongoDB connectivity - // once all the test functions have been executed, the go routine will be cancelled - go func() { - for { - select { - case <-ctx.Done(): - t.Logf("context cancelled, no longer checking connectivity") - return - case <-time.After(interval): - if err := Connect(mdb); err != nil { - t.Fatal(fmt.Sprintf("error reaching MongoDB deployment: %+v", err)) - } else { - t.Logf("Successfully connected to %s", mdb.Name) - } - } - } - }() - testFunc() +func ChangeVersion(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newVersion string) func(*testing.T) { + return func(t *testing.T) { + t.Logf("Changing versions from: %s to %s", mdb.Spec.Version, newVersion) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Version = newVersion + }) + if err != nil { + t.Fatal(err) + } + } +} + +func ChangePort(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newPort int) func(*testing.T) { + return func(t *testing.T) { + t.Logf("Changing port from: %d to %d", mdb.GetMongodConfiguration().GetDBPort(), newPort) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.AdditionalMongodConfig.SetDBPort(newPort) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func AddConnectionStringOption(ctx context.Context, mdb *mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { + return func(t *testing.T) { + t.Logf("Adding %s:%v to connection string", key, value) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.AdditionalConnectionStringConfig.SetOption(key, value) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func ResetConnectionStringOptions(ctx 
context.Context, mdb *mdbv1.MongoDBCommunity) func(t *testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.AdditionalConnectionStringConfig = mdbv1.NewMapWrapper() + db.Spec.Users[0].AdditionalConnectionStringConfig = mdbv1.NewMapWrapper() + }) + if err != nil { + t.Fatal(err) + } + } +} + +func AddConnectionStringOptionToUser(ctx context.Context, mdb *mdbv1.MongoDBCommunity, key string, value interface{}) func(t *testing.T) { + return func(t *testing.T) { + t.Logf("Adding %s:%v to connection string to first user", key, value) + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users[0].AdditionalConnectionStringConfig.SetOption(key, value) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func StatefulSetContainerConditionIsTrue(ctx context.Context, mdb *mdbv1.MongoDBCommunity, containerName string, condition func(c corev1.Container) bool) func(*testing.T) { + return func(t *testing.T) { + sts := appsv1.StatefulSet{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + if err != nil { + t.Fatal(err) + } + + existingContainer := container.GetByName(containerName, sts.Spec.Template.Spec.Containers) + if existingContainer == nil { + t.Fatalf(`No container found with name "%s" in StatefulSet pod template`, containerName) + } + + if !condition(*existingContainer) { + t.Fatalf(`Container "%s" does not satisfy condition`, containerName) + } + } +} + +func StatefulSetConditionIsTrue(ctx context.Context, mdb *mdbv1.MongoDBCommunity, condition func(s appsv1.StatefulSet) bool) func(*testing.T) { + return func(t *testing.T) { + sts := appsv1.StatefulSet{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + if err != nil { + t.Fatal(err) + } + + if !condition(sts) { + t.Fatalf(`StatefulSet "%s" does not satisfy condition`, mdb.Name) + } + } 
+} + +// PodContainerBecomesNotReady waits until the container with 'containerName' in the pod #podNum becomes not ready. +func PodContainerBecomesNotReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { + return func(t *testing.T) { + pod := podFromMongoDBCommunity(mdb, podNum) + assert.NoError(t, wait.ForPodReadiness(ctx, t, false, containerName, time.Minute*10, pod)) + } +} + +// PodContainerBecomesReady waits until the container with 'containerName' in the pod #podNum becomes ready. +func PodContainerBecomesReady(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName string) func(*testing.T) { + return func(t *testing.T) { + pod := podFromMongoDBCommunity(mdb, podNum) + assert.NoError(t, wait.ForPodReadiness(ctx, t, true, containerName, time.Minute*3, pod)) + } +} + +func ExecInContainer(ctx context.Context, mdb *mdbv1.MongoDBCommunity, podNum int, containerName, command string) func(*testing.T) { + return func(t *testing.T) { + pod := podFromMongoDBCommunity(mdb, podNum) + _, err := e2eutil.TestClient.Execute(ctx, pod, containerName, command) + assert.NoError(t, err) + } +} + +// StatefulSetMessageIsReceived waits (up to 5 minutes) to get desiredMessageStatus as a mongodb message status or returns a fatal error. 
+func StatefulSetMessageIsReceived(mdb *mdbv1.MongoDBCommunity, testCtx *e2eutil.TestContext, desiredMessageStatus string) func(t *testing.T) { + return func(t *testing.T) { + err := wait.ForMongoDBMessageStatus(testCtx.Ctx, t, mdb, time.Second*15, time.Minute*5, desiredMessageStatus) + if err != nil { + t.Fatal(err) + } + + } +} + +func podFromMongoDBCommunity(mdb *mdbv1.MongoDBCommunity, podNum int) corev1.Pod { + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", mdb.Name, podNum), + Namespace: mdb.Namespace, + }, + } +} + +func assertEqualOwnerReference(t *testing.T, resourceType string, resourceNamespacedName types.NamespacedName, ownerReferences []metav1.OwnerReference, expectedOwnerReference metav1.OwnerReference) { + assert.Len(t, ownerReferences, 1, fmt.Sprintf("%s %s/%s doesn't have OwnerReferences", resourceType, resourceNamespacedName.Name, resourceNamespacedName.Namespace)) + + assert.Equal(t, expectedOwnerReference.APIVersion, ownerReferences[0].APIVersion) + assert.Equal(t, "MongoDBCommunity", ownerReferences[0].Kind) + assert.Equal(t, expectedOwnerReference.Name, ownerReferences[0].Name) + assert.Equal(t, expectedOwnerReference.UID, ownerReferences[0].UID) +} + +func RemoveLastUserFromMongoDBCommunity(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users = db.Spec.Users[:len(db.Spec.Users)-1] + }) + + if err != nil { + t.Fatal(err) + } + } +} + +func EditConnectionStringSecretNameOfLastUser(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newSecretName string) func(*testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users[len(db.Spec.Users)-1].ConnectionStringSecretName = newSecretName + }) + + if err != nil { + t.Fatal(err) + } + } +} + +func ConnectionStringSecretIsCleanedUp(ctx 
context.Context, mdb *mdbv1.MongoDBCommunity, removedConnectionString string) func(t *testing.T) { + return func(t *testing.T) { + connectionStringSecret := corev1.Secret{} + newErr := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: removedConnectionString, Namespace: mdb.Namespace}, &connectionStringSecret) + + assert.EqualError(t, newErr, fmt.Sprintf("secrets \"%s\" not found", removedConnectionString)) + } +} + +func AuthUsersDeletedIsUpdated(ctx context.Context, mdb *mdbv1.MongoDBCommunity, mdbUser mdbv1.MongoDBUser) func(t *testing.T) { + return func(t *testing.T) { + deletedUser := automationconfig.DeletedUser{User: mdbUser.Name, Dbs: []string{mdbUser.DB}} + + currentAc := getAutomationConfig(ctx, t, mdb) + + assert.Contains(t, currentAc.Auth.UsersDeleted, deletedUser) + } +} + +func AddUserToMongoDBCommunity(ctx context.Context, mdb *mdbv1.MongoDBCommunity, newUser mdbv1.MongoDBUser) func(t *testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Users = append(db.Spec.Users, newUser) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func floatPtrTostringPtr(floatPtr *float32) *string { + if floatPtr != nil { + stringValue := fmt.Sprintf("%.1f", *floatPtr) + return &stringValue } + return nil } diff --git a/test/e2e/prometheus/prometheus_test.go b/test/e2e/prometheus/prometheus_test.go new file mode 100644 index 000000000..809b9ca9c --- /dev/null +++ b/test/e2e/prometheus/prometheus_test.go @@ -0,0 +1,68 @@ +package prometheus + +import ( + "context" + "fmt" + "os" + "testing" + + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/stretchr/testify/assert" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestPrometheus(t *testing.T) { + ctx := context.Background() + resourceName := "mdb0" + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + mdb.Spec.Prometheus = e2eutil.NewPrometheusConfig(ctx, mdb.Namespace) + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + + mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB with Prometheus configuration", func(t *testing.T) { + t.Run("Resource has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Test Prometheus endpoint is active", tester.PrometheusEndpointIsReachable("prom-user", "prom-password", false)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + t.Run("Enabling HTTPS on the Prometheus endpoint", func(t *testing.T) { + err = e2eutil.UpdateMongoDBResource(ctx, &mdb, func(mdb *v1.MongoDBCommunity) { + 
mdb.Spec.Prometheus.TLSSecretRef.Name = "tls-certificate" + }) + assert.NoError(t, err) + + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Prometheus HTTPS endpoint is active", tester.PrometheusEndpointIsReachable("prom-user", "prom-password", true)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 2)) + }) + }) +} diff --git a/test/e2e/replica_set/replica_set_test.go b/test/e2e/replica_set/replica_set_test.go index 02f900ac1..4dfa5327f 100644 --- a/test/e2e/replica_set/replica_set_test.go +++ b/test/e2e/replica_set/replica_set_test.go @@ -1,36 +1,109 @@ package replica_set import ( + "context" + "fmt" + "os" "testing" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - f "github.com/operator-framework/operator-sdk/pkg/test" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" ) func TestMain(m *testing.M) { - f.MainEntry(m) + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) } +func intPtr(x int) *int { return &x } +func strPtr(s string) *string { return &s } + func TestReplicaSet(t *testing.T) { - ctx := f.NewTestCtx(t) - defer ctx.Cleanup() + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + lcr := automationconfig.CrdLogRotate{ + // fractional values are supported + SizeThresholdMB: "0.1", + LogRotate: automationconfig.LogRotate{ + TimeThresholdHrs: 1, + NumUncompressed: 10, + NumTotal: 10, + IncludeAuditLogsWithMongoDBLogs: false, + }, + PercentOfDiskspace: "1", + } + + systemLog := automationconfig.SystemLog{ + Destination: automationconfig.File, + Path: "/tmp/mongod.log", + LogAppend: false, + } + + // logRotate can only be configured if systemLog to file has been configured + mdb.Spec.AgentConfiguration.LogRotate = &lcr + mdb.Spec.AgentConfiguration.SystemLog = &systemLog + + // config member options + memberOptions := []automationconfig.MemberOptions{ + { + Votes: intPtr(1), + Tags: map[string]string{"foo1": "bar1"}, + Priority: strPtr("1.5"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo2": "bar2"}, + Priority: strPtr("1.0"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo3": "bar3"}, + Priority: strPtr("2.5"), + }, + } + mdb.Spec.MemberConfig = memberOptions + + settings := map[string]interface{}{ + "electionTimeoutMillis": float64(20), + } + mdb.Spec.AutomationConfigOverride = &v1.AutomationConfigOverride{ + ReplicaSet: v1.OverrideReplicaSet{Settings: v1.MapWrapper{Object: settings}}, + } - // register our types with the testing framework - if err := 
e2eutil.RegisterTypesWithFramework(&mdbv1.MongoDB{}); err != nil { + tester, err := FromResource(ctx, t, mdb) + if err != nil { t.Fatal(err) } - mdb := e2eutil.NewTestMongoDB() - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Config Map Was Correctly Created", mongodbtests.AutomationConfigConfigMapExists(&mdb)) - t.Run("Stateful Set Reaches Ready State", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Basic Connectivity", mongodbtests.BasicConnectivity(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - })) + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("AutomationConfig has the correct logRotateConfig", mongodbtests.AutomationConfigHasLogRotationConfig(ctx, &mdb, &lcr)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("AutomationConfig has correct member options", mongodbtests.AutomationConfigHasVoteTagPriorityConfigured(ctx, &mdb, memberOptions)) + 
t.Run("AutomationConfig has correct settings", mongodbtests.AutomationConfigHasSettings(ctx, &mdb, settings)) } diff --git a/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go b/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go new file mode 100644 index 000000000..0906dd900 --- /dev/null +++ b/test/e2e/replica_set_arbiter/replica_set_arbiter_test.go @@ -0,0 +1,112 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "github.com/stretchr/testify/assert" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func Test(t *testing.T) { + +} + +func TestReplicaSetArbiter(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + type args struct { + numberOfArbiters int + scaleArbitersTo int + numberOfMembers int + expectedErrorMessage string + resourceName string + } + tests := map[string]args{ + "Number of Arbiters must be less than number of nodes": { + numberOfArbiters: 3, + numberOfMembers: 3, + expectedErrorMessage: fmt.Sprintf("error validating new Spec: number of arbiters specified (%v) is greater or equal than the number of members in the replicaset (%v). 
At least one member must not be an arbiter", 3, 3), + resourceName: "mdb0", + }, + "Number of Arbiters must be greater than 0": { + numberOfArbiters: -1, + numberOfMembers: 3, + expectedErrorMessage: "error validating new Spec: number of arbiters must be greater or equal than 0", + resourceName: "mdb1", + }, + "Scaling arbiters from 0 to 1": { + numberOfArbiters: 0, + scaleArbitersTo: 1, + numberOfMembers: 2, + resourceName: "mdb2", + }, + "Scaling Arbiters from 1 to 0": { + numberOfArbiters: 1, + scaleArbitersTo: 0, + numberOfMembers: 3, + resourceName: "mdb3", + }, + "Arbiters can be deployed in initial bootstrap": { + numberOfArbiters: 1, + scaleArbitersTo: 1, + numberOfMembers: 2, + resourceName: "mdb4", + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + mdb, user := e2eutil.NewTestMongoDB(testCtx, testConfig.resourceName, "") + mdb.Spec.Arbiters = testConfig.numberOfArbiters + mdb.Spec.Members = testConfig.numberOfMembers + // FIXME: This behavior changed in the 6.x timeline: now neither the arbiter nor the RS can reach the goal state. 
+ mdb.Spec.Version = "4.4.19" + pwd, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + if len(testConfig.expectedErrorMessage) > 0 { + t.Run("Check status", mongodbtests.StatefulSetMessageIsReceived(&mdb, testCtx, testConfig.expectedErrorMessage)) + } else { + t.Run("Check that the stateful set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Check the number of arbiters", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(ctx, &mdb, testConfig.numberOfArbiters)) + + if testConfig.numberOfArbiters != testConfig.scaleArbitersTo { + t.Run(fmt.Sprintf("Scale Arbiters to %v", testConfig.scaleArbitersTo), mongodbtests.ScaleArbiters(ctx, &mdb, testConfig.scaleArbitersTo)) + t.Run("Arbiters Stateful Set Scaled Correctly", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) + } + + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test SRV Connectivity with generated connection string secret", func(t *testing.T) { + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + scramUser := mdb.GetAuthUsers()[0] + expectedCnxStr := fmt.Sprintf("mongodb+srv://%s-user:%s@%s-svc.%s.svc.cluster.local/admin?replicaSet=%s&ssl=false", mdb.Name, pwd, mdb.Name, mdb.Namespace, mdb.Name) + cnxStrSrv := mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser) + assert.Equal(t, expectedCnxStr, cnxStrSrv) + tester.ConnectivitySucceeds(mongotester.WithURI(cnxStrSrv)) + }) + } + t.Run("Delete MongoDB Resource", mongodbtests.DeleteMongoDBResource(&mdb, testCtx)) + }) + } +} diff --git a/test/e2e/replica_set_authentication/replica_set_authentication_test.go b/test/e2e/replica_set_authentication/replica_set_authentication_test.go new file mode 100644 index 000000000..38dbcd962 --- /dev/null +++ 
b/test/e2e/replica_set_authentication/replica_set_authentication_test.go @@ -0,0 +1,128 @@ +package replica_set_authentication + +import ( + "context" + "fmt" + "os" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "go.mongodb.org/mongo-driver/bson/primitive" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetAuthentication(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + pw, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + + // Run all the possible configuration using sha256 or sha1 + t.Run("Auth test with SHA-256", testConfigAuthentication(ctx, mdb, user, pw)) + t.Run("Auth test with SHA-256 and SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1())) + t.Run("Auth test with SHA-256 (using label)", testConfigAuthentication(ctx, mdb, user, pw, withLabeledSha256())) + t.Run("Auth test with SHA-256 (using label) and SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1(), withLabeledSha256())) + t.Run("Auth test with SHA-1", testConfigAuthentication(ctx, mdb, user, pw, withSha1(), withoutSha256())) +} + +type authOptions struct { + sha256, sha1, useLabelForSha256 bool +} + +func withoutSha256() func(*authOptions) { + return func(opts *authOptions) { + opts.sha256 = false + } +} +func withLabeledSha256() func(*authOptions) { + return func(opts *authOptions) { + opts.sha256 = true + 
opts.useLabelForSha256 = true + } +} +func withSha1() func(*authOptions) { + return func(opts *authOptions) { + opts.sha1 = true + } +} + +// testConfigAuthentication run the tests using the auth options to update mdb and then checks that the resources are correctly configured +func testConfigAuthentication(ctx context.Context, mdb mdbv1.MongoDBCommunity, user mdbv1.MongoDBUser, pw string, allOptions ...func(*authOptions)) func(t *testing.T) { + return func(t *testing.T) { + + pickedOpts := authOptions{ + sha256: true, + } + for _, opt := range allOptions { + opt(&pickedOpts) + } + t.Logf("Config: use Sha256: %t (use label: %t), use Sha1: %t", pickedOpts.sha256, pickedOpts.useLabelForSha256, pickedOpts.sha1) + + enabledMechanisms := primitive.A{"SCRAM-SHA-256"} + var acceptedModes []mdbv1.AuthMode + if pickedOpts.sha256 { + if pickedOpts.useLabelForSha256 { + acceptedModes = append(acceptedModes, "SCRAM") + } else { + acceptedModes = append(acceptedModes, "SCRAM-SHA-256") + } + } + if pickedOpts.sha1 { + acceptedModes = append(acceptedModes, "SCRAM-SHA-1") + if pickedOpts.sha256 { + enabledMechanisms = primitive.A{"SCRAM-SHA-256", "SCRAM-SHA-1"} + } else { + enabledMechanisms = primitive.A{"SCRAM-SHA-1"} + } + } + + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Security.Authentication.Modes = acceptedModes + }) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + if pickedOpts.sha256 { + t.Run("Test Basic Connectivity with accepted auth", tester.ConnectivitySucceeds(WithScramWithAuth(user.Name, pw, "SCRAM-SHA-256"))) + } else { + t.Run("Test Basic Connectivity with unaccepted auth", tester.ConnectivityFails(WithScramWithAuth(user.Name, pw, "SCRAM-SHA-256"))) + } + if pickedOpts.sha1 { + t.Run("Test Basic Connectivity with accepted auth", 
tester.ConnectivitySucceeds(WithScramWithAuth(user.Name, pw, "SCRAM-SHA-1"))) + } else { + t.Run("Test Basic Connectivity with unaccepted auth", tester.ConnectivityFails(WithScramWithAuth(user.Name, pw, "SCRAM-SHA-1"))) + } + + if pickedOpts.sha256 { + t.Run("Ensure Authentication", tester.EnsureAuthenticationWithAuthIsConfigured(3, enabledMechanisms, WithScramWithAuth(user.Name, pw, "SCRAM-SHA-256"))) + } + if pickedOpts.sha1 { + t.Run("Ensure Authentication", tester.EnsureAuthenticationWithAuthIsConfigured(3, enabledMechanisms, WithScramWithAuth(user.Name, pw, "SCRAM-SHA-1"))) + } + } +} diff --git a/test/e2e/replica_set_change_version/replica_set_change_version_test.go b/test/e2e/replica_set_change_version/replica_set_change_version_test.go new file mode 100644 index 000000000..4d022f9d7 --- /dev/null +++ b/test/e2e/replica_set_change_version/replica_set_change_version_test.go @@ -0,0 +1,75 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + + appsv1 "k8s.io/api/apps/v1" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetUpgradeVersion(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + const initialMDBVersion = "4.4.18" + const upgradedMDBVersion = "5.0.12" + const upgradedWithIncreasedPatchMDBVersion = "5.0.15" + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.Version = initialMDBVersion + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + 
} + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + + // Upgrade minor version to upgradedMDBVersion + t.Run("MongoDB is reachable while minor version is upgraded", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Test Minor Version can be upgraded", mongodbtests.ChangeVersion(ctx, &mdb, upgradedMDBVersion)) + t.Run("StatefulSet has OnDelete update strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.OnDeleteStatefulSetStrategyType)) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 2)) + }) + + t.Run("StatefulSet has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.RollingUpdateStatefulSetStrategyType)) + + // Upgrade patch version to upgradedWithIncreasedPatchMDBVersion + t.Run("MongoDB is reachable while patch version is upgraded", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Test Patch Version can be upgraded", mongodbtests.ChangeVersion(ctx, &mdb, upgradedWithIncreasedPatchMDBVersion)) + t.Run("StatefulSet has OnDelete restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.OnDeleteStatefulSetStrategyType)) + t.Run("Stateful Set Reaches Ready State, after upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", 
mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) + }) + t.Run("StatefulSet has RollingUpgrade restart strategy", mongodbtests.StatefulSetHasUpdateStrategy(ctx, &mdb, appsv1.RollingUpdateStatefulSetStrategyType)) +} diff --git a/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go b/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go new file mode 100644 index 000000000..6358f9d3a --- /dev/null +++ b/test/e2e/replica_set_connection_string_options/replica_set_connection_string_options_test.go @@ -0,0 +1,111 @@ +package replica_set_connection_string_options + +import ( + "context" + "fmt" + "os" + "testing" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetWithConnectionString(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + 
+ /** + User options only. + */ + t.Run("Connection String With User Options Only", func(t *testing.T) { + t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + scramUser = mdb.GetAuthUsers()[0] + t.Run("Test Basic Connectivity With User Options", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity With User Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret with user options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret with user options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + }) + + /** + General options only. 
+ */ + t.Run("Connection String With General Options Only", func(t *testing.T) { + t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(ctx, &mdb)) + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + scramUser = mdb.GetAuthUsers()[0] + t.Run("Test Basic Connectivity With Resource Options", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity With Resource Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret with resource options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret with resource options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + }) + + /** + Overwritten options. 
+ */ + t.Run("Connection String With Overwritten Options", func(t *testing.T) { + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "primary")) + t.Run("Test Add New Connection String Option to User", mongodbtests.AddConnectionStringOptionToUser(ctx, &mdb, "readPreference", "secondary")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + scramUser = mdb.GetAuthUsers()[0] + t.Run("Test Basic Connectivity With Overwritten Options", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity With Overwritten Options", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret with overwritten options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret with overwritten options", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + }) + + /** + Wrong options. 
+ */ + t.Run("Connection String With Wrong Options", func(t *testing.T) { + t.Run("Resetting Connection String Options", mongodbtests.ResetConnectionStringOptions(ctx, &mdb)) + t.Run("Test Add New Connection String Option to Resource", mongodbtests.AddConnectionStringOption(ctx, &mdb, "readPreference", "wrong")) + t.Run("Test Secrets Are Updated", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + scramUser = mdb.GetAuthUsers()[0] + t.Run("Test Basic Connectivity", tester.ConnectivityRejected(ctx, WithURI(mdb.MongoURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test SRV Connectivity", tester.ConnectivityRejected(ctx, WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivityRejected(ctx, WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivityRejected(ctx, WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + }) + +} diff --git a/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go b/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go new file mode 100644 index 000000000..2bd41ed86 --- /dev/null +++ b/test/e2e/replica_set_cross_namespace_deploy/replica_set_cross_namespace_deploy_test.go @@ -0,0 +1,114 @@ +package replica_set_cross_namespace_deploy + +import ( + "context" + "fmt" + "os" + "testing" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestCrossNamespaceDeploy(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + postfix, err := generate.RandomValidDNS1123Label(5) + if err != nil { + t.Fatal(err) + } + namespace := fmt.Sprintf("clusterwide-test-%s", postfix) + + err = e2eutil.EnsureNamespace(testCtx, namespace) + if err != nil { + t.Fatal(err) + } + + if err := createDatabaseServiceAccountRoleAndRoleBinding(ctx, t, namespace); err != nil { + t.Fatal(err) + } + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", namespace) + + _, err = setup.GeneratePasswordForUser(testCtx, user, namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) +} + +// createDatabaseServiceAccountRoleAndRoleBinding creates the ServiceAccount, Role and RoleBinding required +// for the database StatefulSet in the other namespace. 
+func createDatabaseServiceAccountRoleAndRoleBinding(ctx context.Context, t *testing.T, namespace string) error { + sa := corev1.ServiceAccount{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &sa) + if err != nil { + t.Fatal(err) + } + + sa.Namespace = namespace + sa.ObjectMeta.ResourceVersion = "" + + err = e2eutil.TestClient.Create(ctx, &sa, &e2eutil.CleanupOptions{}) + if err != nil { + t.Fatal(err) + } + + role := rbacv1.Role{} + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &role) + if err != nil { + t.Fatal(err) + } + + role.Namespace = namespace + role.ObjectMeta.ResourceVersion = "" + + err = e2eutil.TestClient.Create(ctx, &role, &e2eutil.CleanupOptions{}) + if err != nil { + t.Fatal(err) + } + + rolebinding := rbacv1.RoleBinding{} + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: "mongodb-database", Namespace: e2eutil.OperatorNamespace}, &rolebinding) + if err != nil { + t.Fatal(err) + } + + rolebinding.Namespace = namespace + rolebinding.ObjectMeta.ResourceVersion = "" + + err = e2eutil.TestClient.Create(ctx, &rolebinding, &e2eutil.CleanupOptions{}) + if err != nil { + t.Fatal(err) + } + return nil +} diff --git a/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go b/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go new file mode 100644 index 000000000..d92d5db1b --- /dev/null +++ b/test/e2e/replica_set_custom_annotations_test/replica_set_custom_annotations_test.go @@ -0,0 +1,79 @@ +package replica_set_custom_annotations_test + +import ( + "context" + "fmt" + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "os" + "testing" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetCustomAnnotations(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.ObjectMeta = metav1.ObjectMeta{ + Labels: e2eutil.TestLabels(), + Annotations: e2eutil.TestAnnotations(), + } + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "data-volume", + Labels: e2eutil.TestLabels(), + Annotations: e2eutil.TestAnnotations(), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "logs-volume", + Labels: e2eutil.TestLabels(), + Annotations: e2eutil.TestAnnotations(), + }, + }, + } + mdb.Spec.StatefulSetConfiguration.MetadataWrapper = v1.StatefulSetMetadataWrapper{ + Labels: e2eutil.TestLabels(), + Annotations: e2eutil.TestAnnotations(), + } + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) + t.Run("Test Basic Connectivity with generated connection string secret", + 
tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Cluster has the expected labels and annotations", mongodbtests.HasExpectedMetadata(ctx, &mdb, e2eutil.TestLabels(), e2eutil.TestAnnotations())) +} diff --git a/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go b/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go new file mode 100644 index 000000000..db16c5ebe --- /dev/null +++ b/test/e2e/replica_set_custom_persistent_volume/replica_set_custom_persistent_volume_test.go @@ -0,0 +1,146 @@ +package replica_set_custom_persistent_volume + +import ( + "context" + "fmt" + "os" + "testing" + + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "github.com/stretchr/testify/assert" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + corev1 "k8s.io/api/core/v1" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +// getPersistentVolumeLocal returns a persistentVolume of type localPath and a "type" label. 
+func getPersistentVolumeLocal(name string, localPath string, label string) corev1.PersistentVolume { + return corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: e2eutil.OperatorNamespace, + Labels: map[string]string{"type": label}, + }, + Spec: corev1.PersistentVolumeSpec{ + PersistentVolumeSource: corev1.PersistentVolumeSource{ + Local: &corev1.LocalVolumeSource{ + Path: localPath, + }, + }, + AccessModes: []corev1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + StorageClassName: "default", + Capacity: corev1.ResourceList{corev1.ResourceStorage: *resource.NewScaledQuantity(int64(8), resource.Giga)}, + NodeAffinity: &corev1.VolumeNodeAffinity{ + Required: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: "In", + Values: []string{"kind-control-plane"}, + }, + }, + }, + }, + }, + }, + }, + } +} + +// getVolumes returns two persistentVolumes for each of the `members` pod. 
+// one volume will be for the `data` claim and the other will be for the `logs` claim +func getVolumes(ctx *e2eutil.TestContext, volumeType string, members int) []corev1.PersistentVolume { + volumes := make([]corev1.PersistentVolume, members) + for i := 0; i < members; i++ { + volumes[i] = getPersistentVolumeLocal( + fmt.Sprintf("%s-volume-%d", volumeType, i), + fmt.Sprintf("/opt/data/mongo-%s-%d", volumeType, i), + volumeType, + ) + } + return volumes +} + +func getPvc(pvcType string, mdb v1.MongoDBCommunity) corev1.PersistentVolumeClaim { + name := "" + if pvcType == "logs" { + name = mdb.LogsVolumeName() + } else { + name = mdb.DataVolumeName() + } + defaultStorageClass := "default" + return corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"type": pvcType}, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{"storage": *resource.NewScaledQuantity(int64(8), resource.Giga)}, + }, + StorageClassName: &defaultStorageClass, + }, + } +} + +func TestReplicaSetCustomPersistentVolumes(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{ + getPvc("data", mdb), + getPvc("logs", mdb), + } + volumesToCreate := getVolumes(testCtx, "data", mdb.Spec.Members) + volumesToCreate = append(volumesToCreate, getVolumes(testCtx, "logs", mdb.Spec.Members)...) 
+ + for i := range volumesToCreate { + err := e2eutil.TestClient.Create(ctx, &volumesToCreate[i], &e2eutil.CleanupOptions{TestContext: testCtx}) + assert.NoError(t, err) + } + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Cluster has the expected persistent volumes", mongodbtests.HasExpectedPersistentVolumes(ctx, volumesToCreate)) +} diff --git a/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go b/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go new file mode 100644 index 000000000..54075a71d --- /dev/null +++ b/test/e2e/replica_set_custom_role/replica_set_custom_role_test.go @@ -0,0 +1,111 @@ +package replica_set_custom_role + +import ( + "context" + "fmt" + "os" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetCustomRole(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + someDB := "test" + someCollection := "foo" + anyDB := "" + anyCollection := "" + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.Security.Roles = []mdbv1.CustomRole{ + { + Role: "testRole", + DB: "admin", + Privileges: []mdbv1.Privilege{ + { + Resource: mdbv1.Resource{DB: &anyDB, Collection: &someCollection}, + Actions: []string{"collStats", "find"}, + }, + { + Resource: mdbv1.Resource{DB: &someDB, Collection: &anyCollection}, + Actions: []string{"dbStats"}, + }, + { + Resource: mdbv1.Resource{DB: &someDB, Collection: &someCollection}, + Actions: []string{"collStats", "createCollection", "dbStats", "find"}, + }, + }, + Roles: []mdbv1.Role{}, + }, + { + Role: "testClusterRole", + DB: "admin", + Privileges: []mdbv1.Privilege{{ + Resource: mdbv1.Resource{Cluster: true}, + Actions: []string{"dbStats", "find"}, + }}, + Roles: []mdbv1.Role{}, + }, + { + Role: "testAnyResourceRole", + DB: "admin", + Privileges: []mdbv1.Privilege{{ + Resource: mdbv1.Resource{AnyResource: true}, + Actions: []string{"anyAction"}, + }}, + Roles: []mdbv1.Role{}, + }, + { + Role: "MongodbAutomationAgentUserRole", + DB: "admin", + Privileges: []mdbv1.Privilege{ + { + Resource: mdbv1.Resource{ + AnyResource: true, + }, + Actions: []string{"bypassDefaultMaxTimeMS"}, + }, + }, + Roles: []mdbv1.Role{}, + }, + } + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := 
FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + // Verify automation config roles and roles created in admin database. + roles := mdbv1.ConvertCustomRolesToAutomationConfigCustomRole(mdb.Spec.Security.Roles) + t.Run("AutomationConfig has the correct custom role", mongodbtests.AutomationConfigHasTheExpectedCustomRoles(ctx, &mdb, roles)) + t.Run("Custom Role was created ", tester.VerifyRoles(roles, 1)) + +} diff --git a/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go b/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go new file mode 100644 index 000000000..ff6930252 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade/replica_set_enterprise_upgrade.go @@ -0,0 +1,49 @@ +package replica_set_enterprise_upgrade + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" +) + +func DeployEnterpriseAndUpgradeTest(ctx context.Context, t *testing.T, versionsToBeTested []string) { + t.Setenv(construct.MongodbRepoUrlEnv, "docker.io/mongodb") + t.Setenv(construct.MongodbImageEnv, "mongodb-enterprise-server") + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + 
mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb.Spec.Version = versionsToBeTested[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + for i := 1; i < len(versionsToBeTested); i++ { + t.Run(fmt.Sprintf("Testing upgrade from %s to %s", versionsToBeTested[i-1], versionsToBeTested[i]), func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run(fmt.Sprintf("Upgrading to %s", versionsToBeTested[i]), mongodbtests.ChangeVersion(ctx, &mdb, versionsToBeTested[i])) + t.Run("Stateful Set Reaches Ready State, after Upgrading", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, i+1)) + }) + } +} diff --git a/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go b/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go new file mode 100644 index 000000000..298829059 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_4_5/replica_set_enterprise_upgrade_4_5_test.go @@ -0,0 +1,29 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" +) + +var ( + versionsForUpgrades = 
[]string{"4.4.19", "5.0.15"} +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go b/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go new file mode 100644 index 000000000..0e0eedef5 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_5_6/replica_set_enterprise_upgrade_5_6_test.go @@ -0,0 +1,28 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" +) + +var ( + versionsForUpgrades = []string{"5.0.15", "6.0.5"} +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_5_6_test.go b/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_5_6_test.go new file mode 100644 index 000000000..c447ca6c6 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_6_7/replica_set_enterprise_upgrade_5_6_test.go @@ -0,0 +1,28 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" +) + +var ( + versionsForUpgrades = []string{"6.0.5", "7.0.2"} +) + +func TestMain(m *testing.M) { + code, err := 
e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_5_6_test.go b/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_5_6_test.go new file mode 100644 index 000000000..00cdf8f10 --- /dev/null +++ b/test/e2e/replica_set_enterprise_upgrade_7_8/replica_set_enterprise_upgrade_5_6_test.go @@ -0,0 +1,28 @@ +package replica_set + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/replica_set_enterprise_upgrade" +) + +var ( + versionsForUpgrades = []string{"7.0.12", "8.0.0"} +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + replica_set_enterprise_upgrade.DeployEnterpriseAndUpgradeTest(ctx, t, versionsForUpgrades) +} diff --git a/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go b/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go new file mode 100644 index 000000000..1a009c812 --- /dev/null +++ b/test/e2e/replica_set_mongod_config/replica_set_mongod_config_test.go @@ -0,0 +1,73 @@ +package replica_set_mongod_config + +import ( + "context" + "fmt" + "os" + "testing" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/stretchr/objx" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + settings := []string{ + "storage.wiredTiger.engineConfig.journalCompressor", + "storage.dbPath", + } + + values := []string{ + "zlib", + "/some/path/db", + } + + // Override the journal compressor and dbPath settings + mongodConfig := objx.New(map[string]interface{}{}) + for i := range settings { + mongodConfig.Set(settings[i], values[i]) + } + + // Override the net.port setting + mongodConfig.Set("net.port", 40333.) 
+ + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + for i := range settings { + t.Run(fmt.Sprintf("Mongod setting %s has been set", settings[i]), tester.EnsureMongodConfig(settings[i], values[i])) + } + t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(40333))) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, 40333)) +} diff --git a/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go b/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go new file mode 100644 index 000000000..f398e36fc --- /dev/null +++ b/test/e2e/replica_set_mongod_port_change_with_arbiters/replica_set_mongod_port_change_with_arbiters_test.go @@ -0,0 +1,83 @@ +package replica_set_mongod_config + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetMongodPortChangeWithArbiters(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + // FIXME: This behavior has been changed in 6.x timeline and now the arbiter (nor the RS) can't reach the goal state. + mdb.Spec.Version = "4.4.19" + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + connectivityTests := func(t *testing.T) { + fmt.Printf("connectionStringForUser: %s\n", mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + + // FIXME after port change in the service mongodb+srv connection stopped working! 
+ //t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + //t.Run("Test SRV Connectivity with generated connection string secret", + // tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(mdb, scramUser)))) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(automationconfig.DefaultDBPort))) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, int32(automationconfig.DefaultDBPort))) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for MongoDB to finish setup cluster", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Connectivity tests", connectivityTests) + + t.Run("Scale to 1 Arbiter", mongodbtests.ScaleArbiters(ctx, &mdb, 1)) + t.Run("Wait for MongoDB to start scaling arbiters", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to finish scaling arbiters", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Automation config has expecter arbiter", mongodbtests.AutomationConfigReplicaSetsHaveExpectedArbiters(ctx, &mdb, 1)) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) + t.Run("Connectivity tests", connectivityTests) + + t.Run("Change port of running cluster", mongodbtests.ChangePort(ctx, &mdb, 40333)) + t.Run("Wait for MongoDB to start changing port", 
mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to finish changing port", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Stateful Set becomes ready", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Arbiters Stateful Set becomes ready", mongodbtests.ArbitersStatefulSetBecomesReady(ctx, &mdb)) + t.Run("Mongod setting net.port has been set", tester.EnsureMongodConfig("net.port", int32(40333))) + t.Run("Service has the correct port", mongodbtests.ServiceUsesCorrectPort(ctx, &mdb, int32(40333))) + t.Run("Connectivity tests", connectivityTests) +} diff --git a/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go b/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go new file mode 100644 index 000000000..d82837fb9 --- /dev/null +++ b/test/e2e/replica_set_mongod_readiness/replica_set_mongod_readiness_test.go @@ -0,0 +1,52 @@ +package replica_set_mongod_readiness + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSet(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Ensure Agent container is marked as non-ready", func(t *testing.T) { + t.Run("Break mongod data files", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongod", "mkdir /data/tmp; mv 
/data/WiredTiger.wt /data/tmp")) + // Just moving the file doesn't fail the mongod until any data is written - the easiest way is to kill the mongod + // and in this case it won't restart + t.Run("Kill mongod process", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongod", "kill 1")) + // CLOUDP-89260: mongod uptime 1 minute and readiness probe failureThreshold 40 (40 * 5 -> 200 seconds) + // note, that this may take much longer on evergreen than locally + t.Run("Pod agent container becomes not-ready", mongodbtests.PodContainerBecomesNotReady(ctx, &mdb, 0, "mongodb-agent")) + }) + t.Run("Ensure Agent container gets fixed", func(t *testing.T) { + // Note, that we call this command on the 'mongodb-agent' container as the 'mongod' container is down and we cannot + // execute shell there. But both containers share the same /data directory so we can do it from any of them. + t.Run("Fix mongod data files", mongodbtests.ExecInContainer(ctx, &mdb, 0, "mongodb-agent", "mv /data/tmp/WiredTiger.wt /data/")) + // Eventually the agent will start mongod again + t.Run("Pod agent container becomes ready", mongodbtests.PodContainerBecomesReady(ctx, &mdb, 0, "mongodb-agent")) + }) +} diff --git a/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go b/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go new file mode 100644 index 000000000..b4a03cbec --- /dev/null +++ b/test/e2e/replica_set_mount_connection_string/replica_set_mount_connection_string_test.go @@ -0,0 +1,112 @@ +package replica_set_mount_connection_string + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/wait" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +// createPythonTestPod creates a pod with a simple python app which connects to a MongoDB database +// using the connection string referenced within a given secret key. +func createPythonTestPod(idx int, namespace, secretName, secretKey string) corev1.Pod { + return corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("test-pod-%d", idx), + Namespace: namespace, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + Containers: []corev1.Container{ + { + Name: "python-app", + Image: "quay.io/mongodb/mongodb-kubernetes-operator-test-app:1.0.0", + Command: []string{"python", "main.py"}, + WorkingDir: "/app", + Env: []corev1.EnvVar{ + { + Name: "CONNECTION_STRING", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: secretName, + }, + Key: secretKey, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestMountConnectionString(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + scramUser := mdb.GetAuthUsers()[0] + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test 
Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet((mdb.Name)))) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + t.Run("Application Pod can connect to MongoDB using the generated standard connection string.", func(t *testing.T) { + testPod := createPythonTestPod(0, mdb.Namespace, fmt.Sprintf("%s-admin-%s", mdb.Name, user.Name), "connectionString.standard") + err := e2eutil.TestClient.Create(ctx, &testPod, &e2eutil.CleanupOptions{ + TestContext: testCtx, + }) + assert.NoError(t, err) + assert.NoError(t, wait.ForPodPhase(ctx, t, time.Minute*5, testPod, corev1.PodSucceeded)) + }) + + t.Run("Application Pod can connect to MongoDB using the generated secret SRV connection string", func(t *testing.T) { + testPod := createPythonTestPod(1, mdb.Namespace, fmt.Sprintf("%s-admin-%s", mdb.Name, user.Name), "connectionString.standardSrv") + err := e2eutil.TestClient.Create(ctx, &testPod, &e2eutil.CleanupOptions{ + TestContext: testCtx, + }) + assert.NoError(t, err) + assert.NoError(t, wait.ForPodPhase(ctx, t, time.Minute*5, testPod, corev1.PodSucceeded)) + }) +} diff --git a/test/e2e/replica_set_multiple/replica_set_multiple_test.go b/test/e2e/replica_set_multiple/replica_set_multiple_test.go new file mode 100644 index 000000000..a38786eb0 --- /dev/null +++ b/test/e2e/replica_set_multiple/replica_set_multiple_test.go @@ -0,0 +1,101 @@ +package 
replica_set_multiple + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +// TestReplicaSetMultiple creates two MongoDB resources that are handled by the Operator at the +// same time. One of them is scaled to 5 and then back to 3 +func TestReplicaSetMultiple(t *testing.T) { + ctx := context.Background() + + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb0, user0 := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + mdb1, user1 := e2eutil.NewTestMongoDB(testCtx, "mdb1", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user0, "") + if err != nil { + t.Fatal(err) + } + + _, err = setup.GeneratePasswordForUser(testCtx, user1, "") + if err != nil { + t.Fatal(err) + } + + tester0, err := mongotester.FromResource(ctx, t, mdb0) + if err != nil { + t.Fatal(err) + } + tester1, err := mongotester.FromResource(ctx, t, mdb1) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource mdb0", mongodbtests.CreateMongoDBResource(&mdb0, testCtx)) + t.Run("Create MongoDB Resource mdb1", mongodbtests.CreateMongoDBResource(&mdb1, testCtx)) + + t.Run("mdb0: Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb0)) + t.Run("mdb1: Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb1)) + + t.Run("mdb0: Test Basic Connectivity", tester0.ConnectivitySucceeds()) + t.Run("mdb1: Test Basic Connectivity", tester1.ConnectivitySucceeds()) + + t.Run("mdb0: AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb0, 1)) + 
t.Run("mdb1: AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb1, 1)) + + t.Run("mdb0: Ensure Authentication", tester0.EnsureAuthenticationIsConfigured(3)) + t.Run("mdb1: Ensure Authentication", tester1.EnsureAuthenticationIsConfigured(3)) + + t.Run("MongoDB is reachable while being scaled up", func(t *testing.T) { + defer tester0.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(ctx, &mdb0, 5)) + t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(ctx, &mdb0)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb0)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb0, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb0, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb0.MongoURI(""), + Phase: mdbv1.Running, + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, + })) + + // TODO: Currently the scale down process takes too long to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb0, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb0)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb0)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb0, 3)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb0, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb0.MongoURI(""), + // Phase: mdbv1.Running, + // CurrentMongoDBMembers: 5, + // CurrentStatefulSetReplicas: 5, + // })) + + }) + + // One last check that mdb1 was not altered. 
+ t.Run("mdb1: Test Basic Connectivity", tester1.ConnectivitySucceeds()) +} diff --git a/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go b/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go new file mode 100644 index 000000000..726c52514 --- /dev/null +++ b/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go @@ -0,0 +1,129 @@ +package replica_set_operator_upgrade + +import ( + "context" + "fmt" + "os" + "testing" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "github.com/stretchr/testify/assert" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetOperatorUpgrade(t *testing.T) { + ctx := context.Background() + resourceName := "mdb0" + testConfig := setup.LoadTestConfigFromEnv() + testCtx := setup.SetupWithTestConfig(ctx, t, testConfig, true, true, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + // Prior operator versions did not support MDB7 + mdb.Spec.Version = "6.0.5" + scramUser := mdb.GetAuthUsers()[0] + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + mdb.Spec.Arbiters = 1 + mdb.Spec.Members = 2 + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, 
&mdb, 1)) + mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(ctx, mdb))) + t.Run("Basic Connectivity With Generated Connection String Secret Succeeds", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) + t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) + t.Run("Connectivity Fails", tester.ConnectivityFails(WithoutTls())) + t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + }) + + // upgrade the operator to master + config := setup.LoadTestConfigFromEnv() + err = setup.DeployOperator(ctx, config, resourceName, true, false) + assert.NoError(t, err) + + // Perform the basic tests + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) +} + +// TestReplicaSetOperatorUpgradeFrom0_7_2 is intended to be run locally not in CI. +// It simulates deploying cluster using community operator 0.7.2 and then upgrading it using newer version. 
+func TestReplicaSetOperatorUpgradeFrom0_7_2(t *testing.T) { + ctx := context.Background() //nolint + t.Skip("Supporting this test in CI requires installing also CRDs from release v0.7.2") + resourceName := "mdb-upg" + testConfig := setup.LoadTestConfigFromEnv() + + // deploy operator and other components as it was at version 0.7.2 + testConfig.OperatorImage = "quay.io/mongodb/mongodb-kubernetes-operator:0.7.2" + testConfig.VersionUpgradeHookImage = "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.3" + testConfig.ReadinessProbeImage = "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.6" + testConfig.AgentImage = "quay.io/mongodb/mongodb-agent-ubi:11.0.5.6963-1" + + testCtx := setup.SetupWithTestConfig(ctx, t, testConfig, true, false, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, "") + scramUser := mdb.GetAuthUsers()[0] + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + runTests := func(t *testing.T) { + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb, true)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Test Basic Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, 
mdb, scramUser)))) + t.Run("Test SRV Connectivity with generated connection string secret", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + } + + runTests(t) + + // When running against local operator we could stop here, + // rescale helm operator deployment to zero and run local operator then. + + testConfig = setup.LoadTestConfigFromEnv() + err = setup.DeployOperator(ctx, testConfig, resourceName, true, false) + assert.NoError(t, err) + + runTests(t) +} diff --git a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go b/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go deleted file mode 100644 index a7f7830e6..000000000 --- a/test/e2e/replica_set_readiness_probe/replica_set_readiness_probe_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package replica_set_readiness_probe - -import ( - "math/rand" - "testing" - "time" - - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" - e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" - "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - f "github.com/operator-framework/operator-sdk/pkg/test" -) - -func TestMain(m *testing.M) { - f.MainEntry(m) -} - -func TestReplicaSetReadinessProbeScaling(t *testing.T) { - ctx := f.NewTestCtx(t) - defer ctx.Cleanup() - - // register our types with the testing framework - if err := e2eutil.RegisterTypesWithFramework(&mdbv1.MongoDB{}); err != nil { - t.Fatal(err) - } - - mdb := e2eutil.NewTestMongoDB() - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Config Map Was Correctly Created", mongodbtests.AutomationConfigConfigMapExists(&mdb)) - t.Run("Stateful Set Reaches Ready State", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB is reachable", mongodbtests.IsReachableDuring(&mdb, time.Second*10, - func() { - 
t.Run("Delete Random Pod", mongodbtests.DeletePod(&mdb, rand.Intn(mdb.Spec.Members-1))) - t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - })) - }, - )) -} diff --git a/test/e2e/replica_set_recovery/replica_set_recovery_test.go b/test/e2e/replica_set_recovery/replica_set_recovery_test.go new file mode 100644 index 000000000..91c9426b7 --- /dev/null +++ b/test/e2e/replica_set_recovery/replica_set_recovery_test.go @@ -0,0 +1,68 @@ +package replica_set_recovery + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetRecovery(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", 
mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + + t.Run("MongoDB is reachable", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + n, err := rand.Int(rand.Reader, big.NewInt(int64(mdb.Spec.Members))) + if err != nil { + t.Fatal(err) + } + t.Run("Delete Random Pod", mongodbtests.DeletePod(ctx, &mdb, int(n.Int64()))) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 3, + CurrentStatefulSetReplicas: 3, + })) + + }) +} diff --git a/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go b/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go new file mode 100644 index 000000000..2abeb93c3 --- /dev/null +++ b/test/e2e/replica_set_remove_user/replica_set_remove_user_test.go @@ -0,0 +1,127 @@ +package replica_set_remove_user + +import ( + "context" + "fmt" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "os" + "testing" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func intPtr(x int) *int { return &x } +func strPtr(s string) *string { return &s } + +func TestCleanupUsers(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + // config member options + memberOptions := []automationconfig.MemberOptions{ + { + Votes: intPtr(1), + Tags: map[string]string{"foo1": "bar1"}, + Priority: strPtr("1.5"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo2": "bar2"}, + Priority: strPtr("1"), + }, + { + Votes: intPtr(1), + Tags: map[string]string{"foo3": "bar3"}, + Priority: strPtr("2.5"), + }, + } + mdb.Spec.MemberConfig = memberOptions + + settings := map[string]interface{}{ + "electionTimeoutMillis": float64(20), + } + mdb.Spec.AutomationConfigOverride = &mdbv1.AutomationConfigOverride{ + ReplicaSet: mdbv1.OverrideReplicaSet{Settings: mdbv1.MapWrapper{Object: settings}}, + } + + newUser := mdbv1.MongoDBUser{ + Name: fmt.Sprintf("%s-user-2", "mdb-0"), + PasswordSecretRef: mdbv1.SecretKeyReference{ + Key: fmt.Sprintf("%s-password-2", "mdb-0"), + Name: fmt.Sprintf("%s-%s-password-secret-2", "mdb-0", testCtx.ExecutionId), + }, + Roles: []mdbv1.Role{ + // roles on testing db for general connectivity + { + DB: "testing", + Name: "readWrite", + }, + { + DB: "testing", + Name: "clusterAdmin", + }, + // admin roles for reading FCV + { + DB: "admin", + Name: "readWrite", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + { + DB: "admin", + Name: "userAdmin", + }, + }, + ScramCredentialsSecretName: fmt.Sprintf("%s-my-scram-2", "mdb-0"), + } + + _, err = setup.GeneratePasswordForUser(testCtx, 
newUser, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Test SRV Connectivity", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithoutTls(), WithReplicaSet(mdb.Name))) + t.Run("Add new user to MongoDB Resource", mongodbtests.AddUserToMongoDBCommunity(ctx, &mdb, newUser)) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + editedUser := mdb.Spec.Users[1] + t.Run("Edit connection string secret name of the added user", mongodbtests.EditConnectionStringSecretNameOfLastUser(ctx, &mdb, "other-secret-name")) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Old connection string secret is cleaned up", mongodbtests.ConnectionStringSecretIsCleanedUp(ctx, &mdb, editedUser.GetConnectionStringSecretName(mdb.Name))) + deletedUser := mdb.Spec.Users[1] + t.Run("Remove last user from MongoDB Resource", mongodbtests.RemoveLastUserFromMongoDBCommunity(ctx, &mdb)) + t.Run("MongoDB reaches Pending phase", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Removed users are added to automation config", mongodbtests.AuthUsersDeletedIsUpdated(ctx, &mdb, deletedUser)) + t.Run("MongoDB reaches Running phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Connection string secrets are cleaned up", mongodbtests.ConnectionStringSecretIsCleanedUp(ctx, &mdb, deletedUser.GetConnectionStringSecretName(mdb.Name))) + t.Run("Delete MongoDB Resource", mongodbtests.DeleteMongoDBResource(&mdb, testCtx)) +} diff --git a/test/e2e/replica_set_scale/replica_set_scaling_test.go 
b/test/e2e/replica_set_scale/replica_set_scaling_test.go index b2ea42af3..0361ba9f0 100644 --- a/test/e2e/replica_set_scale/replica_set_scaling_test.go +++ b/test/e2e/replica_set_scale/replica_set_scaling_test.go @@ -1,50 +1,76 @@ -package replica_set_readiness_probe +package replica_set_scale_up import ( + "context" + "fmt" + "os" "testing" "time" - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/pkg/apis/mongodb/v1" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" - f "github.com/operator-framework/operator-sdk/pkg/test" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" ) func TestMain(m *testing.M) { - f.MainEntry(m) + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) } -func TestReplicaSetScale(t *testing.T) { - ctx := f.NewTestCtx(t) - defer ctx.Cleanup() +func TestReplicaSetScaleUp(t *testing.T) { + ctx := context.Background() + + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() - // register our types with the testing framework - if err := e2eutil.RegisterTypesWithFramework(&mdbv1.MongoDB{}); err != nil { + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { t.Fatal(err) } - mdb := e2eutil.NewTestMongoDB() - t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, ctx)) - t.Run("Config Map Was Correctly Created", mongodbtests.AutomationConfigConfigMapExists(&mdb)) - t.Run("Stateful Set Reaches Ready State", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB is reachable", mongodbtests.IsReachableDuring(&mdb, time.Second*10, - func() { - t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(&mdb, 5)) - t.Run("Stateful Set Scaled Up Correctly", 
mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - })) - t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) - t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReady(&mdb)) - t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) - t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, - mdbv1.MongoDBStatus{ - MongoURI: mdb.MongoURI(), - Phase: mdbv1.Running, - })) - }, - )) + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("MongoDB is reachable", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Scale MongoDB Resource Up", mongodbtests.Scale(ctx, &mdb, 5)) + t.Run("Stateful Set Scaled Up Correctly", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 5, + CurrentStatefulSetReplicas: 5, + })) + + // TODO: Currently the scale down process takes too long 
to reasonably include this in the test + //t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(&mdb, 3)) + //t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(&mdb)) + //t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(&mdb)) + //t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(&mdb, 5)) + //t.Run("Test Status Was Updated", mongodbtests.Status(&mdb, + // mdbv1.MongoDBStatus{ + // MongoURI: mdb.MongoURI(""), + // Phase: mdbv1.Running, + // Version: mdb.GetMongoDBVersion(), + // CurrentMongoDBMembers: 3, + // CurrentStatefulSetReplicas: 3, + // })) + }) } diff --git a/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go new file mode 100644 index 000000000..fd03fdafc --- /dev/null +++ b/test/e2e/replica_set_scale_down/replica_set_scale_down_test.go @@ -0,0 +1,65 @@ +package replica_set_scale_down + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetScaleDown(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "replica-set-scale-down", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Keyfile authentication is configured", tester.HasKeyfileAuth(3)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + t.Run("MongoDB is reachable", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10)() + t.Run("Scale MongoDB Resource Down", mongodbtests.Scale(ctx, &mdb, 1)) + t.Run("Stateful Set Scaled Down Correctly", mongodbtests.StatefulSetIsReadyAfterScaleDown(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("AutomationConfig's version has been increased", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 3)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: 
mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: 1, + CurrentStatefulSetReplicas: 1, + })) + }) +} diff --git a/test/e2e/replica_set_tls/replica_set_tls_test.go b/test/e2e/replica_set_tls/replica_set_tls_test.go new file mode 100644 index 000000000..719bcdc8f --- /dev/null +++ b/test/e2e/replica_set_tls/replica_set_tls_test.go @@ -0,0 +1,62 @@ +package replica_set_tls + +import ( + "context" + "fmt" + "os" + "testing" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetTLS(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + scramUser := mdb.GetAuthUsers()[0] + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { + t.Run("Has TLS Mode", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Basic Connectivity Succeeds", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("SRV Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(mdb.MongoSRVURI("")), WithTls(ctx, mdb))) + t.Run("Basic Connectivity With Generated Connection 
String Secret Succeeds", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) + t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", + tester.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb, scramUser)), WithTls(ctx, mdb))) + t.Run("Connectivity Fails", tester.ConnectivityFails(WithoutTls())) + t.Run("Ensure authentication is configured", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + }) + t.Run("TLS is disabled", mongodbtests.DisableTLS(ctx, &mdb)) + t.Run("MongoDB Reaches Failed Phase", mongodbtests.MongoDBReachesFailedPhase(ctx, &mdb)) + t.Run("TLS is enabled", mongodbtests.EnableTLS(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) +} diff --git a/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go b/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go new file mode 100644 index 000000000..751a048c4 --- /dev/null +++ b/test/e2e/replica_set_tls_recreate_mdbc/replica_set_tls_recreate_mdbc_test.go @@ -0,0 +1,72 @@ +package replica_set_tls + +import ( + "context" + "fmt" + "os" + "testing" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetTLSRecreateMdbc(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb1, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + scramUser := mdb1.GetAuthUsers()[0] + mdb1.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb1, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb1)) + + if err := e2eutil.TestClient.Delete(ctx, &mdb1); err != nil { + t.Fatalf("Failed to delete first test MongoDB: %s", err) + } + t.Run("Stateful Set Is Deleted", mongodbtests.StatefulSetIsDeleted(ctx, &mdb1)) + + mdb2, _ := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + mdb2.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + tester1, err := FromResource(ctx, t, mdb2) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb2, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb2)) + mongodbtests.SkipTestIfLocal(t, "Ensure MongoDB TLS Configuration", func(t *testing.T) { + t.Run("Has TLS Mode", tester1.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb2))) + t.Run("Basic Connectivity Succeeds", tester1.ConnectivitySucceeds(WithTls(ctx, mdb2))) + t.Run("SRV Connectivity Succeeds", 
tester1.ConnectivitySucceeds(WithURI(mdb2.MongoSRVURI("")), WithTls(ctx, mdb2))) + t.Run("Basic Connectivity With Generated Connection String Secret Succeeds", + tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb2, scramUser)), WithTls(ctx, mdb2))) + t.Run("SRV Connectivity With Generated Connection String Secret Succeeds", + tester1.ConnectivitySucceeds(WithURI(mongodbtests.GetSrvConnectionStringForUser(ctx, mdb2, scramUser)), WithTls(ctx, mdb2))) + t.Run("Connectivity Fails", tester1.ConnectivityFails(WithoutTls())) + t.Run("Ensure authentication is configured", tester1.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb2))) + }) + t.Run("TLS is disabled", mongodbtests.DisableTLS(ctx, &mdb2)) + t.Run("MongoDB Reaches Failed Phase", mongodbtests.MongoDBReachesFailedPhase(ctx, &mdb2)) + t.Run("TLS is enabled", mongodbtests.EnableTLS(ctx, &mdb2)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb2)) +} diff --git a/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go new file mode 100644 index 000000000..86c1b6614 --- /dev/null +++ b/test/e2e/replica_set_tls_rotate/replica_set_tls_rotate_test.go @@ -0,0 +1,68 @@ +package replica_set_tls + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetTLSRotate(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + clientCert, err := GetClientCert(ctx, mdb) + if err != nil { + t.Fatal(err) + } + initialCertSerialNumber := clientCert.SerialNumber + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + t.Run("Test TLS required", tester.ConnectivityFails(WithoutTls())) + + t.Run("MongoDB is reachable while certificate is rotated", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(ctx, mdb))() + t.Run("Update certificate secret", tlstests.RotateCertificate(ctx, &mdb)) + t.Run("Wait for certificate to be rotated", tester.WaitForRotatedCertificate(mdb, initialCertSerialNumber)) + t.Run("Wait for MongoDB to reach Running 
Phase after rotating server cert", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Extend CA certificate validity", tlstests.ExtendCACertificate(ctx, &mdb)) + t.Run("Wait for MongoDB to start reconciling after extending CA", mongodbtests.MongoDBReachesPendingPhase(ctx, &mdb)) + t.Run("Wait for MongoDB to reach Running Phase after extending CA", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + }) +} diff --git a/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go b/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go new file mode 100644 index 000000000..0bc0448bd --- /dev/null +++ b/test/e2e/replica_set_tls_rotate_delete_sts/replica_set_tls_rotate_delete_sts_test.go @@ -0,0 +1,67 @@ +package replica_set_tls_rotate_delete_sts + +import ( + "context" + "os" + "testing" + + "fmt" + + . "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetTLSRotateDeleteSts(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + clientCert, err := GetClientCert(ctx, mdb) + if err != nil { + t.Fatal(err) + } + 
initialCertSerialNumber := clientCert.SerialNumber + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Wait for TLS to be enabled", tester.HasTlsMode("requireSSL", 60, WithTls(ctx, mdb))) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3, WithTls(ctx, mdb))) + t.Run("Test TLS required", tester.ConnectivityFails(WithoutTls())) + + t.Run("MongoDB is reachable while certificate is rotated", func(t *testing.T) { + t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(ctx, &mdb)) + t.Run("Update certificate secret", tlstests.RotateCertificate(ctx, &mdb)) + t.Run("Wait for certificate to be rotated", tester.WaitForRotatedCertificate(mdb, initialCertSerialNumber)) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for MongoDB to reach Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + }) +} diff --git a/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go new file mode 100644 index 000000000..eb85477f3 --- /dev/null +++ b/test/e2e/replica_set_tls_upgrade/replica_set_tls_upgrade_test.go @@ -0,0 +1,75 @@ +package replica_set_tls + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetTLSUpgrade(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + _, err := setup.GeneratePasswordForUser(testCtx, user, testConfig.Namespace) + if err != nil { + t.Fatal(err) + } + + tester, err := FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds(WithoutTls())) + t.Run("Ensure Authentication", tester.EnsureAuthenticationIsConfigured(3)) + + // Enable TLS as optional + t.Run("MongoDB is reachable while TLS is being enabled", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*15, WithoutTls())() + t.Run("Upgrade to TLS", tlstests.EnableTLS(ctx, &mdb, true)) + t.Run("Stateful Set Leaves Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesUnready(ctx, &mdb)) + t.Run("Stateful Set Reaches Ready State, after setting TLS to preferSSL", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for TLS to be enabled", tester.HasTlsMode("preferSSL", 60, WithoutTls())) + }) + + // Ensure MongoDB is reachable both with and without TLS + t.Run("Test Basic Connectivity", 
tester.ConnectivitySucceeds(WithoutTls())) + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Internal cluster keyfile authentication is enabled", tester.HasKeyfileAuth(3, WithTls(ctx, mdb))) + + // Make TLS required + t.Run("MongoDB is reachable over TLS while making TLS required", func(t *testing.T) { + defer tester.StartBackgroundConnectivityTest(t, time.Second*10, WithTls(ctx, mdb))() + t.Run("Make TLS required", tlstests.EnableTLS(ctx, &mdb, false)) + t.Run("Stateful Set Reaches Ready State, after setting TLS to requireSSL", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("Wait for TLS to be required", tester.HasTlsMode("requireSSL", 120, WithTls(ctx, mdb))) + }) + + // Ensure MongoDB is reachable only over TLS + t.Run("Test Basic TLS Connectivity", tester.ConnectivitySucceeds(WithTls(ctx, mdb))) + t.Run("Test TLS Required For Connectivity", tester.ConnectivityFails(WithoutTls())) +} diff --git a/test/e2e/replica_set_x509/replica_set_x509_test.go b/test/e2e/replica_set_x509/replica_set_x509_test.go new file mode 100644 index 000000000..a7ed3503c --- /dev/null +++ b/test/e2e/replica_set_x509/replica_set_x509_test.go @@ -0,0 +1,223 @@ +package replica_set_x509 + +import ( + "context" + "fmt" + "os" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + v1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/tlstests" + . 
"github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + "github.com/stretchr/testify/assert" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestReplicaSetX509(t *testing.T) { + ctx := context.Background() + resourceName := "mdb-tls" + helmArgs := []setup.HelmArg{ + {Name: "resource.tls.useX509", Value: "true"}, + {Name: "resource.tls.sampleX509User", Value: "true"}, + } + testCtx, testConfig := setup.SetupWithTLS(ctx, t, resourceName, helmArgs...) + defer testCtx.Teardown() + + mdb, _ := e2eutil.NewTestMongoDB(testCtx, resourceName, testConfig.Namespace) + mdb.Spec.Security.Authentication.Modes = []v1.AuthMode{"X509"} + mdb.Spec.Security.TLS = e2eutil.NewTestTLSConfig(false) + + tester, err := FromX509Resource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Connection with certificates of wrong user", func(t *testing.T) { + mdb.Spec.Users = []v1.MongoDBUser{ + getInvalidUser(), + } + users := mdb.GetAuthUsers() + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) + + cert, root, dir := createCerts(ctx, t, &mdb) + defer os.RemoveAll(dir) + + t.Run("Connectivity Fails without certs", tester.ConnectivityFails(WithURI(mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0])), WithTls(ctx, mdb))) + t.Run("Connectivity Fails with invalid certs", tester.ConnectivityFails(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) + }) + + t.Run("Connection with valid certificate", func(t *testing.T) { + t.Run("Update MongoDB Resource", func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { + m.Spec.Users = 
[]v1.MongoDBUser{getValidUser()} + }) + assert.NoError(t, err) + }) + + cert, root, dir := createCerts(ctx, t, &mdb) + defer os.RemoveAll(dir) + + users := mdb.GetAuthUsers() + + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) + }) + + t.Run("Rotate agent certificate", func(t *testing.T) { + agentCert, err := GetAgentCert(ctx, mdb) + if err != nil { + t.Fatal(err) + } + initialCertSerialNumber := agentCert.SerialNumber + + initialAgentPem := &corev1.Secret{} + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), initialAgentPem) + assert.NoError(t, err) + + cert, root, dir := createCerts(ctx, t, &mdb) + defer os.RemoveAll(dir) + + users := mdb.GetAuthUsers() + + t.Run("Update certificate secret", tlstests.RotateAgentCertificate(ctx, &mdb)) + t.Run("Wait for MongoDB to reach Running Phase after rotating agent cert", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + + agentCert, err = GetAgentCert(ctx, mdb) + if err != nil { + t.Fatal(err) + } + finalCertSerialNumber := agentCert.SerialNumber + + assert.NotEqual(t, finalCertSerialNumber, initialCertSerialNumber) + + finalAgentPem := &corev1.Secret{} + err = e2eutil.TestClient.Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), finalAgentPem) + assert.NoError(t, err) + + assert.NotEqual(t, finalAgentPem.Data, initialAgentPem.Data) + + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) + }) + + t.Run("Transition to also allow SCRAM", func(t *testing.T) { + t.Run("Update MongoDB Resource", func(t *testing.T) { + err := 
e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { + m.Spec.Security.Authentication.Modes = []v1.AuthMode{"X509", "SCRAM"} + m.Spec.Security.Authentication.AgentMode = "X509" + }) + assert.NoError(t, err) + }) + + cert, root, dir := createCerts(ctx, t, &mdb) + defer os.RemoveAll(dir) + + users := mdb.GetAuthUsers() + + t.Run("Basic tests", mongodbtests.BasicFunctionalityX509(ctx, &mdb)) + t.Run("Agent certificate secrets configured", mongodbtests.AgentX509SecretsExists(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) + }) + + t.Run("Transition to SCRAM agent", func(t *testing.T) { + t.Run("Update MongoDB Resource", func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, &mdb, func(m *v1.MongoDBCommunity) { + m.Spec.Security.Authentication.AgentMode = "SCRAM" + }) + assert.NoError(t, err) + }) + + cert, root, dir := createCerts(ctx, t, &mdb) + defer os.RemoveAll(dir) + + users := mdb.GetAuthUsers() + + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Connectivity Succeeds", tester.ConnectivitySucceeds(WithURI(fmt.Sprintf("%s&tlsCAFile=%s&tlsCertificateKeyFile=%s", mongodbtests.GetConnectionStringForUser(ctx, mdb, users[0]), root, cert)))) + }) + +} + +func getValidUser() v1.MongoDBUser { + return v1.MongoDBUser{ + Name: "CN=my-x509-user,OU=organizationalunit,O=organization", + DB: constants.ExternalDB, + Roles: []v1.Role{ + { + DB: "admin", + Name: "readWriteAnyDatabase", + }, + { + DB: "admin", + Name: "clusterAdmin", + }, + { + DB: "admin", + Name: "userAdminAnyDatabase", + }, + }, + } +} + +func getInvalidUser() v1.MongoDBUser { + return v1.MongoDBUser{ + Name: "CN=my-invalid-x509-user,OU=organizationalunit,O=organization", + DB: constants.ExternalDB, + Roles: []v1.Role{ + { + DB: "admin", + Name: "readWriteAnyDatabase", + }, + { + DB: "admin", + 
Name: "clusterAdmin", + }, + { + DB: "admin", + Name: "userAdminAnyDatabase", + }, + }, + } +} + +func createCerts(ctx context.Context, t *testing.T, mdb *v1.MongoDBCommunity) (string, string, string) { + dir, _ := os.MkdirTemp("", "certdir") + + t.Logf("Creating client certificate pem file") + cert, _ := os.CreateTemp(dir, "pem") + clientCertSecret := corev1.Secret{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{ + Namespace: mdb.Namespace, + Name: "my-x509-user-cert", + }, &clientCertSecret) + assert.NoError(t, err) + + _, err = cert.Write(append(clientCertSecret.Data["tls.crt"], clientCertSecret.Data["tls.key"]...)) + assert.NoError(t, err) + t.Logf("Created pem file: %s", cert.Name()) + + t.Logf("Creating root ca file") + root, _ := os.CreateTemp(dir, "root") + _, err = root.Write(clientCertSecret.Data["ca.crt"]) + assert.NoError(t, err) + t.Logf("Created root ca file: %s", root.Name()) + + return cert.Name(), root.Name(), dir +} diff --git a/test/e2e/setup/setup.go b/test/e2e/setup/setup.go new file mode 100644 index 000000000..8bf9595bd --- /dev/null +++ b/test/e2e/setup/setup.go @@ -0,0 +1,286 @@ +package setup + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/helm" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + waite2e "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/wait" + + appsv1 "k8s.io/api/apps/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/generate" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" +) + +type tlsSecretType string + +type HelmArg struct { + Name string + Value string 
+} + +const ( + performCleanupEnv = "PERFORM_CLEANUP" + CertKeyPair tlsSecretType = "CERTKEYPAIR" + Pem tlsSecretType = "PEM" +) + +func Setup(ctx context.Context, t *testing.T) *e2eutil.TestContext { + testCtx, err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo + + if err != nil { + t.Fatal(err) + } + + config := LoadTestConfigFromEnv() + if err := DeployOperator(ctx, config, "mdb", false, false); err != nil { + t.Fatal(err) + } + + return testCtx +} + +func SetupWithTLS(ctx context.Context, t *testing.T, resourceName string, additionalHelmArgs ...HelmArg) (*e2eutil.TestContext, TestConfig) { + textCtx, err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo + + if err != nil { + t.Fatal(err) + } + + config := LoadTestConfigFromEnv() + if err := deployCertManager(config); err != nil { + t.Fatal(err) + } + + if err := DeployOperator(ctx, config, resourceName, true, false, additionalHelmArgs...); err != nil { + t.Fatal(err) + } + + return textCtx, config +} + +func SetupWithTestConfig(ctx context.Context, t *testing.T, testConfig TestConfig, withTLS, defaultOperator bool, resourceName string) *e2eutil.TestContext { + testCtx, err := e2eutil.NewContext(ctx, t, envvar.ReadBool(performCleanupEnv)) // nolint:forbidigo + + if err != nil { + t.Fatal(err) + } + + if withTLS { + if err := deployCertManager(testConfig); err != nil { + t.Fatal(err) + } + } + + if err := DeployOperator(ctx, testConfig, resourceName, withTLS, defaultOperator); err != nil { + t.Fatal(err) + } + + return testCtx +} + +// GeneratePasswordForUser will create a secret with a password for the given user +func GeneratePasswordForUser(testCtx *e2eutil.TestContext, mdbu mdbv1.MongoDBUser, namespace string) (string, error) { + passwordKey := mdbu.PasswordSecretRef.Key + if passwordKey == "" { + passwordKey = "password" + } + + password, err := generate.RandomFixedLengthStringOfSize(20) + if err != nil { + return "", err + } + + nsp 
:= namespace + if nsp == "" { + nsp = e2eutil.OperatorNamespace + } + + passwordSecret := secret.Builder(). + SetName(mdbu.PasswordSecretRef.Name). + SetNamespace(nsp). + SetField(passwordKey, password). + SetLabels(e2eutil.TestLabels()). + Build() + + return password, e2eutil.TestClient.Create(testCtx.Ctx, &passwordSecret, &e2eutil.CleanupOptions{TestContext: testCtx}) +} + +// extractRegistryNameAndVersion splits a full image string and returns the individual components. +// this function expects the input to be in the form of some/registry/imagename:tag. +func extractRegistryNameAndVersion(fullImage string) (string, string, string) { + splitString := strings.Split(fullImage, "/") + registry := strings.Join(splitString[:len(splitString)-1], "/") + + splitString = strings.Split(splitString[len(splitString)-1], ":") + version := "latest" + if len(splitString) > 1 { + version = splitString[len(splitString)-1] + } + name := splitString[0] + return registry, name, version +} + +// getHelmArgs returns a map of helm arguments that are required to install the operator. 
+func getHelmArgs(testConfig TestConfig, watchNamespace string, resourceName string, withTLS bool, defaultOperator bool, additionalHelmArgs ...HelmArg) map[string]string { + agentRegistry, agentName, agentVersion := extractRegistryNameAndVersion(testConfig.AgentImage) + versionUpgradeHookRegistry, versionUpgradeHookName, versionUpgradeHookVersion := extractRegistryNameAndVersion(testConfig.VersionUpgradeHookImage) + readinessProbeRegistry, readinessProbeName, readinessProbeVersion := extractRegistryNameAndVersion(testConfig.ReadinessProbeImage) + operatorRegistry, operatorName, operatorVersion := extractRegistryNameAndVersion(testConfig.OperatorImage) + + helmArgs := make(map[string]string) + + helmArgs["namespace"] = testConfig.Namespace + + helmArgs["operator.watchNamespace"] = watchNamespace + + if !defaultOperator { + helmArgs["operator.operatorImageName"] = operatorName + helmArgs["operator.version"] = operatorVersion + helmArgs["versionUpgradeHook.name"] = versionUpgradeHookName + helmArgs["versionUpgradeHook.version"] = versionUpgradeHookVersion + + helmArgs["readinessProbe.name"] = readinessProbeName + helmArgs["readinessProbe.version"] = readinessProbeVersion + + helmArgs["agent.version"] = agentVersion + helmArgs["agent.name"] = agentName + + helmArgs["mongodb.name"] = testConfig.MongoDBImage + helmArgs["mongodb.repo"] = testConfig.MongoDBRepoUrl + + helmArgs["registry.versionUpgradeHook"] = versionUpgradeHookRegistry + helmArgs["registry.operator"] = operatorRegistry + helmArgs["registry.agent"] = agentRegistry + helmArgs["registry.readinessProbe"] = readinessProbeRegistry + } + + helmArgs["community-operator-crds.enabled"] = strconv.FormatBool(false) + + helmArgs["createResource"] = strconv.FormatBool(false) + helmArgs["resource.name"] = resourceName + helmArgs["resource.tls.enabled"] = strconv.FormatBool(withTLS) + helmArgs["resource.tls.useCertManager"] = strconv.FormatBool(withTLS) + + for _, arg := range additionalHelmArgs { + helmArgs[arg.Name] = 
arg.Value + } + + return helmArgs +} + +// DeployOperator installs all resources required by the operator using helm. +func DeployOperator(ctx context.Context, config TestConfig, resourceName string, withTLS bool, defaultOperator bool, additionalHelmArgs ...HelmArg) error { + e2eutil.OperatorNamespace = config.Namespace + fmt.Printf("Setting operator namespace to %s\n", e2eutil.OperatorNamespace) + watchNamespace := config.Namespace + if config.ClusterWide { + watchNamespace = "*" + } + fmt.Printf("Setting namespace to watch to %s\n", watchNamespace) + + helmChartName := "mongodb-kubernetes-operator" + if err := helm.Uninstall(helmChartName, config.Namespace); err != nil { + return err + } + + helmArgs := getHelmArgs(config, watchNamespace, resourceName, withTLS, defaultOperator, additionalHelmArgs...) + helmFlags := map[string]string{ + "namespace": config.Namespace, + "create-namespace": "", + } + + if config.LocalOperator { + helmArgs["operator.replicas"] = "0" + } + + if err := helm.DependencyUpdate(config.HelmChartPath); err != nil { + return err + } + + if err := helm.Install(config.HelmChartPath, helmChartName, helmFlags, helmArgs); err != nil { + return err + } + + dep, err := waite2e.ForDeploymentToExist(ctx, "mongodb-kubernetes-operator", time.Second*10, time.Minute*1, e2eutil.OperatorNamespace) + if err != nil { + return err + } + + quantityCPU, err := resource.ParseQuantity("50m") + if err != nil { + return err + } + + for _, cont := range dep.Spec.Template.Spec.Containers { + cont.Resources.Requests["cpu"] = quantityCPU + } + + err = e2eutil.TestClient.Update(ctx, &dep) + if err != nil { + return err + } + + if err := wait.PollUntilContextTimeout(ctx, time.Second*2, 120*time.Second, true, hasDeploymentRequiredReplicas(&dep)); err != nil { + return errors.New("error building operator deployment: the deployment does not have the required replicas") + } + fmt.Println("Successfully installed the operator deployment") + return nil +} + +func 
deployCertManager(config TestConfig) error { + const helmChartName = "cert-manager" + if err := helm.Uninstall(helmChartName, config.CertManagerNamespace); err != nil { + return fmt.Errorf("failed to uninstall cert-manager Helm chart: %s", err) + } + + charlUrl := fmt.Sprintf("https://charts.jetstack.io/charts/cert-manager-%s.tgz", config.CertManagerVersion) + flags := map[string]string{ + "version": config.CertManagerVersion, + "namespace": config.CertManagerNamespace, + "create-namespace": "", + } + values := map[string]string{"installCRDs": "true"} + if err := helm.Install(charlUrl, helmChartName, flags, values); err != nil { + return fmt.Errorf("failed to install cert-manager Helm chart: %s", err) + } + return nil +} + +// hasDeploymentRequiredReplicas returns a condition function that indicates whether the given deployment +// currently has the required amount of replicas in the ready state as specified in spec.replicas +func hasDeploymentRequiredReplicas(dep *appsv1.Deployment) wait.ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + err := e2eutil.TestClient.Get(ctx, + types.NamespacedName{Name: dep.Name, + Namespace: e2eutil.OperatorNamespace}, + dep) + if err != nil { + if apiErrors.IsNotFound(err) { + return false, nil + } + return false, fmt.Errorf("error getting operator deployment: %s", err) + } + if dep.Status.ReadyReplicas == *dep.Spec.Replicas { + return true, nil + } + fmt.Printf("Deployment not ready! 
ReadyReplicas: %d, Spec.Replicas: %d\n", dep.Status.ReadyReplicas, *dep.Spec.Replicas) + return false, nil + } +} diff --git a/test/e2e/setup/test_config.go b/test/e2e/setup/test_config.go new file mode 100644 index 000000000..1fc247021 --- /dev/null +++ b/test/e2e/setup/test_config.go @@ -0,0 +1,52 @@ +package setup + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" +) + +const ( + testNamespaceEnvName = "TEST_NAMESPACE" + testCertManagerNamespaceEnvName = "TEST_CERT_MANAGER_NAMESPACE" + testCertManagerVersionEnvName = "TEST_CERT_MANAGER_VERSION" + operatorImageEnvName = "OPERATOR_IMAGE" + clusterWideEnvName = "CLUSTER_WIDE" + performCleanupEnvName = "PERFORM_CLEANUP" + helmChartPathEnvName = "HELM_CHART_PATH" + LocalOperatorEnvName = "MDB_LOCAL_OPERATOR" +) + +type TestConfig struct { + Namespace string + CertManagerNamespace string + CertManagerVersion string + OperatorImage string + VersionUpgradeHookImage string + ClusterWide bool + PerformCleanup bool + AgentImage string + ReadinessProbeImage string + HelmChartPath string + MongoDBImage string + MongoDBRepoUrl string + LocalOperator bool +} + +func LoadTestConfigFromEnv() TestConfig { + return TestConfig{ + Namespace: envvar.GetEnvOrDefault(testNamespaceEnvName, "mongodb"), // nolint:forbidigo + CertManagerNamespace: envvar.GetEnvOrDefault(testCertManagerNamespaceEnvName, "cert-manager"), // nolint:forbidigo + CertManagerVersion: envvar.GetEnvOrDefault(testCertManagerVersionEnvName, "v1.5.3"), // nolint:forbidigo + OperatorImage: envvar.GetEnvOrDefault(operatorImageEnvName, "quay.io/mongodb/community-operator-dev:latest"), // nolint:forbidigo + MongoDBImage: envvar.GetEnvOrDefault(construct.MongodbImageEnv, "mongodb-community-server"), // nolint:forbidigo + MongoDBRepoUrl: envvar.GetEnvOrDefault(construct.MongodbRepoUrlEnv, "quay.io/mongodb"), // nolint:forbidigo + VersionUpgradeHookImage: 
envvar.GetEnvOrDefault(construct.VersionUpgradeHookImageEnv, "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2"), // nolint:forbidigo + // TODO: better way to decide default agent image. + AgentImage: envvar.GetEnvOrDefault(construct.AgentImageEnv, "quay.io/mongodb/mongodb-agent-ubi:10.29.0.6830-1"), // nolint:forbidigo + ClusterWide: envvar.ReadBool(clusterWideEnvName), // nolint:forbidigo + PerformCleanup: envvar.ReadBool(performCleanupEnvName), // nolint:forbidigo + ReadinessProbeImage: envvar.GetEnvOrDefault(construct.ReadinessProbeImageEnv, "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3"), // nolint:forbidigo + HelmChartPath: envvar.GetEnvOrDefault(helmChartPathEnvName, "/workspace/helm-charts/charts/community-operator"), // nolint:forbidigo + LocalOperator: envvar.ReadBool(LocalOperatorEnvName), // nolint:forbidigo + } +} diff --git a/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go b/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go new file mode 100644 index 000000000..d622cc68d --- /dev/null +++ b/test/e2e/statefulset_arbitrary_config/statefulset_arbitrary_config_test.go @@ -0,0 +1,75 @@ +package statefulset_arbitrary_config_update + +import ( + "context" + "fmt" + "os" + "reflect" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestStatefulSetArbitraryConfig(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") 
+ + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + overrideTolerations := []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "key2", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectPreferNoSchedule, + }, + } + + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.Spec.Containers[1].ReadinessProbe = &corev1.Probe{TimeoutSeconds: 100} + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.Spec.Tolerations = overrideTolerations + + customServiceName := "database" + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName = customServiceName + + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test setting Service Name", mongodbtests.ServiceWithNameExists(ctx, customServiceName, mdb.Namespace)) + t.Run("Test Basic Connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(ctx, &mdb, "mongodb-agent", func(container corev1.Container) bool { + return container.ReadinessProbe.TimeoutSeconds == 100 + })) + t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(ctx, &mdb, func(sts appsv1.StatefulSet) bool { + return reflect.DeepEqual(overrideTolerations, sts.Spec.Template.Spec.Tolerations) + })) +} diff --git a/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go b/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go new file mode 100644 index 000000000..051189946 --- /dev/null 
+++ b/test/e2e/statefulset_arbitrary_config_update/statefulset_arbitrary_config_update_test.go @@ -0,0 +1,81 @@ +package statefulset_arbitrary_config + +import ( + "context" + "fmt" + "os" + "reflect" + "testing" + + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/util/mongotester" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestStatefulSetArbitraryConfig(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + tester, err := mongotester.FromResource(ctx, t, mdb) + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test basic connectivity", tester.ConnectivitySucceeds()) + t.Run("AutomationConfig has the correct version", mongodbtests.AutomationConfigVersionHasTheExpectedVersion(ctx, &mdb, 1)) + + overrideTolerations := []corev1.Toleration{ + { + Key: "key1", + Value: "value1", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "key2", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectPreferNoSchedule, + }, + } + + overrideSpec := mdb.Spec.StatefulSetConfiguration + overrideSpec.SpecWrapper.Spec.Template.Spec.Containers[1].ReadinessProbe = &corev1.Probe{TimeoutSeconds: 100} + 
overrideSpec.SpecWrapper.Spec.Template.Spec.Tolerations = overrideTolerations + + err = e2eutil.UpdateMongoDBResource(ctx, &mdb, func(mdb *mdbv1.MongoDBCommunity) { mdb.Spec.StatefulSetConfiguration = overrideSpec }) + + assert.NoError(t, err) + + t.Run("Basic tests after update", mongodbtests.BasicFunctionality(ctx, &mdb)) + t.Run("Test basic connectivity after update", tester.ConnectivitySucceeds()) + t.Run("Container has been merged by name", mongodbtests.StatefulSetContainerConditionIsTrue(ctx, &mdb, "mongodb-agent", func(container corev1.Container) bool { + return container.ReadinessProbe.TimeoutSeconds == 100 + })) + t.Run("Tolerations have been added correctly", mongodbtests.StatefulSetConditionIsTrue(ctx, &mdb, func(sts appsv1.StatefulSet) bool { + return reflect.DeepEqual(overrideTolerations, sts.Spec.Template.Spec.Tolerations) + })) +} diff --git a/test/e2e/statefulset_delete/statefulset_delete_test.go b/test/e2e/statefulset_delete/statefulset_delete_test.go new file mode 100644 index 000000000..3117109e6 --- /dev/null +++ b/test/e2e/statefulset_delete/statefulset_delete_test.go @@ -0,0 +1,49 @@ +package statefulset_delete + +import ( + "context" + "fmt" + "os" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/mongodbtests" + "github.com/mongodb/mongodb-kubernetes-operator/test/e2e/setup" +) + +func TestMain(m *testing.M) { + code, err := e2eutil.RunTest(m) + if err != nil { + fmt.Println(err) + } + os.Exit(code) +} + +func TestStatefulSetDelete(t *testing.T) { + ctx := context.Background() + testCtx := setup.Setup(ctx, t) + defer testCtx.Teardown() + + mdb, user := e2eutil.NewTestMongoDB(testCtx, "mdb0", "") + + _, err := setup.GeneratePasswordForUser(testCtx, user, "") + if err != nil { + t.Fatal(err) + } + + t.Run("Create MongoDB Resource", mongodbtests.CreateMongoDBResource(&mdb, testCtx)) + 
t.Run("Basic tests", mongodbtests.BasicFunctionality(ctx, &mdb)) + + t.Run("Operator recreates StatefulSet", func(t *testing.T) { + t.Run("Delete Statefulset", mongodbtests.DeleteStatefulSet(ctx, &mdb)) + t.Run("Test Replica Set Recovers", mongodbtests.StatefulSetBecomesReady(ctx, &mdb)) + t.Run("MongoDB Reaches Running Phase", mongodbtests.MongoDBReachesRunningPhase(ctx, &mdb)) + t.Run("Test Status Was Updated", mongodbtests.Status(ctx, &mdb, mdbv1.MongoDBCommunityStatus{ + MongoURI: mdb.MongoURI(""), + Phase: mdbv1.Running, + Version: mdb.GetMongoDBVersion(), + CurrentMongoDBMembers: mdb.DesiredReplicas(), + })) + }) +} diff --git a/test/e2e/tlstests/tlstests.go b/test/e2e/tlstests/tlstests.go new file mode 100644 index 000000000..6d327ec0d --- /dev/null +++ b/test/e2e/tlstests/tlstests.go @@ -0,0 +1,103 @@ +package tlstests + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +// EnableTLS will upgrade an existing TLS cluster to use TLS. 
+func EnableTLS(ctx context.Context, mdb *mdbv1.MongoDBCommunity, optional bool) func(*testing.T) { + return func(t *testing.T) { + err := e2eutil.UpdateMongoDBResource(ctx, mdb, func(db *mdbv1.MongoDBCommunity) { + db.Spec.Security.TLS = e2eutil.NewTestTLSConfig(optional) + }) + if err != nil { + t.Fatal(err) + } + } +} + +func ExtendCACertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + certGVR := schema.GroupVersionResource{ + Group: "cert-manager.io", + Version: "v1", + Resource: "certificates", + } + caCertificateClient := e2eutil.TestClient.DynamicClient.Resource(certGVR).Namespace(mdb.Namespace) + patch := []interface{}{ + map[string]interface{}{ + "op": "replace", + "path": "/spec/duration", + "value": "8760h0m0s", + }, + map[string]interface{}{ + "op": "replace", + "path": "/spec/renewBefore", + "value": "720h0m0s", + }, + map[string]interface{}{ + "op": "add", + "path": "/spec/dnsNames", + "value": []string{"*.ca-example.domain"}, + }, + } + payload, err := json.Marshal(patch) + assert.NoError(t, err) + _, err = caCertificateClient.Patch(ctx, "tls-selfsigned-ca", types.JSONPatchType, payload, metav1.PatchOptions{}) + assert.NoError(t, err) + } +} + +func RotateCertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + certKeySecretName := mdb.TLSSecretNamespacedName() + rotateCertManagerSecret(ctx, certKeySecretName, t) + } +} + +func RotateAgentCertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + agentCertSecretName := mdb.AgentCertificateSecretNamespacedName() + rotateCertManagerSecret(ctx, agentCertSecretName, t) + } +} + +func RotateCACertificate(ctx context.Context, mdb *mdbv1.MongoDBCommunity) func(*testing.T) { + return func(t *testing.T) { + caCertSecretName := mdb.TLSCaCertificateSecretNamespacedName() + rotateCertManagerSecret(ctx, caCertSecretName, t) + } +} + +func 
rotateCertManagerSecret(ctx context.Context, secretName types.NamespacedName, t *testing.T) { + currentSecret := corev1.Secret{} + err := e2eutil.TestClient.Get(ctx, secretName, ¤tSecret) + assert.NoError(t, err) + + // delete current cert secret, cert-manager should generate a new one + err = e2eutil.TestClient.Delete(ctx, ¤tSecret) + assert.NoError(t, err) + + newSecret := corev1.Secret{} + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 1*time.Minute, false, func(ctx context.Context) (done bool, err error) { + if err := e2eutil.TestClient.Get(ctx, secretName, &newSecret); err != nil { + return false, nil + } + return true, nil + }) + assert.NoError(t, err) + assert.False(t, bytes.Equal(currentSecret.Data[corev1.TLSCertKey], newSecret.Data[corev1.TLSCertKey])) +} diff --git a/test/e2e/util/mongotester/mongotester.go b/test/e2e/util/mongotester/mongotester.go new file mode 100644 index 000000000..58ad54181 --- /dev/null +++ b/test/e2e/util/mongotester/mongotester.go @@ -0,0 +1,635 @@ +package mongotester + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stretchr/objx" + "go.mongodb.org/mongo-driver/bson/primitive" + + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/bson" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" +) + +type Tester struct { + ctx context.Context + mongoClient *mongo.Client + clientOpts []*options.ClientOptions + resource *mdbv1.MongoDBCommunity +} + +func newTester(ctx context.Context, mdb *mdbv1.MongoDBCommunity, opts ...*options.ClientOptions) *Tester { + t := &Tester{ + 
ctx: ctx, + resource: mdb, + } + t.clientOpts = append(t.clientOpts, opts...) + return t +} + +// OptionApplier is an interface which is able to accept a list +// of options.ClientOptions, and return the final desired list +// making any modifications required +type OptionApplier interface { + ApplyOption(opts ...*options.ClientOptions) []*options.ClientOptions +} + +// FromResource returns a Tester instance from a MongoDB resource. It infers SCRAM username/password +// and the hosts from the resource. +func FromResource(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { + var clientOpts []*options.ClientOptions + + clientOpts = WithHosts(mdb.Hosts("")).ApplyOption(clientOpts...) + + t.Logf("Configuring hosts: %s for MongoDB: %s", mdb.Hosts(""), mdb.NamespacedName()) + + users := mdb.Spec.Users + if len(users) == 1 { + user := users[0] + passwordSecret := corev1.Secret{} + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: user.PasswordSecretRef.Name, Namespace: mdb.Namespace}, &passwordSecret) + if err != nil { + return nil, err + } + t.Logf("Configuring SCRAM username: %s and password from secret %s for MongoDB: %s", user.Name, user.PasswordSecretRef.Name, mdb.NamespacedName()) + clientOpts = WithScram(user.Name, string(passwordSecret.Data[user.PasswordSecretRef.Key])).ApplyOption(clientOpts...) + } + + // add any additional options + for _, opt := range opts { + clientOpts = opt.ApplyOption(clientOpts...) + } + + return newTester(ctx, &mdb, clientOpts...), nil +} + +func FromX509Resource(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, opts ...OptionApplier) (*Tester, error) { + var clientOpts []*options.ClientOptions + + clientOpts = WithHosts(mdb.Hosts("")).ApplyOption(clientOpts...) + + t.Logf("Configuring hosts: %s for MongoDB: %s", mdb.Hosts(""), mdb.NamespacedName()) + + users := mdb.Spec.Users + if len(users) == 1 { + clientOpts = WithX509().ApplyOption(clientOpts...) 
+ } + + // add any additional options + for _, opt := range opts { + clientOpts = opt.ApplyOption(clientOpts...) + } + + return newTester(ctx, &mdb, clientOpts...), nil +} + +// ConnectivitySucceeds performs a basic check that ensures that it is possible +// to connect to the MongoDB resource +func (m *Tester) ConnectivitySucceeds(opts ...OptionApplier) func(t *testing.T) { + return m.connectivityCheck(true, opts...) +} + +// ConnectivityFails performs a basic check that ensures that it is not possible +// to connect to the MongoDB resource +func (m *Tester) ConnectivityFails(opts ...OptionApplier) func(t *testing.T) { + return m.connectivityCheck(false, opts...) +} + +func (m *Tester) ConnectivityRejected(ctx context.Context, opts ...OptionApplier) func(t *testing.T) { + clientOpts := make([]*options.ClientOptions, 0) + for _, optApplier := range opts { + clientOpts = optApplier.ApplyOption(clientOpts...) + } + + return func(t *testing.T) { + // We can optionally skip connectivity tests locally + if testing.Short() { + t.Skip() + } + + if err := m.ensureClient(ctx, clientOpts...); err == nil { + t.Fatalf("No error, but it should have failed") + } + } +} + +func (m *Tester) HasKeyfileAuth(tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminParameter("clusterAuthMode", "keyFile", tries, opts...) +} + +func (m *Tester) HasFCV(fcv string, tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminParameter("featureCompatibilityVersion", map[string]interface{}{"version": fcv}, tries, opts...) +} + +func (m *Tester) ScramIsConfigured(tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminParameter("authenticationMechanisms", primitive.A{"SCRAM-SHA-256"}, tries, opts...) +} + +func (m *Tester) ScramWithAuthIsConfigured(tries int, enabledMechanisms primitive.A, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminParameter("authenticationMechanisms", enabledMechanisms, tries, opts...) 
+} + +func (m *Tester) EnsureAuthenticationIsConfigured(tries int, opts ...OptionApplier) func(t *testing.T) { + return func(t *testing.T) { + t.Run("Ensure keyFile authentication is configured", m.HasKeyfileAuth(tries, opts...)) + t.Run("SCRAM-SHA-256 is configured", m.ScramIsConfigured(tries, opts...)) + } +} + +func (m *Tester) EnsureAuthenticationWithAuthIsConfigured(tries int, enabledMechanisms primitive.A, opts ...OptionApplier) func(t *testing.T) { + return func(t *testing.T) { + t.Run("Ensure keyFile authentication is configured", m.HasKeyfileAuth(tries, opts...)) + t.Run(fmt.Sprintf("%q is configured", enabledMechanisms), m.ScramWithAuthIsConfigured(tries, enabledMechanisms, opts...)) + } +} + +func (m *Tester) HasTlsMode(tlsMode string, tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminParameter("sslMode", tlsMode, tries, opts...) +} + +// CustomRolesResult is a type to decode the result of getting rolesInfo. +type CustomRolesResult struct { + Roles []automationconfig.CustomRole +} + +func (m *Tester) VerifyRoles(expectedRoles []automationconfig.CustomRole, tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminCommandResult(func(t *testing.T) bool { + var result CustomRolesResult + err := m.mongoClient.Database("admin"). + RunCommand(m.ctx, + bson.D{ + {Key: "rolesInfo", Value: 1}, + {Key: "showPrivileges", Value: true}, + {Key: "showBuiltinRoles", Value: false}, + }).Decode(&result) + if err != nil { + t.Fatal(err) + return false + } + assert.ElementsMatch(t, result.Roles, expectedRoles) + return true + }, tries, opts...) +} + +type verifyAdminResultFunc func(t *testing.T) bool + +func (m *Tester) hasAdminCommandResult(verify verifyAdminResultFunc, tries int, opts ...OptionApplier) func(t *testing.T) { + clientOpts := make([]*options.ClientOptions, 0) + for _, optApplier := range opts { + clientOpts = optApplier.ApplyOption(clientOpts...) 
+ } + + return func(t *testing.T) { + if err := m.ensureClient(m.ctx, clientOpts...); err != nil { + t.Fatal(err) + } + + database := m.mongoClient.Database("admin") + assert.NotNil(t, database) + + found := false + for !found && tries > 0 { + <-time.After(10 * time.Second) + found = verify(t) + tries-- + } + assert.True(t, found) + } +} + +func (m *Tester) hasAdminParameter(key string, expectedValue interface{}, tries int, opts ...OptionApplier) func(t *testing.T) { + return m.hasAdminCommandResult(func(t *testing.T) bool { + var result map[string]interface{} + err := m.mongoClient.Database("admin"). + RunCommand(m.ctx, bson.D{{Key: "getParameter", Value: 1}, {Key: key, Value: 1}}). + Decode(&result) + if err != nil { + t.Logf("Unable to get admin setting %s with error : %s", key, err) + return false + } + + actualValue := result[key] + t.Logf("Actual Value: %+v, type: %s", actualValue, reflect.TypeOf(actualValue)) + return reflect.DeepEqual(expectedValue, actualValue) + }, tries, opts...) +} + +func (m *Tester) connectivityCheck(shouldSucceed bool, opts ...OptionApplier) func(t *testing.T) { + + clientOpts := make([]*options.ClientOptions, 0) + for _, optApplier := range opts { + clientOpts = optApplier.ApplyOption(clientOpts...) 
+ } + + connectivityOpts := defaults() + return func(t *testing.T) { + + // We can optionally skip connectivity tests locally + if testing.Short() { + t.Skip() + } + + ctx, cancel := context.WithTimeout(m.ctx, connectivityOpts.ContextTimeout) + defer cancel() + + if err := m.ensureClient(ctx, clientOpts...); err != nil { + t.Fatal(err) + } + + attempts := 0 + // There can be a short time before the user can auth as the user + err := wait.PollUntilContextTimeout(ctx, connectivityOpts.IntervalTime, connectivityOpts.TimeoutTime, false, func(ctx context.Context) (done bool, err error) { + attempts++ + collection := m.mongoClient.Database(connectivityOpts.Database).Collection(connectivityOpts.Collection) + _, err = collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159}) + if err != nil && shouldSucceed { + t.Logf("Was not able to connect, when we should have been able to!") + return false, nil + } + if err == nil && !shouldSucceed { + t.Logf("Was successfully able to connect, when we should not have been able to!") + return false, nil + } + // this information is only useful if we needed more than one attempt. 
+ if attempts >= 2 { + t.Logf("Connectivity check was successful after %d attempt(s)", attempts) + } + return true, nil + }) + + if err != nil { + t.Fatal(fmt.Errorf("error during connectivity check: %s", err)) + } + } +} + +func (m *Tester) WaitForRotatedCertificate(mdb mdbv1.MongoDBCommunity, initialCertSerialNumber *big.Int) func(*testing.T) { + return func(t *testing.T) { + tls, err := getClientTLSConfig(m.ctx, mdb) + assert.NoError(t, err) + + // Reject all server certificates that don't have the expected serial number + tls.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + cert := verifiedChains[0][0] + if initialCertSerialNumber.Cmp(cert.SerialNumber) == 0 { + return fmt.Errorf("certificate serial number has not changed: %s", cert.SerialNumber) + } + return nil + } + + if err := m.ensureClient(m.ctx, &options.ClientOptions{TLSConfig: tls}); err != nil { + t.Fatal(err) + } + + // Ping the cluster until it succeeds. The ping will only succeed with the right certificate. + err = wait.PollUntilContextTimeout(m.ctx, 5*time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { + if err := m.mongoClient.Ping(m.ctx, nil); err != nil { + return false, nil + } + return true, nil + }) + assert.NoError(t, err) + } +} + +// EnsureMongodConfig is mostly used for checking port changes. Port changes take some until they finish. +// We cannot fully rely on the statefulset or resource being ready/running since it will change its state multiple +// times during a port change. That means a resource might leave, go into and leave running multiple times until +// it truly finished its port change. 
+func (m *Tester) EnsureMongodConfig(selector string, expected interface{}) func(*testing.T) { + return func(t *testing.T) { + connectivityOpts := defaults() + err := wait.PollUntilContextTimeout(m.ctx, connectivityOpts.IntervalTime, connectivityOpts.TimeoutTime, false, func(ctx context.Context) (done bool, err error) { + opts, err := m.getCommandLineOptions() + assert.NoError(t, err) + + parsed := objx.New(bsonToMap(opts)).Get("parsed").ObjxMap() + + return expected == parsed.Get(selector).Data(), nil + }) + + assert.NoError(t, err) + + } +} + +// getCommandLineOptions will get the command line options from the admin database +// and return the results as a map. +func (m *Tester) getCommandLineOptions() (bson.M, error) { + var result bson.M + err := m.mongoClient. + Database("admin"). + RunCommand(m.ctx, bson.D{primitive.E{Key: "getCmdLineOpts", Value: 1}}). + Decode(&result) + + return result, err +} + +// bsonToMap will convert a bson map to a regular map recursively. +// objx does not work when the nested objects are bson.M. +func bsonToMap(m bson.M) map[string]interface{} { + out := make(map[string]interface{}) + for key, value := range m { + if subMap, ok := value.(bson.M); ok { + out[key] = bsonToMap(subMap) + } else { + out[key] = value + } + } + return out +} + +// StartBackgroundConnectivityTest starts periodically checking connectivity to the MongoDB deployment +// with the defined interval. A cancel function is returned, which can be called to stop testing connectivity. 
+func (m *Tester) StartBackgroundConnectivityTest(t *testing.T, interval time.Duration, opts ...OptionApplier) func() { + ctx, cancel := context.WithCancel(m.ctx) + t.Logf("Starting background connectivity test") + + // start a go routine which will periodically check basic MongoDB connectivity + go func() { //nolint + for { + select { + case <-ctx.Done(): + return + case <-time.After(interval): + m.ConnectivitySucceeds(opts...)(t) + } + } + }() + + return func() { + cancel() + if t != nil { + t.Log("TestContext cancelled, no longer checking connectivity") + } + } +} + +// ensureClient establishes a mongo client connection applying any addition +// client options on top of what were provided at construction. +func (t *Tester) ensureClient(ctx context.Context, opts ...*options.ClientOptions) error { + allOpts := t.clientOpts + allOpts = append(allOpts, opts...) + mongoClient, err := mongo.Connect(ctx, allOpts...) + if err != nil { + return err + } + t.mongoClient = mongoClient + return nil +} + +// PrometheusEndpointIsReachable returns a testing function that will check for +// the Prometheus endpoint to be rechable. It can be configued to use HTTPS if +// `useTls` is set to `true`. +func (m *Tester) PrometheusEndpointIsReachable(username, password string, useTls bool) func(t *testing.T) { + scheme := "http" + customTransport := http.DefaultTransport.(*http.Transport).Clone() + if useTls { + customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint + scheme = "https" + } + client := &http.Client{Transport: customTransport} + + return func(t *testing.T) { + _ = wait.PollUntilContextTimeout(m.ctx, 5*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) { + var idx int + + // Verify that the Prometheus port is enabled and responding with 200 + // on every Pod. 
+ for idx = 0; idx < m.resource.Spec.Members; idx++ { + url := fmt.Sprintf("%s://%s-%d.%s-svc.%s.svc.cluster.local:9216/metrics", scheme, m.resource.Name, idx, m.resource.Name, m.resource.Namespace) + req, err := http.NewRequest("GET", url, nil) + assert.NoError(t, err) + req.SetBasicAuth(username, password) + + response, err := client.Do(req) + assert.NoError(t, err) + assert.Equal(t, response.StatusCode, 200) + } + + return true, nil + }) + } +} + +// clientOptionAdder is the standard implementation that simply adds a +// new options.ClientOption to the mongo client +type clientOptionAdder struct { + option *options.ClientOptions +} + +func (c clientOptionAdder) ApplyOption(opts ...*options.ClientOptions) []*options.ClientOptions { + return append(opts, c.option) +} + +// clientOptionRemover is used if a value from the client array of options should be removed. +// assigning a nil value will not take precedence over an existing value, so we need a mechanism +// to remove elements that are present + +// e.g. to disable TLS, you need to remove the options.ClientOption that has a non-nil tls config +// it is not enough to add a tls config that has a nil value. +type clientOptionRemover struct { + // removalPredicate is a function which returns a bool indicating + // if a given options.ClientOption should be removed. 
+ removalPredicate func(opt *options.ClientOptions) bool +} + +func (c clientOptionRemover) ApplyOption(opts ...*options.ClientOptions) []*options.ClientOptions { + newOpts := make([]*options.ClientOptions, 0) + for _, opt := range opts { + if !c.removalPredicate(opt) { + newOpts = append(newOpts, opt) + } + } + return newOpts +} + +// WithScram provides a configuration option that will configure the MongoDB resource +// with the given username and password +func WithScram(username, password string) OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + Auth: &options.Credential{ + AuthMechanism: "SCRAM-SHA-256", + AuthSource: "admin", + Username: username, + Password: password, + }, + }, + } +} + +func WithScramWithAuth(username, password string, authenticationMechanism string) OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + Auth: &options.Credential{ + AuthMechanism: authenticationMechanism, + AuthSource: "admin", + Username: username, + Password: password, + }, + }, + } +} + +func WithX509() OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + Auth: &options.Credential{ + AuthMechanism: "MONGODB-X509", + }, + }, + } +} + +// WithHosts configures the hosts of the deployment +func WithHosts(hosts []string) OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + Hosts: hosts, + }, + } +} + +// WithTls configures the client to use tls +func WithTls(ctx context.Context, mdb mdbv1.MongoDBCommunity) OptionApplier { + tlsConfig, err := getClientTLSConfig(ctx, mdb) + if err != nil { + panic(fmt.Errorf("could not retrieve TLS config: %s", err)) + } + + return withTls(tlsConfig) +} + +func withTls(tls *tls.Config) OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + TLSConfig: tls, + }, + } +} + +// WithoutTls will remove the tls configuration +func WithoutTls() OptionApplier { + return clientOptionRemover{ + removalPredicate: func(opt 
*options.ClientOptions) bool { + return opt.TLSConfig != nil + }, + } +} + +// WithURI will add URI connection string +func WithURI(uri string) OptionApplier { + opt := &options.ClientOptions{} + opt.ApplyURI(uri) + return clientOptionAdder{option: opt} +} + +// WithReplicaSet will explicitly add a replicaset name +func WithReplicaSet(rsname string) OptionApplier { + return clientOptionAdder{ + option: &options.ClientOptions{ + ReplicaSet: &rsname, + }, + } +} + +// getClientTLSConfig reads in the tls fixtures +func getClientTLSConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*tls.Config, error) { + caSecret := corev1.Secret{} + caSecretName := types.NamespacedName{Name: mdb.Spec.Security.TLS.CaCertificateSecret.Name, Namespace: mdb.Namespace} + if err := e2eutil.TestClient.Get(ctx, caSecretName, &caSecret); err != nil { + return nil, err + } + caPEM := caSecret.Data["ca.crt"] + caPool := x509.NewCertPool() + caPool.AppendCertsFromPEM(caPEM) + return &tls.Config{ //nolint + RootCAs: caPool, + }, nil + +} + +// GetAgentCert reads the agent key certificate +func GetAgentCert(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { + certSecret := corev1.Secret{} + certSecretName := mdb.AgentCertificateSecretNamespacedName() + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { + return nil, err + } + block, _ := pem.Decode(certSecret.Data["tls.crt"]) + if block == nil { + return nil, fmt.Errorf("error decoding client cert key") + } + return x509.ParseCertificate(block.Bytes) +} + +// GetClientCert reads the client key certificate +func GetClientCert(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*x509.Certificate, error) { + certSecret := corev1.Secret{} + certSecretName := types.NamespacedName{Name: mdb.Spec.Security.TLS.CertificateKeySecret.Name, Namespace: mdb.Namespace} + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { + return nil, err + } + block, _ := 
pem.Decode(certSecret.Data["tls.crt"]) + if block == nil { + return nil, fmt.Errorf("error decoding client cert key") + } + return x509.ParseCertificate(block.Bytes) +} + +func GetUserCert(ctx context.Context, mdb mdbv1.MongoDBCommunity, userCertSecret string) (string, error) { + certSecret := corev1.Secret{} + certSecretName := types.NamespacedName{Name: userCertSecret, Namespace: mdb.Namespace} + if err := e2eutil.TestClient.Get(ctx, certSecretName, &certSecret); err != nil { + return "", err + } + crt, _ := pem.Decode(certSecret.Data["tls.crt"]) + if crt == nil { + return "", fmt.Errorf("error decoding client cert key") + } + key, _ := pem.Decode(certSecret.Data["tls.key"]) + if key == nil { + return "", fmt.Errorf("error decoding client cert key") + } + return string(crt.Bytes) + string(key.Bytes), nil +} + +// defaults returns the default connectivity options +// that our used in our tests. +// TODO: allow these to be configurable +func defaults() connectivityOpts { + return connectivityOpts{ + IntervalTime: 1 * time.Second, + TimeoutTime: 30 * time.Second, + ContextTimeout: 10 * time.Minute, + Database: "testing", + Collection: "numbers", + } +} + +type connectivityOpts struct { + Retries int + IntervalTime time.Duration + TimeoutTime time.Duration + ContextTimeout time.Duration + Database string + Collection string +} diff --git a/test/e2e/util/mongotester/mongotester_test.go b/test/e2e/util/mongotester/mongotester_test.go new file mode 100644 index 000000000..91ca6489b --- /dev/null +++ b/test/e2e/util/mongotester/mongotester_test.go @@ -0,0 +1,43 @@ +package mongotester + +import ( + "crypto/tls" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func TestTlsRemoval_RemovesCorrectConfig(t *testing.T) { + var opts []*options.ClientOptions + + // configure TLS and hosts + opts = withTls(&tls.Config{ //nolint + ServerName: "some-name", + }).ApplyOption(opts...) 
+ opts = WithHosts([]string{"host1", "host2", "host3"}).ApplyOption(opts...) + + removalOpt := WithoutTls() + + // remove the tls value + opts = removalOpt.ApplyOption(opts...) + + assert.Len(t, opts, 1, "tls removal should remove an element") + assert.NotNil(t, opts[0].Hosts, "tls removal should not effect other configs") + assert.Len(t, opts[0].Hosts, 3, "original configs should not be changed") + assert.True(t, reflect.DeepEqual(opts[0].Hosts, []string{"host1", "host2", "host3"})) +} + +func TestWithScram_AddsScramOption(t *testing.T) { + var opts []*options.ClientOptions + + opts = WithScram("username", "password").ApplyOption(opts...) + + assert.Len(t, opts, 1) + assert.NotNil(t, opts[0]) + assert.Equal(t, opts[0].Auth.AuthMechanism, "SCRAM-SHA-256") + assert.Equal(t, opts[0].Auth.Username, "username") + assert.Equal(t, opts[0].Auth.Password, "password") + assert.Equal(t, opts[0].Auth.AuthSource, "admin") +} diff --git a/test/e2e/util/wait/wait.go b/test/e2e/util/wait/wait.go new file mode 100644 index 000000000..54798860e --- /dev/null +++ b/test/e2e/util/wait/wait.go @@ -0,0 +1,215 @@ +package wait + +import ( + "context" + "fmt" + "testing" + "time" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + e2eutil "github.com/mongodb/mongodb-kubernetes-operator/test/e2e" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type StatefulSetType int + +const ( + MembersStatefulSet StatefulSetType = iota + ArbitersStatefulSet +) + +// ForConfigMapToExist waits until a ConfigMap of the given name exists +// using the provided retryInterval and timeout +func ForConfigMapToExist(ctx context.Context, cmName string, retryInterval, timeout time.Duration) (corev1.ConfigMap, error) { + cm := corev1.ConfigMap{} + return cm, waitForRuntimeObjectToExist(ctx, 
cmName, retryInterval, timeout, &cm, e2eutil.OperatorNamespace) +} + +// ForSecretToExist waits until a Secret of the given name exists +// using the provided retryInterval and timeout +func ForSecretToExist(ctx context.Context, cmName string, retryInterval, timeout time.Duration, namespace string) (corev1.Secret, error) { + s := corev1.Secret{} + return s, waitForRuntimeObjectToExist(ctx, cmName, retryInterval, timeout, &s, namespace) +} + +// ForMongoDBToReachPhase waits until the given MongoDB resource reaches the expected phase +func ForMongoDBToReachPhase(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, phase mdbv1.Phase, retryInterval, timeout time.Duration) error { + return waitForMongoDBCondition(ctx, mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { + t.Logf("current phase: %s, waiting for phase: %s", db.Status.Phase, phase) + return db.Status.Phase == phase + }) +} + +// ForMongoDBMessageStatus waits until the given MongoDB resource gets the expected message status +func ForMongoDBMessageStatus(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, message string) error { + return waitForMongoDBCondition(ctx, mdb, retryInterval, timeout, func(db mdbv1.MongoDBCommunity) bool { + t.Logf("current message: %s, waiting for message: %s", db.Status.Message, message) + return db.Status.Message == message + }) +} + +// waitForMongoDBCondition polls and waits for a given condition to be true +func waitForMongoDBCondition(ctx context.Context, mdb *mdbv1.MongoDBCommunity, retryInterval, timeout time.Duration, condition func(mdbv1.MongoDBCommunity) bool) error { + mdbNew := mdbv1.MongoDBCommunity{} + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, mdb.NamespacedName(), &mdbNew) + if err != nil { + return false, err + } + ready := condition(mdbNew) + return ready, nil + }) +} + +// 
ForDeploymentToExist waits until a Deployment of the given name exists +// using the provided retryInterval and timeout +func ForDeploymentToExist(ctx context.Context, deployName string, retryInterval, timeout time.Duration, namespace string) (appsv1.Deployment, error) { + deploy := appsv1.Deployment{} + return deploy, waitForRuntimeObjectToExist(ctx, deployName, retryInterval, timeout, &deploy, namespace) +} + +// ForStatefulSetToExist waits until a StatefulSet of the given name exists +// using the provided retryInterval and timeout +func ForStatefulSetToExist(ctx context.Context, stsName string, retryInterval, timeout time.Duration, namespace string) (appsv1.StatefulSet, error) { + sts := appsv1.StatefulSet{} + return sts, waitForRuntimeObjectToExist(ctx, stsName, retryInterval, timeout, &sts, namespace) +} + +// ForStatefulSetToBeDeleted waits until a StatefulSet of the given name is deleted +// using the provided retryInterval and timeout +func ForStatefulSetToBeDeleted(ctx context.Context, stsName string, retryInterval, timeout time.Duration, namespace string) error { + sts := appsv1.StatefulSet{} + return waitForRuntimeObjectToBeDeleted(ctx, stsName, retryInterval, timeout, &sts, namespace) +} + +// ForStatefulSetToHaveUpdateStrategy waits until all replicas of the StatefulSet with the given name +// have reached the ready status +func ForStatefulSetToHaveUpdateStrategy(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, strategy appsv1.StatefulSetUpdateStrategyType, opts ...Configuration) error { + options := newOptions(opts...) 
+ return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { + return sts.Spec.UpdateStrategy.Type == strategy + }) +} + +// ForStatefulSetToBeReady waits until all replicas of the StatefulSet with the given name +// have reached the ready status +func ForStatefulSetToBeReady(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { + options := newOptions(opts...) + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { + return statefulset.IsReady(sts, mdb.Spec.Members) + }) +} + +// ForStatefulSetToBeUnready waits until all replicas of the StatefulSet with the given name +// is not ready. +func ForStatefulSetToBeUnready(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { + options := newOptions(opts...) + return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { + return !statefulset.IsReady(sts, mdb.Spec.Members) + }) +} + +// ForArbitersStatefulSetToBeReady waits until all replicas of the StatefulSet with the given name +// have reached the ready status. +func ForArbitersStatefulSetToBeReady(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { + options := newOptions(opts...) + return waitForStatefulSetConditionWithSpecificSts(ctx, t, mdb, ArbitersStatefulSet, options, func(sts appsv1.StatefulSet) bool { + return statefulset.IsReady(sts, mdb.Spec.Arbiters) + }) +} + +// ForStatefulSetToBeReadyAfterScaleDown waits for just the ready replicas to be correct +// and does not account for the updated replicas +func ForStatefulSetToBeReadyAfterScaleDown(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, opts ...Configuration) error { + options := newOptions(opts...) 
+ return waitForStatefulSetCondition(ctx, t, mdb, options, func(sts appsv1.StatefulSet) bool { + return int32(mdb.Spec.Members) == sts.Status.ReadyReplicas + }) +} + +func waitForStatefulSetConditionWithSpecificSts(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, statefulSetType StatefulSetType, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { + _, err := ForStatefulSetToExist(ctx, mdb.Name, waitOpts.RetryInterval, waitOpts.Timeout, mdb.Namespace) + if err != nil { + return fmt.Errorf("error waiting for stateful set to be created: %s", err) + } + + sts := appsv1.StatefulSet{} + name := mdb.NamespacedName() + if statefulSetType == ArbitersStatefulSet { + name = mdb.ArbiterNamespacedName() + } + return wait.PollUntilContextTimeout(ctx, waitOpts.RetryInterval, waitOpts.Timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, name, &sts) + if err != nil { + return false, err + } + t.Logf("Waiting for %s to have %d replicas. 
Current ready replicas: %d, Current updated replicas: %d, Current generation: %d, Observed Generation: %d\n", + name, *sts.Spec.Replicas, sts.Status.ReadyReplicas, sts.Status.UpdatedReplicas, sts.Generation, sts.Status.ObservedGeneration) + ready := condition(sts) + return ready, nil + }) +} + +func waitForStatefulSetCondition(ctx context.Context, t *testing.T, mdb *mdbv1.MongoDBCommunity, waitOpts Options, condition func(set appsv1.StatefulSet) bool) error { + // uses members statefulset + return waitForStatefulSetConditionWithSpecificSts(ctx, t, mdb, MembersStatefulSet, waitOpts, condition) +} + +func ForPodReadiness(ctx context.Context, t *testing.T, isReady bool, containerName string, timeout time.Duration, pod corev1.Pod) error { + return wait.PollUntilContextTimeout(ctx, time.Second*3, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) + if err != nil { + return false, err + } + for _, status := range pod.Status.ContainerStatuses { + t.Logf("%s (%s), ready: %v\n", pod.Name, status.Name, status.Ready) + if status.Name == containerName && status.Ready == isReady { + return true, nil + } + } + return false, nil + }) +} + +func ForPodPhase(ctx context.Context, t *testing.T, timeout time.Duration, pod corev1.Pod, podPhase corev1.PodPhase) error { + return wait.PollUntilContextTimeout(ctx, time.Second*3, timeout, false, func(ctx context.Context) (done bool, err error) { + err = e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, &pod) + if err != nil { + return false, err + } + t.Logf("Current phase %s, expected phase %s", pod.Status.Phase, podPhase) + return pod.Status.Phase == podPhase, nil + }) +} + +// waitForRuntimeObjectToExist waits until a runtime.Object of the given name exists +// using the provided retryInterval and timeout provided. 
+func waitForRuntimeObjectToExist(ctx context.Context, name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + return runtimeObjectExists(ctx, name, obj, namespace) + }) +} + +// waitForRuntimeObjectToBeDeleted waits until a runtime.Object of the given name is deleted +// using the provided retryInterval and timeout provided. +func waitForRuntimeObjectToBeDeleted(ctx context.Context, name string, retryInterval, timeout time.Duration, obj client.Object, namespace string) error { + return wait.PollUntilContextTimeout(ctx, retryInterval, timeout, false, func(ctx context.Context) (done bool, err error) { + exists, err := runtimeObjectExists(ctx, name, obj, namespace) + return !exists, err + }) +} + +// runtimeObjectExists checks if a runtime.Object of the given name exists +func runtimeObjectExists(ctx context.Context, name string, obj client.Object, namespace string) (bool, error) { + err := e2eutil.TestClient.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, obj) + if err != nil { + return false, client.IgnoreNotFound(err) + } + return true, nil +} diff --git a/test/e2e/util/wait/wait_options.go b/test/e2e/util/wait/wait_options.go new file mode 100644 index 000000000..01aac62dd --- /dev/null +++ b/test/e2e/util/wait/wait_options.go @@ -0,0 +1,41 @@ +package wait + +import "time" + +type Configuration func(*Options) + +// Options holds values which can be configured when waiting for specific confitions. 
+type Options struct { + RetryInterval time.Duration + Timeout time.Duration +} + +// RetryInterval specifies the RetryInterval +func RetryInterval(retryInterval time.Duration) Configuration { + return func(options *Options) { + options.RetryInterval = retryInterval + } +} + +// Timeout specifies the Timeout +func Timeout(timeout time.Duration) Configuration { + return func(options *Options) { + options.Timeout = timeout + } +} + +// newOptions returns an Options that has been configured with default values. +func newOptions(fns ...Configuration) Options { + defaults := defaultStatefulSetReadinessOptions() + for _, fn := range fns { + fn(&defaults) + } + return defaults +} + +func defaultStatefulSetReadinessOptions() Options { + return Options{ + RetryInterval: time.Second * 15, + Timeout: time.Minute * 12, + } +} diff --git a/test/operator-sdk-test.yaml b/test/operator-sdk-test.yaml deleted file mode 100644 index 8bcafecf2..000000000 --- a/test/operator-sdk-test.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: operator-sdk-test - labels: - app: operator-sdk-test -spec: - containers: - - image: E2E_TEST_IMAGE - imagePullPolicy: Always - name: operator-sdk-test - volumeMounts: - - mountPath: /etc/config - name: kube-config-volume - restartPolicy: Never - serviceAccount: mongodb-kubernetes-operator - volumes: - - configMap: - name: kube-config - name: kube-config-volume diff --git a/test/test-app/Dockerfile b/test/test-app/Dockerfile new file mode 100644 index 000000000..5082def84 --- /dev/null +++ b/test/test-app/Dockerfile @@ -0,0 +1,11 @@ +FROM python:3.8-slim-buster + +WORKDIR /app + +COPY requirements.txt requirements.txt + +RUN pip3 install -r requirements.txt + +COPY main.py main.py + +CMD [ "python3", "/app/main.py"] diff --git a/test/test-app/README.md b/test/test-app/README.md new file mode 100644 index 000000000..2efc509bb --- /dev/null +++ b/test/test-app/README.md @@ -0,0 +1,9 @@ +This Dockerfile is a for an image which 
can be found +[here](https://quay.io/repository/mongodb/mongodb-kubernetes-operator-test-app) + +The E2E tests use this to ensure that the secret generated by the operator can be mounted into an application pod and +used to successfully connect with a Mongo client. + +```bash +docker build . -t quay.io/mongodb/mongodb-kubernetes-operator-test-app:1.0.0 +``` diff --git a/test/test-app/main.py b/test/test-app/main.py new file mode 100644 index 000000000..fec8734cc --- /dev/null +++ b/test/test-app/main.py @@ -0,0 +1,22 @@ +import os +import sys + +from pymongo import MongoClient + + +def main() -> int: + connection_string = os.getenv("CONNECTION_STRING") + print(f"Using connection String: {connection_string}") + client = MongoClient(connection_string) + + try: + client.testing.col.insert_one({}) + except Exception as e: + print(f"Error inserting document {e}") + return 1 + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/test/test-app/requirements.txt b/test/test-app/requirements.txt new file mode 100644 index 000000000..c4165c3f4 --- /dev/null +++ b/test/test-app/requirements.txt @@ -0,0 +1,2 @@ +PyMongo==4.6.3 +dnspython==2.6.1 diff --git a/testdata/tls/ca.crt b/testdata/tls/ca.crt new file mode 100644 index 000000000..4e7bae6d0 --- /dev/null +++ b/testdata/tls/ca.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFGDCCAwCgAwIBAgIUKYge8FLQT5AJgyJuurGLeeu/qEMwDQYJKoZIhvcNAQEL +BQAwJDEQMA4GA1UECgwHTW9uZ29EQjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMTA0 +MjgxMjA4NTlaFw0zMTA0MjYxMjA4NTlaMCQxEDAOBgNVBAoMB01vbmdvREIxEDAO +BgNVBAMMB1Jvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCu +Y0D6TPSlpR+M7s4QRwFxOcZ/X0qmELmOw579ZbFBK+sEoepnyTiJaHpmHOHKr12g +0KFa/cFo3hZNx4wV6QimdygIzSJzf/h3IJzn1JjhRTQLOz0WdVajg6ITJzxi8Y6V +BpzizoGeQeTKxABLRDslsZ2TtpmNjkJIyqsbhQKoNx0JU64nzGcpuPt5duRSqbxy +iJpYH19OBgrET/clDDwvk04Wi0X3wCESiZG08Zy3oW/Fpn3CQWZeANpsFGDtqLDU +m7YZsejHL8uzhY4Q5bHmLWwMOEwR5j3+7gDojzdL4wjGd1wzgwcFHLTF6WyV1w3q 
+mSIAOZ2RmkrBDMDRURJs9eG5yonWS+XS2m/H8EyG65if9GXV5mnKHIt+bZgTD98s +xJM4Wa0v46w17rcynQK/OrGF1NG7NLlORasDq0VCtqAbQnGmbgNzmYGFX3US2H/o +u1lOmHRsDZTpQ9gLUqSfQh4mdVPT9CE/C+DGqsqsi5iNXMP6z6JJzibzInnx8URd +ug6iI3YjeP3GYsuLnwp9RoOAq5/FdsKAb63AzY+mRZ7tkANNFenJAzbgwXeO86YZ +JK89gZ5rEp9RlTH9yZ6et7zan0FVaP/fwqsJp6o+ZmAfSmtHZvbCK45q+0KYvyKG +/h5bweER/cRCmQl7UA8Cb5ZEXaa8mdQu1WVzl6VBSwIDAQABo0IwQDAdBgNVHQ4E +FgQU0qBFL0HfYJevdR0b0vAaYxSLpZswDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAuQwDQYJKoZIhvcNAQELBQADggIBAFFQzP/UDDxRX1C4OBG5Ak4PcBMb +i2BEObBTGJpGzHsK4na8wO+9p2DFNTipE4BYLmLaUzcKP+Q3pef32Ks5i88bn9sr +KmqOzrcgzcLpIOR6CZoJ4VfVIelGzKqbdDB6At6/tj42TO7UMt49fFOVgvM/uVX8 +/G5l5ZJGUVmD3NgyazLEsnp8AJAR3Yq0S/ODHWswWlgw7oucuZJ6bMaafqSzCA7i +pqdBLfFVRv0DshifnM2sy3fq6X1iDWhiGqj43FdTHqYjAB0zi/4fBVYuQ0zlKDNp +JRMNgIWzjMw6mhl5aMGskazVmQQC5s8ET1mXZ3RyGcFD872EQ/JLjKOARXIO442z +f+Sr50i5KU5mmgSKFYDyKavVs4XlTiagg/2hw+uOdas8IiQb3wkFITco7kFWeg7r +qQbUllAJlvaNV/wX27NMyUp6wt5lhjN6HYS0l7FvhzR9UgGpRC/KOjrmuCk9AMoS +Gfz5rmkiQbLhQUnmxTXUy5ddkCJ5uSP3NMoMLOte3tUZJXqvleb77xkltL9oXSqr +P/EzTaTuVgXBXEU5ODvaFjOtsXfgBuYIKT4/xDCFFK6jwkRZ/JyIS81j9dINAZez +SLeM3BYydpb+ZRcJR4A5710VAs6U05eAlcW7Zzb6sO6Ex/iGS37FS2N1l3YnPHr0 +RjPbH3tDZRVxi4Vt +-----END CERTIFICATE----- diff --git a/testdata/tls/ca.key b/testdata/tls/ca.key new file mode 100644 index 000000000..fb14ad0e5 --- /dev/null +++ b/testdata/tls/ca.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEArmNA+kz0paUfjO7OEEcBcTnGf19KphC5jsOe/WWxQSvrBKHq +Z8k4iWh6Zhzhyq9doNChWv3BaN4WTceMFekIpncoCM0ic3/4dyCc59SY4UU0Czs9 +FnVWo4OiEyc8YvGOlQac4s6BnkHkysQAS0Q7JbGdk7aZjY5CSMqrG4UCqDcdCVOu +J8xnKbj7eXbkUqm8coiaWB9fTgYKxE/3JQw8L5NOFotF98AhEomRtPGct6FvxaZ9 +wkFmXgDabBRg7aiw1Ju2GbHoxy/Ls4WOEOWx5i1sDDhMEeY9/u4A6I83S+MIxndc +M4MHBRy0xelsldcN6pkiADmdkZpKwQzA0VESbPXhucqJ1kvl0tpvx/BMhuuYn/Rl +1eZpyhyLfm2YEw/fLMSTOFmtL+OsNe63Mp0CvzqxhdTRuzS5TkWrA6tFQragG0Jx +pm4Dc5mBhV91Eth/6LtZTph0bA2U6UPYC1Kkn0IeJnVT0/QhPwvgxqrKrIuYjVzD 
++s+iSc4m8yJ58fFEXboOoiN2I3j9xmLLi58KfUaDgKufxXbCgG+twM2PpkWe7ZAD +TRXpyQM24MF3jvOmGSSvPYGeaxKfUZUx/cmenre82p9BVWj/38KrCaeqPmZgH0pr +R2b2wiuOavtCmL8ihv4eW8HhEf3EQpkJe1APAm+WRF2mvJnULtVlc5elQUsCAwEA +AQKCAgBBfCAQXgmYklMwtxRGZIOUIx/5AK2lgq5LgAYaHa/cS0Orr6m4Y4WJg+RC +qCHZ3NSJ6Q5Ofu+8E3nIp2Bhceq/qAsukumW+b0x7ts860algTkz5oDgCBwKtwmL +q7YvaYojSCJtwSJHbXMe+U8q9GpJk1Ma/vzWfU9CymhKoz4GMPwEXpoNc+Jhdodo +a07+A6MyVz2uTcmaIQa2BVlHXjrTmrs+F/qkOE+zCFng2sIA6uxwCj87TRfFwQhE +gbqMREZy8C4HpBlHgxk5RrO8gKS3TONAC3v1VMJ/Epzgt7cKFi1bacMDzPY74zW3 +BBq0gddF+08bSody/7+GkuVtNMZ36YV2Cr7tBSCrji2phXEZp2yfJB6H8ER0xSmI +CMIy8fIaQ38nKjzoqREM2z3+5n7L/rLbHmI17cFR2IvR6E32x7PnWVau49aGx+gp +SMFAN6bWnmXfolOaBKreDtzjU2csPuWqQRuBiACOPxxDcTkyoc97tdpNPLF/OJQf +r9/all91jn+9icC7gY1GJ5HZU0aexHgelQsMINobp4zZ/85o42Rcf0icRnBv/uKh +FZ2Infu1nblelmOrfqaZN4AeWkTnQH9GCECyN81lFe3xk4R0AWYws2NUAJlC0VX8 +wkREOYjAOhGk4QEhfnAxH6JskC9JG7f822RYeIp40Yum5ksw0QKCAQEA0/0l2t7f +tzRCQTcRU7rSFqXKUZfZ5MuQDWQ20JNqDlhl4V6+hlcWYEj65OaDHMIvFEO3YDxJ +KEPIUQykbkbbPfFlTeQYN0HIGBEMcYjCZfOsSh6V+C7YBAoUUNNeVvK0A9/8ox0C +KGA9hjXWveoqohjE+WBmMYzj7bKV+0roJM3zAiVcQYU4QRQpPy9s7rBGc7hO7C8x +E2tP2J8ppX+Cm1BtrmSSyfSHzl5H/z2teVh1WMY29qJk4kSz9ar8Wgql97jr1De6 +trkLC2ELHJZRvj/AH2aOPrfXHZ9hgp+Zm6Y0QYjU9sXtNgqrcXXkpIhABA5qnrvv +oEDtlGTgXzxqpwKCAQEA0perAZyxl6atTMsziAymXgGO6eBarfV4ZMLSqv8Wb7ef +sidnW4BYJ+8PDfJIjhHPZrTFzwHgJY6jdTG51z1zlUgXfrHgvXjY+thLpi72QBX1 +nFoiv3yPpQwP6YWELrPbfk0MJVSwLk3e+CEJ9QaU0B3MXmqkix99Mqj/IycGEoLR +LmaEF+0T42rK1oIVhe63clWokkGR6dGZOD7R8X0LPibOPCZgw4kBDV+Ts11+6SrW +hh8dOO27j0NrhlcIJtUNpOF5lc/YkF6J85HmZRFO2xMjlpPivwt5fzoZTQULGnJB +myG38qJzh6Z0bwDUyTyj46phUI6t94bMtOZGA+XcvQKCAQAmyweXYvu7kfOh7Yrd +MK3reRFqFwjHxryoxrMFPkTFNUYHlQf2m487tX88TjamF5737WBsWvvkQ2sv8clR +aOQMFNW+CESqL/6G8O+/AxDYCVx4/9nf3eqn6pRHKjb0YFuy7dVUoCVZ8CqyGb4f +aO++VBwPqqVo4eiAAhNSNiX/PjT/KokUcGWX+zGFH4+mqllqKcs/i29Gp3eoI5BC +efATrgc0R7FZBceoazZvrgDF2Ps4cKV0QsmFYp8wEMc3TwWKLKvzXPNtJrWvsmWP +KK7yysXEuUbEkW3rPNRiTASXKDYd3AVk11mObytqivF5bnmQhHbcb3XtdJRFeKRa 
+qTq3AoIBAHETy1jkQF/Duc5AVf05faOAhrKCK265HdpM2j3DFtMJ8BADJHZ3Zd6b +eKPucpMGcS0e6BANW8QO4SoudzFh58xCl4sKDDjwyOF4frZPBR7IK8e2hm33flIL +NggYDy/cl4Er8dehr7BNU+o8I1KUJBfTyEbyUEP7togbQlQnwhTydUvK9Ca8wC8e +yqd/1frCgXI9GHTgDG9WgkU3Rhe7IuxOffLCULbtu8johzCKhXjB7cq6rkBBNrLo +arYJyXDAecEM6PJOVmS+vqgwsVuXfZYYosVZxyj6ClUMZ0V57cpXi263HkSEgjBW +arKn6Zncm8vFUtekpTkCAq0n8HRrWqkCggEASI96vzDeUdt52bThenMdQMC1jCwr +eN289YFjP6llfX3MSxg3IW/moebMqJ5MULj7BrNErqYYCE9NsDp14fnXHgpAkxyi +EduxU+JFgHtjTnVLyX0NNIPGHfI7iQvqEJYwpjjyF8uQZgQAknNGjJFu80mwzuZi +LVzl3ssM8erk6KGpkMa35CKH7MvcfSRZ6//T0NxPceste42Af5ZdT8OZyF/VBgci +EXrk42dHzUlJkzAWzLAV0IUzcXvnP517cP7KOIuNo4gYNpZd0Esj62HZfWMIK5A3 +zKhW/vj04bmjb5wkoly/2ydKXBPbCOnl0/+6UKCdQ+9j3dyH9iRuFFj4sA== +-----END RSA PRIVATE KEY----- diff --git a/testdata/tls/server.crt b/testdata/tls/server.crt new file mode 100644 index 000000000..2772fe138 --- /dev/null +++ b/testdata/tls/server.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCAkKgAwIBAgIUKXYivNfzneHnf77o/hmGJPPZmiMwDQYJKoZIhvcNAQEL +BQAwJDEQMA4GA1UECgwHTW9uZ29EQjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMTA0 +MjgxMjI0MjRaFw0zNTAxMDUxMjI0MjRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQI +DAJOWTELMAkGA1UEBwwCTlkxEDAOBgNVBAoMB01vbmdvREIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDMgVLGC5blAlvcmbggfnmFZ0wHAstOxbjOPija +53TzvKi9L2Smrwf5/RtRQSZ6cgNfTLzDbz+jKHn5v0jWqSW5TzWSL1VcDiYSoito ++RwJcRmrLBuceqP8anUjCgqmDH5xFL2w+QNh9knGdOvbUkGr+gaUxeQxNclup8jV +v9qyRva2an8MB7VbSG8ZVDVkcBkH2xlO+S1ITl/SPBXKsDbOB/hWEAqkOoEom5lQ +6a4IjUYU8HUhebzERH71Jhgc53hcs1RourMLQmAZQoqy7E8On6B/jZxMqq4HsqiQ +PYV/FLPlT12hgKMBZ6POwIFxEueTGVuGzHU37aoxPwNT7cp/AgMBAAGjbzBtMB8G +A1UdIwQYMBaAFNKgRS9B32CXr3UdG9LwGmMUi6WbMAkGA1UdEwQCMAAwCwYDVR0P +BAQDAgTwMDIGA1UdEQQrMCmCJyoubWRiLXRscy1zdmMuZGVmYXVsdC5zdmMuY2x1 +c3Rlci5sb2NhbDANBgkqhkiG9w0BAQsFAAOCAgEAeeVcqSuI3UjmThAufNN5I+Z5 +jIUyU/kTcOHUr5hDA83+W8IuEHo/g+ZsvtCVqTqiXNd5Ehn5rdO+YB8fqXC2jgUr +VLbel87qdxqTwdZ6pO3X0StO1AuSN/ZydnfZqRyI7fJn28A0fzTHP5AZdOAYtGBR 
+nld9omH85p2EsZkhtdsZpRPr11mQoFnJ9lGcz2z/6GRbrlEYrM9nU4Ij8cBAlhrM +hkqNpQT56XM1QxJ6MdEwYQv4Fbkr5Aa75NGyb0m6uQNYDPyXgvvkSZ+lZTXBhVl1 +5GouRqRMe+hlGPYL4VKy23PAwag7dNlQ1GQLur+pWkXfHLdKIaoPLDwFx4m8PWGr +rErXOXKKYZIw+xsQYKOXNePMM3/bRlEZTt52wrBEDB3LNhNkuKB8J1+/dfE557l1 +5/Gyt+MuRAq/gi+ffR7KxuzYDipGSUmmWzFF/5LyOCAS9lKi8xyKzsYpdDDkcx8k +aC86zOjYseMKytk2hgOmNPjva9iG4mlQQ/S7FgOn01jJadpu9X0zVgmq7uKIemUM +6ts8qEK3zIGir10FfT0zxwaXQOMLMHrLvELGJEhHJPTQDMjPopKVEXtk9Upeveas +PK3QLsn3xE2XytH2HJnAHL3GR1nLT3HgdyrOlJlV37ZPXr3di7nfQQM4UJoghyWH +JZ6umbgvvVMWeLFX/IE= +-----END CERTIFICATE----- diff --git a/testdata/tls/server.key b/testdata/tls/server.key new file mode 100644 index 000000000..a7289b9af --- /dev/null +++ b/testdata/tls/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzIFSxguW5QJb3Jm4IH55hWdMBwLLTsW4zj4o2ud087yovS9k +pq8H+f0bUUEmenIDX0y8w28/oyh5+b9I1qkluU81ki9VXA4mEqIraPkcCXEZqywb +nHqj/Gp1IwoKpgx+cRS9sPkDYfZJxnTr21JBq/oGlMXkMTXJbqfI1b/askb2tmp/ +DAe1W0hvGVQ1ZHAZB9sZTvktSE5f0jwVyrA2zgf4VhAKpDqBKJuZUOmuCI1GFPB1 +IXm8xER+9SYYHOd4XLNUaLqzC0JgGUKKsuxPDp+gf42cTKquB7KokD2FfxSz5U9d +oYCjAWejzsCBcRLnkxlbhsx1N+2qMT8DU+3KfwIDAQABAoIBAQC7HjVbim0l25Or +9Gb6LF8KhiqVW6Qkzls7Mrr1GMT045FNkRi6PvrAbSvanA8WCE43m6I3/AmxQy7g +Knr+FsSymtw8htzGnxeNAx9PLGfP59GBwpj9A2YaZloJln2J03K6Cy1JyX6j2tNE +J+VKxyfZsKrm427Y7AsEGbd0hNgZN5s9l70q0FSCkFcb37b497k0gYcE+63wEaq3 +FHGoYvbjUVKqp1YpVQyALlHk2toDMOOVBt4MQzP6RsVQJ3LY7K0ZYlNu83EWutsJ +oIMjDwMoCpDtFqrUDzCgbYoDPAaREOBFJZcUrqQ3oTMCo8qEZgiinOVQks6vqnpd +vke/qfsRAoGBAPcm/4AkVeRCmEmR16U9K8pk2KyOJxbXvSvLPO55ytAHSeHEQYaE +FevTOYj+Whd5B/OWOcGXrvby0OpzfEizpE/cLyCGPQqONh5RyJUeG9mzmSCGfJKw +dru99Sg2njU+ZYmHtf6FtY6RGZ4OrwiifVzk/slGE9r0LJt0uVJ/Db83AoGBANPT +fWAetG/JJVG8RoQnddHzZhpmJAnqQt6QbKiYZ/WsH5mchsuJg2oybY9uf9TL1OMy +yxhCie1vFBBRD1s6j06btqF38i9D2H6R55i2PtP5AKFD6S9wucpFRiR0A5r5r69V +KwnYA1fu0uA6tYw457f0vS8NfIaiEDmERfiy4qL5AoGBANYsXUzWL/hWHVHjqFPw +5nnFWl5t8UHCQpQo0ux1bmNHbabPQ1kmLTjnGfy1La0ZnOJhVDuHDn/Be3kwCouV 
+4NWzoMM2kL8M7ajohkFyjf/hutiMsncLpFidDE2ExySspaDAkd22UNbytphZcSSy +aqCNcJ1KtPoQjndIdzAeGfORAoGABbbm4vjxFTLv9syFens2CnvufTfUMRBIzYhH +5iR2aYJDN/mpCUSkbvD9U6k/eZYmIBr2r6jb37PnbqlBKMzjoNNCkgiSWAQUixWU +keIYv88v3Snf2I/J81L7GXCnyD6EJs69Yn6ZWH3w4muzCh1e4u+PSv2qJleo6GRR +Hux0gMECgYBN0BaeyUPqRLgq3JsrPTK0VQ+8J5+3la7wZWU5vCr2cvLftzb7Devj +m5K901mFCPdtO5LJ8OdeOi1PHnG/+WCfuwDitN8OufPJ+tdSteG+F9XIu5sTMGLB +QJeIyHolsPZhW4OA3C7p6uZHAeDIqIpkv8j7974cLBWhGlQJx9403A== +-----END RSA PRIVATE KEY----- diff --git a/testdata/tls/server.pem b/testdata/tls/server.pem new file mode 100644 index 000000000..fa38bc24c --- /dev/null +++ b/testdata/tls/server.pem @@ -0,0 +1,53 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCAkKgAwIBAgIUKXYivNfzneHnf77o/hmGJPPZmiMwDQYJKoZIhvcNAQEL +BQAwJDEQMA4GA1UECgwHTW9uZ29EQjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMTA0 +MjgxMjI0MjRaFw0zNTAxMDUxMjI0MjRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQI +DAJOWTELMAkGA1UEBwwCTlkxEDAOBgNVBAoMB01vbmdvREIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDMgVLGC5blAlvcmbggfnmFZ0wHAstOxbjOPija +53TzvKi9L2Smrwf5/RtRQSZ6cgNfTLzDbz+jKHn5v0jWqSW5TzWSL1VcDiYSoito ++RwJcRmrLBuceqP8anUjCgqmDH5xFL2w+QNh9knGdOvbUkGr+gaUxeQxNclup8jV +v9qyRva2an8MB7VbSG8ZVDVkcBkH2xlO+S1ITl/SPBXKsDbOB/hWEAqkOoEom5lQ +6a4IjUYU8HUhebzERH71Jhgc53hcs1RourMLQmAZQoqy7E8On6B/jZxMqq4HsqiQ +PYV/FLPlT12hgKMBZ6POwIFxEueTGVuGzHU37aoxPwNT7cp/AgMBAAGjbzBtMB8G +A1UdIwQYMBaAFNKgRS9B32CXr3UdG9LwGmMUi6WbMAkGA1UdEwQCMAAwCwYDVR0P +BAQDAgTwMDIGA1UdEQQrMCmCJyoubWRiLXRscy1zdmMuZGVmYXVsdC5zdmMuY2x1 +c3Rlci5sb2NhbDANBgkqhkiG9w0BAQsFAAOCAgEAeeVcqSuI3UjmThAufNN5I+Z5 +jIUyU/kTcOHUr5hDA83+W8IuEHo/g+ZsvtCVqTqiXNd5Ehn5rdO+YB8fqXC2jgUr +VLbel87qdxqTwdZ6pO3X0StO1AuSN/ZydnfZqRyI7fJn28A0fzTHP5AZdOAYtGBR +nld9omH85p2EsZkhtdsZpRPr11mQoFnJ9lGcz2z/6GRbrlEYrM9nU4Ij8cBAlhrM +hkqNpQT56XM1QxJ6MdEwYQv4Fbkr5Aa75NGyb0m6uQNYDPyXgvvkSZ+lZTXBhVl1 +5GouRqRMe+hlGPYL4VKy23PAwag7dNlQ1GQLur+pWkXfHLdKIaoPLDwFx4m8PWGr +rErXOXKKYZIw+xsQYKOXNePMM3/bRlEZTt52wrBEDB3LNhNkuKB8J1+/dfE557l1 
+5/Gyt+MuRAq/gi+ffR7KxuzYDipGSUmmWzFF/5LyOCAS9lKi8xyKzsYpdDDkcx8k +aC86zOjYseMKytk2hgOmNPjva9iG4mlQQ/S7FgOn01jJadpu9X0zVgmq7uKIemUM +6ts8qEK3zIGir10FfT0zxwaXQOMLMHrLvELGJEhHJPTQDMjPopKVEXtk9Upeveas +PK3QLsn3xE2XytH2HJnAHL3GR1nLT3HgdyrOlJlV37ZPXr3di7nfQQM4UJoghyWH +JZ6umbgvvVMWeLFX/IE= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzIFSxguW5QJb3Jm4IH55hWdMBwLLTsW4zj4o2ud087yovS9k +pq8H+f0bUUEmenIDX0y8w28/oyh5+b9I1qkluU81ki9VXA4mEqIraPkcCXEZqywb +nHqj/Gp1IwoKpgx+cRS9sPkDYfZJxnTr21JBq/oGlMXkMTXJbqfI1b/askb2tmp/ +DAe1W0hvGVQ1ZHAZB9sZTvktSE5f0jwVyrA2zgf4VhAKpDqBKJuZUOmuCI1GFPB1 +IXm8xER+9SYYHOd4XLNUaLqzC0JgGUKKsuxPDp+gf42cTKquB7KokD2FfxSz5U9d +oYCjAWejzsCBcRLnkxlbhsx1N+2qMT8DU+3KfwIDAQABAoIBAQC7HjVbim0l25Or +9Gb6LF8KhiqVW6Qkzls7Mrr1GMT045FNkRi6PvrAbSvanA8WCE43m6I3/AmxQy7g +Knr+FsSymtw8htzGnxeNAx9PLGfP59GBwpj9A2YaZloJln2J03K6Cy1JyX6j2tNE +J+VKxyfZsKrm427Y7AsEGbd0hNgZN5s9l70q0FSCkFcb37b497k0gYcE+63wEaq3 +FHGoYvbjUVKqp1YpVQyALlHk2toDMOOVBt4MQzP6RsVQJ3LY7K0ZYlNu83EWutsJ +oIMjDwMoCpDtFqrUDzCgbYoDPAaREOBFJZcUrqQ3oTMCo8qEZgiinOVQks6vqnpd +vke/qfsRAoGBAPcm/4AkVeRCmEmR16U9K8pk2KyOJxbXvSvLPO55ytAHSeHEQYaE +FevTOYj+Whd5B/OWOcGXrvby0OpzfEizpE/cLyCGPQqONh5RyJUeG9mzmSCGfJKw +dru99Sg2njU+ZYmHtf6FtY6RGZ4OrwiifVzk/slGE9r0LJt0uVJ/Db83AoGBANPT +fWAetG/JJVG8RoQnddHzZhpmJAnqQt6QbKiYZ/WsH5mchsuJg2oybY9uf9TL1OMy +yxhCie1vFBBRD1s6j06btqF38i9D2H6R55i2PtP5AKFD6S9wucpFRiR0A5r5r69V +KwnYA1fu0uA6tYw457f0vS8NfIaiEDmERfiy4qL5AoGBANYsXUzWL/hWHVHjqFPw +5nnFWl5t8UHCQpQo0ux1bmNHbabPQ1kmLTjnGfy1La0ZnOJhVDuHDn/Be3kwCouV +4NWzoMM2kL8M7ajohkFyjf/hutiMsncLpFidDE2ExySspaDAkd22UNbytphZcSSy +aqCNcJ1KtPoQjndIdzAeGfORAoGABbbm4vjxFTLv9syFens2CnvufTfUMRBIzYhH +5iR2aYJDN/mpCUSkbvD9U6k/eZYmIBr2r6jb37PnbqlBKMzjoNNCkgiSWAQUixWU +keIYv88v3Snf2I/J81L7GXCnyD6EJs69Yn6ZWH3w4muzCh1e4u+PSv2qJleo6GRR +Hux0gMECgYBN0BaeyUPqRLgq3JsrPTK0VQ+8J5+3la7wZWU5vCr2cvLftzb7Devj +m5K901mFCPdtO5LJ8OdeOi1PHnG/+WCfuwDitN8OufPJ+tdSteG+F9XIu5sTMGLB +QJeIyHolsPZhW4OA3C7p6uZHAeDIqIpkv8j7974cLBWhGlQJx9403A== +-----END 
RSA PRIVATE KEY----- diff --git a/testdata/tls/server_rotated.crt b/testdata/tls/server_rotated.crt new file mode 100644 index 000000000..22c9abff6 --- /dev/null +++ b/testdata/tls/server_rotated.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEWjCCAkKgAwIBAgIUKXYivNfzneHnf77o/hmGJPPZmiQwDQYJKoZIhvcNAQEL +BQAwJDEQMA4GA1UECgwHTW9uZ29EQjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMTA0 +MjgxMjI2MDFaFw0zNTAxMDUxMjI2MDFaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQI +DAJOWTELMAkGA1UEBwwCTlkxEDAOBgNVBAoMB01vbmdvREIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzFl/uTaH7b3+VGaa6iyYvYYxmp4zmV9O0SSoF +ihwqU7v4Ar7F+cKMduU3ueLlZh7BXdziqWeiIVOxhPTPPpcLzKndc6xzGuJyng7G +paDjPgDBjCMGZd9z+u56rYxqBRbIILyB6t1p6BK4DUMQ+tbZyaikAMLvNlw1c6se +oWO6+fr/wGyaw84VDW7qGmn0eWx2hvatY44BDthypoa3UFW54OIx8uzn9/Xye43J +V/+15Qwc9YADgXducDkSBussC891hwVJJ//r3ljkHavmCFlwnquf4eLQgyzma85w +EEMNrVDsNUO8kmLrm3se9WNiPMW41p/o/vBiBZuid4rr02YvAgMBAAGjbzBtMB8G +A1UdIwQYMBaAFNKgRS9B32CXr3UdG9LwGmMUi6WbMAkGA1UdEwQCMAAwCwYDVR0P +BAQDAgTwMDIGA1UdEQQrMCmCJyoubWRiLXRscy1zdmMuZGVmYXVsdC5zdmMuY2x1 +c3Rlci5sb2NhbDANBgkqhkiG9w0BAQsFAAOCAgEAi0snRlGNjVn/tfA2wGz+LRT5 +cMnYdESgdNdvqj7VA3etNEPxiZ4zBt6yRTN1Q0R1q1gHVzWdlWBUS3TCd3nyorPE +tS5BkKApICa7m1rVj9ZkDT+aRZsI1Gqve4p8/Ofdn3uEJ7xHEBtX6dlodHSr88UW +ig0xVhKEjc6Z9WviuCBTog3jsonW64iG01D3W3h/9dh0uCbZAoVbAO281qWv6Mov +fqg+FJ6iZ9FMhTUrSIyaJB1VaJvuprchcBjAPQkw7/AYyloOTbajejTprFdqmwHk +JTydwMleW4PnRLxqk/+/csJJfZt0acGC4XxJ414pXVMWoQBYQsaNVgGEgQ0SXKtK +FnenswMyYorZaOd3SxJXbSA6eF5q+3zvS4zxW9Qj6aVklUYcHAx1+WPlAz/7w80x +SsVP/qYYlLm8u5yd1pi/xDGiG+RrPedSltKb3hQk1NAci45WwAbYONiEhvJ6+/UH +5NJxgVqfCBDoi8+TtZPC7xXl07SZwsmhVfBNyow6miK0lkko4n1i7mU42XTYNZZD +51OkDAV2oKZhrSfV19zKn8AO0RFYDcmSRBFIC366qWUIQeLxPKLZ4Tu5wH0F8/y/ +epaut2X2S36LRGBEg9Qsrd3a4oLwvqKADO4oGN2pSCHWJznhxxoak3DtinSu9YBW +JV2fmZcawmu1X6FHRI4= +-----END CERTIFICATE----- diff --git a/testdata/tls/server_rotated.key b/testdata/tls/server_rotated.key new file mode 100644 index 000000000..2ea518aa0 --- /dev/null +++ b/testdata/tls/server_rotated.key @@ -0,0 
+1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsxZf7k2h+29/lRmmuosmL2GMZqeM5lfTtEkqBYocKlO7+AK+ +xfnCjHblN7ni5WYewV3c4qlnoiFTsYT0zz6XC8yp3XOscxricp4OxqWg4z4AwYwj +BmXfc/rueq2MagUWyCC8gerdaegSuA1DEPrW2cmopADC7zZcNXOrHqFjuvn6/8Bs +msPOFQ1u6hpp9Hlsdob2rWOOAQ7YcqaGt1BVueDiMfLs5/f18nuNyVf/teUMHPWA +A4F3bnA5EgbrLAvPdYcFSSf/695Y5B2r5ghZcJ6rn+Hi0IMs5mvOcBBDDa1Q7DVD +vJJi65t7HvVjYjzFuNaf6P7wYgWboneK69NmLwIDAQABAoIBAGnVjTe9dT6sM8+f +ayLO3PAfS+PWnLP7r0bZ/hVr+x0ggvMcXDWPVmPAV9HI7sf2w7IukDz7NB1iaJ1+ +H1bifE0i1DflBkK33p8xvTWz6BKjL7sx3/kF9zoJTyn8qgB1pXL7tatpaxQNbBKM +89dzBcmLHTheotTPYUrNYpEle1ShLXUYTr2oJ+ggCCuaPsUCFIbrvdj1KwlRFMls +B9MRe1jgsrsAJp2VTNWlh2OVq2NYcVRJ7xZgrsUNjWgK+F34YsZGLRob13RRPGSB +OGVtZJ8iWqRtQ6IvR6WxU8eVpn08W9sVWU/CGY0q95vXckz+2IxFo1lcN87rQRFp +bpZWCXECgYEA3Y8q1ZKWrhJKm8Zdd6C5AEkFWPWd4C8uQ0Xk8Rzw8sLcIoC+FMxL +UnNzzcE9rY2DsPLul3LCBKlE8yRIVc77Y8FmtWUROeUbLApZLvfi/CB8rpUQvYdq +4FCp3uG/nGpERdOXN7AWHPNKqsUYgbJlAB3Ktnz2Hkqu+8LjUEuC/h0CgYEAzu0Q +FcUKYvC+Mib2AB7JrfB0sgFpnenN4Ek2fczdAsn96rKuY40xVePhk/MhHAQozDOC +qZWnZODFCr8kdxWKWsPJCiDrfrs7yYu4iAs0TW7BpQHGehf9pji174zFqIJ5cRpv +CPzZs6HoeolT8AA81vgPLfkQu0eVVsbvJB1UM7sCgYEAz/xgB5HOpaZCJ621fGGC +igQCYxpflF52HWz1mGrEvf+yyyj0R23on4QGB/cJwWyBXZEP7VgrTljggydiSDs/ +vsuFcW9pFmI+eb9VLURC7tBIe1MwHduLtvvCG5rsVxdi8/HHmN1SROcXBuxzLv2a +1tsguuLf4FvXnDu9TFk2pPkCgYEAkRxQJmQax426Y6NM6oKsvk0dGOPCtFoM5VeS +XiA3cUhDx73sa6XNTkVToRRkSKhmD+WjoNdxH1488C9hHB2/+6zHJFI3s5UL9WgF +TWpGEHM2W0fmbyK530Jv3ADlcCmnjniSG0RuSvmk9aGuemVARSSKSJwcbqlhggxa +EgJ7VRcCgYA6YY42K6B0zqt/dhq2vcEYgz+lRVROZ5R0VYUWsF1pYx2JRjPoTYq/ +cl47VQOqS+m06Mz9OSI/QrH/kaO2PhQBi2FP2Y39GQPVxs+6nVoWzirszstSfy9A +TRlkCp4NMixsn/kTIeUbnYEoLGmQ7XvMIDRl0ZjcuFms0Q6kFEqWmw== +-----END RSA PRIVATE KEY----- diff --git a/tools.go b/tools.go index 3d5e5c0bc..2403594c4 100644 --- a/tools.go +++ b/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // Place any runtime dependencies as imports in this file. 
diff --git a/version/version.go b/version/version.go deleted file mode 100644 index e3e130bf9..000000000 --- a/version/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package version - -var ( - Version = "0.0.1" -)